xref: /linux/drivers/usb/dwc2/core.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 /*
2  * core.c - DesignWare HS OTG Controller common routines
3  *
4  * Copyright (C) 2004-2013 Synopsys, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The names of the above-listed copyright holders may not be used
16  *    to endorse or promote products derived from this software without
17  *    specific prior written permission.
18  *
19  * ALTERNATIVELY, this software may be distributed under the terms of the
20  * GNU General Public License ("GPL") as published by the Free Software
21  * Foundation; either version 2 of the License, or (at your option) any
22  * later version.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 /*
38  * The Core code provides basic services for accessing and managing the
39  * DWC_otg hardware. These services are used by both the Host Controller
40  * Driver and the Peripheral Controller Driver.
41  */
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/delay.h>
49 #include <linux/io.h>
50 #include <linux/slab.h>
51 #include <linux/usb.h>
52 
53 #include <linux/usb/hcd.h>
54 #include <linux/usb/ch11.h>
55 
56 #include "core.h"
57 #include "hcd.h"
58 
59 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
60 /**
61  * dwc2_backup_host_registers() - Backup controller host registers.
62  * When suspending the USB bus, the host registers need to be backed up
63  * if controller power is disabled once suspended.
64  *
65  * @hsotg: Programming view of the DWC_otg controller
66  */
67 static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
68 {
69 	struct dwc2_hregs_backup *hr;
70 	int i;
71 
72 	dev_dbg(hsotg->dev, "%s\n", __func__);
73 
74 	/* Backup Host regs */
75 	hr = hsotg->hr_backup;
76 	if (!hr) {
77 		hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL);
78 		if (!hr) {
79 			dev_err(hsotg->dev, "%s: can't allocate host regs\n",
80 					__func__);
81 			return -ENOMEM;
82 		}
83 
84 		hsotg->hr_backup = hr;
85 	}
86 	hr->hcfg = readl(hsotg->regs + HCFG);
87 	hr->haintmsk = readl(hsotg->regs + HAINTMSK);
88 	for (i = 0; i < hsotg->core_params->host_channels; ++i)
89 		hr->hcintmsk[i] = readl(hsotg->regs + HCINTMSK(i));
90 
91 	hr->hprt0 = readl(hsotg->regs + HPRT0);
92 	hr->hfir = readl(hsotg->regs + HFIR);
93 
94 	return 0;
95 }
96 
97 /**
98  * dwc2_restore_host_registers() - Restore controller host registers.
99  * When resuming the USB bus, the host registers need to be restored
100  * if controller power was disabled.
101  *
102  * @hsotg: Programming view of the DWC_otg controller
103  */
104 static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
105 {
106 	struct dwc2_hregs_backup *hr;
107 	int i;
108 
109 	dev_dbg(hsotg->dev, "%s\n", __func__);
110 
111 	/* Restore host regs */
112 	hr = hsotg->hr_backup;
113 	if (!hr) {
114 		dev_err(hsotg->dev, "%s: no host registers to restore\n",
115 				__func__);
116 		return -EINVAL;
117 	}
118 
119 	writel(hr->hcfg, hsotg->regs + HCFG);
120 	writel(hr->haintmsk, hsotg->regs + HAINTMSK);
121 
122 	for (i = 0; i < hsotg->core_params->host_channels; ++i)
123 		writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
124 
125 	writel(hr->hprt0, hsotg->regs + HPRT0);
126 	writel(hr->hfir, hsotg->regs + HFIR);
127 
128 	return 0;
129 }
130 #else
131 static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
132 { return 0; }
133 
134 static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
135 { return 0; }
136 #endif
137 
138 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
139 	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
140 /**
141  * dwc2_backup_device_registers() - Backup controller device registers.
142  * When suspending the USB bus, the device registers need to be backed up
143  * if controller power is disabled once suspended.
144  *
145  * @hsotg: Programming view of the DWC_otg controller
146  */
147 static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
148 {
149 	struct dwc2_dregs_backup *dr;
150 	int i;
151 
152 	dev_dbg(hsotg->dev, "%s\n", __func__);
153 
154 	/* Backup dev regs */
155 	dr = hsotg->dr_backup;
156 	if (!dr) {
157 		dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL);
158 		if (!dr) {
159 			dev_err(hsotg->dev, "%s: can't allocate device regs\n",
160 					__func__);
161 			return -ENOMEM;
162 		}
163 
164 		hsotg->dr_backup = dr;
165 	}
166 
167 	dr->dcfg = readl(hsotg->regs + DCFG);
168 	dr->dctl = readl(hsotg->regs + DCTL);
169 	dr->daintmsk = readl(hsotg->regs + DAINTMSK);
170 	dr->diepmsk = readl(hsotg->regs + DIEPMSK);
171 	dr->doepmsk = readl(hsotg->regs + DOEPMSK);
172 
173 	for (i = 0; i < hsotg->num_of_eps; i++) {
174 		/* Backup IN EPs */
175 		dr->diepctl[i] = readl(hsotg->regs + DIEPCTL(i));
176 
177 		/* Ensure DATA PID is correctly configured */
178 		if (dr->diepctl[i] & DXEPCTL_DPID)
179 			dr->diepctl[i] |= DXEPCTL_SETD1PID;
180 		else
181 			dr->diepctl[i] |= DXEPCTL_SETD0PID;
182 
183 		dr->dieptsiz[i] = readl(hsotg->regs + DIEPTSIZ(i));
184 		dr->diepdma[i] = readl(hsotg->regs + DIEPDMA(i));
185 
186 		/* Backup OUT EPs */
187 		dr->doepctl[i] = readl(hsotg->regs + DOEPCTL(i));
188 
189 		/* Ensure DATA PID is correctly configured */
190 		if (dr->doepctl[i] & DXEPCTL_DPID)
191 			dr->doepctl[i] |= DXEPCTL_SETD1PID;
192 		else
193 			dr->doepctl[i] |= DXEPCTL_SETD0PID;
194 
195 		dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
196 		dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
197 	}
198 
199 	return 0;
200 }
201 
202 /**
203  * dwc2_restore_device_registers() - Restore controller device registers.
204  * When resuming the USB bus, the device registers need to be restored
205  * if controller power was disabled.
206  *
207  * @hsotg: Programming view of the DWC_otg controller
208  */
209 static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
210 {
211 	struct dwc2_dregs_backup *dr;
212 	u32 dctl;
213 	int i;
214 
215 	dev_dbg(hsotg->dev, "%s\n", __func__);
216 
217 	/* Restore dev regs */
218 	dr = hsotg->dr_backup;
219 	if (!dr) {
220 		dev_err(hsotg->dev, "%s: no device registers to restore\n",
221 				__func__);
222 		return -EINVAL;
223 	}
224 
225 	writel(dr->dcfg, hsotg->regs + DCFG);
226 	writel(dr->dctl, hsotg->regs + DCTL);
227 	writel(dr->daintmsk, hsotg->regs + DAINTMSK);
228 	writel(dr->diepmsk, hsotg->regs + DIEPMSK);
229 	writel(dr->doepmsk, hsotg->regs + DOEPMSK);
230 
231 	for (i = 0; i < hsotg->num_of_eps; i++) {
232 		/* Restore IN EPs */
233 		writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
234 		writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
235 		writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
236 
237 		/* Restore OUT EPs */
238 		writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
239 		writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
240 		writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
241 	}
242 
243 	/* Set the Power-On Programming done bit */
244 	dctl = readl(hsotg->regs + DCTL);
245 	dctl |= DCTL_PWRONPRGDONE;
246 	writel(dctl, hsotg->regs + DCTL);
247 
248 	return 0;
249 }
250 #else
251 static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
252 { return 0; }
253 
254 static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
255 { return 0; }
256 #endif
257 
258 /**
259  * dwc2_backup_global_registers() - Backup global controller registers.
260  * When suspending the USB bus, the global registers need to be backed up
261  * if controller power is disabled once suspended.
262  *
263  * @hsotg: Programming view of the DWC_otg controller
264  */
265 static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
266 {
267 	struct dwc2_gregs_backup *gr;
268 	int i;
269 
270 	/* Backup global regs */
271 	gr = hsotg->gr_backup;
272 	if (!gr) {
273 		gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL);
274 		if (!gr) {
275 			dev_err(hsotg->dev, "%s: can't allocate global regs\n",
276 					__func__);
277 			return -ENOMEM;
278 		}
279 
280 		hsotg->gr_backup = gr;
281 	}
282 
283 	gr->gotgctl = readl(hsotg->regs + GOTGCTL);
284 	gr->gintmsk = readl(hsotg->regs + GINTMSK);
285 	gr->gahbcfg = readl(hsotg->regs + GAHBCFG);
286 	gr->gusbcfg = readl(hsotg->regs + GUSBCFG);
287 	gr->grxfsiz = readl(hsotg->regs + GRXFSIZ);
288 	gr->gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
289 	gr->hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
290 	gr->gdfifocfg = readl(hsotg->regs + GDFIFOCFG);
291 	for (i = 0; i < MAX_EPS_CHANNELS; i++)
292 		gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));
293 
294 	return 0;
295 }
296 
297 /**
298  * dwc2_restore_global_registers() - Restore controller global registers.
299  * When resuming the USB bus, the global registers need to be restored
300  * if controller power was disabled.
301  *
302  * @hsotg: Programming view of the DWC_otg controller
303  */
304 static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
305 {
306 	struct dwc2_gregs_backup *gr;
307 	int i;
308 
309 	dev_dbg(hsotg->dev, "%s\n", __func__);
310 
311 	/* Restore global regs */
312 	gr = hsotg->gr_backup;
313 	if (!gr) {
314 		dev_err(hsotg->dev, "%s: no global registers to restore\n",
315 				__func__);
316 		return -EINVAL;
317 	}
318 
319 	writel(0xffffffff, hsotg->regs + GINTSTS);
320 	writel(gr->gotgctl, hsotg->regs + GOTGCTL);
321 	writel(gr->gintmsk, hsotg->regs + GINTMSK);
322 	writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
323 	writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
324 	writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
325 	writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
326 	writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
327 	writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
328 	for (i = 0; i < MAX_EPS_CHANNELS; i++)
329 		writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));
330 
331 	return 0;
332 }
333 
334 /**
335  * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
336  *
337  * @hsotg: Programming view of the DWC_otg controller
338  * @restore: Controller registers need to be restored
339  */
340 int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
341 {
342 	u32 pcgcctl;
343 	int ret = 0;
344 
345 	if (!hsotg->core_params->hibernation)
346 		return -ENOTSUPP;
347 
348 	pcgcctl = readl(hsotg->regs + PCGCTL);
349 	pcgcctl &= ~PCGCTL_STOPPCLK;
350 	writel(pcgcctl, hsotg->regs + PCGCTL);
351 
352 	pcgcctl = readl(hsotg->regs + PCGCTL);
353 	pcgcctl &= ~PCGCTL_PWRCLMP;
354 	writel(pcgcctl, hsotg->regs + PCGCTL);
355 
356 	pcgcctl = readl(hsotg->regs + PCGCTL);
357 	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
358 	writel(pcgcctl, hsotg->regs + PCGCTL);
359 
360 	udelay(100);
361 	if (restore) {
362 		ret = dwc2_restore_global_registers(hsotg);
363 		if (ret) {
364 			dev_err(hsotg->dev, "%s: failed to restore registers\n",
365 					__func__);
366 			return ret;
367 		}
368 		if (dwc2_is_host_mode(hsotg)) {
369 			ret = dwc2_restore_host_registers(hsotg);
370 			if (ret) {
371 				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
372 						__func__);
373 				return ret;
374 			}
375 		} else {
376 			ret = dwc2_restore_device_registers(hsotg);
377 			if (ret) {
378 				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
379 						__func__);
380 				return ret;
381 			}
382 		}
383 	}
384 
385 	return ret;
386 }
387 
388 /**
389  * dwc2_enter_hibernation() - Put controller in Partial Power Down.
390  *
391  * @hsotg: Programming view of the DWC_otg controller
392  */
393 int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
394 {
395 	u32 pcgcctl;
396 	int ret = 0;
397 
398 	if (!hsotg->core_params->hibernation)
399 		return -ENOTSUPP;
400 
401 	/* Backup all registers */
402 	ret = dwc2_backup_global_registers(hsotg);
403 	if (ret) {
404 		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
405 				__func__);
406 		return ret;
407 	}
408 
409 	if (dwc2_is_host_mode(hsotg)) {
410 		ret = dwc2_backup_host_registers(hsotg);
411 		if (ret) {
412 			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
413 					__func__);
414 			return ret;
415 		}
416 	} else {
417 		ret = dwc2_backup_device_registers(hsotg);
418 		if (ret) {
419 			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
420 					__func__);
421 			return ret;
422 		}
423 	}
424 
425 	/* Put the controller in low power state */
426 	pcgcctl = readl(hsotg->regs + PCGCTL);
427 
428 	pcgcctl |= PCGCTL_PWRCLMP;
429 	writel(pcgcctl, hsotg->regs + PCGCTL);
430 	ndelay(20);
431 
432 	pcgcctl |= PCGCTL_RSTPDWNMODULE;
433 	writel(pcgcctl, hsotg->regs + PCGCTL);
434 	ndelay(20);
435 
436 	pcgcctl |= PCGCTL_STOPPCLK;
437 	writel(pcgcctl, hsotg->regs + PCGCTL);
438 
439 	return ret;
440 }
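/*
 * Typical usage (illustrative sketch only, not code from this file): the bus
 * suspend path calls dwc2_enter_hibernation(hsotg) once the port has been
 * suspended, and the resume / remote-wakeup path calls
 * dwc2_exit_hibernation(hsotg, true) so that the register state saved above
 * is written back before the controller is used again.
 */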
441 
442 /**
443  * dwc2_enable_common_interrupts() - Initializes the common interrupts,
444  * used in both device and host modes
445  *
446  * @hsotg: Programming view of the DWC_otg controller
447  */
448 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
449 {
450 	u32 intmsk;
451 
452 	/* Clear any pending OTG Interrupts */
453 	writel(0xffffffff, hsotg->regs + GOTGINT);
454 
455 	/* Clear any pending interrupts */
456 	writel(0xffffffff, hsotg->regs + GINTSTS);
457 
458 	/* Enable the interrupts in the GINTMSK */
459 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
460 
461 	if (hsotg->core_params->dma_enable <= 0)
462 		intmsk |= GINTSTS_RXFLVL;
463 	if (hsotg->core_params->external_id_pin_ctl <= 0)
464 		intmsk |= GINTSTS_CONIDSTSCHNG;
465 
466 	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
467 		  GINTSTS_SESSREQINT;
468 
469 	writel(intmsk, hsotg->regs + GINTMSK);
470 }
471 
472 /*
473  * Initializes the FSLSPClkSel field of the HCFG register depending on the
474  * PHY type
475  */
476 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
477 {
478 	u32 hcfg, val;
479 
480 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
481 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
482 	     hsotg->core_params->ulpi_fs_ls > 0) ||
483 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
484 		/* Full speed PHY */
485 		val = HCFG_FSLSPCLKSEL_48_MHZ;
486 	} else {
487 		/* High speed PHY running at full speed or high speed */
488 		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
489 	}
490 
491 	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
492 	hcfg = readl(hsotg->regs + HCFG);
493 	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
494 	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
495 	writel(hcfg, hsotg->regs + HCFG);
496 }
497 
498 /*
499  * Do a soft reset of the core. Be careful with this because it
500  * resets all the internal state machines of the core.
501  */
502 static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
503 {
504 	u32 greset;
505 	int count = 0;
506 	u32 gusbcfg;
507 
508 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
509 
510 	/* Wait for AHB master IDLE state */
511 	do {
512 		usleep_range(20000, 40000);
513 		greset = readl(hsotg->regs + GRSTCTL);
514 		if (++count > 50) {
515 			dev_warn(hsotg->dev,
516 				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
517 				 __func__, greset);
518 			return -EBUSY;
519 		}
520 	} while (!(greset & GRSTCTL_AHBIDLE));
521 
522 	/* Core Soft Reset */
523 	count = 0;
524 	greset |= GRSTCTL_CSFTRST;
525 	writel(greset, hsotg->regs + GRSTCTL);
526 	do {
527 		usleep_range(20000, 40000);
528 		greset = readl(hsotg->regs + GRSTCTL);
529 		if (++count > 50) {
530 			dev_warn(hsotg->dev,
531 				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
532 				 __func__, greset);
533 			return -EBUSY;
534 		}
535 	} while (greset & GRSTCTL_CSFTRST);
536 
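	/*
	 * Force the mode requested by the platform (dr_mode) now that the
	 * core has been reset: host and peripheral are forced via GUSBCFG,
	 * while OTG clears both force bits so the connector ID pin decides.
	 */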
537 	if (hsotg->dr_mode == USB_DR_MODE_HOST) {
538 		gusbcfg = readl(hsotg->regs + GUSBCFG);
539 		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
540 		gusbcfg |= GUSBCFG_FORCEHOSTMODE;
541 		writel(gusbcfg, hsotg->regs + GUSBCFG);
542 	} else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
543 		gusbcfg = readl(hsotg->regs + GUSBCFG);
544 		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
545 		gusbcfg |= GUSBCFG_FORCEDEVMODE;
546 		writel(gusbcfg, hsotg->regs + GUSBCFG);
547 	} else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
548 		gusbcfg = readl(hsotg->regs + GUSBCFG);
549 		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
550 		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
551 		writel(gusbcfg, hsotg->regs + GUSBCFG);
552 	}
553 
554 	/*
555 	 * NOTE: This long sleep is _very_ important, otherwise the core will
556 	 * not stay in host mode after a connector ID change!
557 	 */
558 	usleep_range(150000, 200000);
559 
560 	return 0;
561 }
562 
563 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
564 {
565 	u32 usbcfg, i2cctl;
566 	int retval = 0;
567 
568 	/*
569 	 * core_init() is now called on every switch so only call the
570 	 * following for the first time through
571 	 */
572 	if (select_phy) {
573 		dev_dbg(hsotg->dev, "FS PHY selected\n");
574 		usbcfg = readl(hsotg->regs + GUSBCFG);
575 		usbcfg |= GUSBCFG_PHYSEL;
576 		writel(usbcfg, hsotg->regs + GUSBCFG);
577 
578 		/* Reset after a PHY select */
579 		retval = dwc2_core_reset(hsotg);
580 		if (retval) {
581 			dev_err(hsotg->dev, "%s() Reset failed, aborting",
582 					__func__);
583 			return retval;
584 		}
585 	}
586 
587 	/*
588 	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
589 	 * do this on HNP Dev/Host mode switches (done in dev_init and
590 	 * host_init).
591 	 */
592 	if (dwc2_is_host_mode(hsotg))
593 		dwc2_init_fs_ls_pclk_sel(hsotg);
594 
595 	if (hsotg->core_params->i2c_enable > 0) {
596 		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
597 
598 		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
599 		usbcfg = readl(hsotg->regs + GUSBCFG);
600 		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
601 		writel(usbcfg, hsotg->regs + GUSBCFG);
602 
603 		/* Program GI2CCTL.I2CEn */
604 		i2cctl = readl(hsotg->regs + GI2CCTL);
605 		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
606 		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
607 		i2cctl &= ~GI2CCTL_I2CEN;
608 		writel(i2cctl, hsotg->regs + GI2CCTL);
609 		i2cctl |= GI2CCTL_I2CEN;
610 		writel(i2cctl, hsotg->regs + GI2CCTL);
611 	}
612 
613 	return retval;
614 }
615 
616 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
617 {
618 	u32 usbcfg;
619 	int retval = 0;
620 
621 	if (!select_phy)
622 		return 0;
623 
624 	usbcfg = readl(hsotg->regs + GUSBCFG);
625 
626 	/*
627 	 * HS PHY parameters. These parameters are preserved during soft reset
628 	 * so only program the first time. Do a soft reset immediately after
629 	 * setting phyif.
630 	 */
631 	switch (hsotg->core_params->phy_type) {
632 	case DWC2_PHY_TYPE_PARAM_ULPI:
633 		/* ULPI interface */
634 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
635 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
636 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
637 		if (hsotg->core_params->phy_ulpi_ddr > 0)
638 			usbcfg |= GUSBCFG_DDRSEL;
639 		break;
640 	case DWC2_PHY_TYPE_PARAM_UTMI:
641 		/* UTMI+ interface */
642 		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
643 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
644 		if (hsotg->core_params->phy_utmi_width == 16)
645 			usbcfg |= GUSBCFG_PHYIF16;
646 		break;
647 	default:
648 		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
649 		break;
650 	}
651 
652 	writel(usbcfg, hsotg->regs + GUSBCFG);
653 
654 	/* Reset after setting the PHY parameters */
655 	retval = dwc2_core_reset(hsotg);
656 	if (retval) {
657 		dev_err(hsotg->dev, "%s() Reset failed, aborting",
658 				__func__);
659 		return retval;
660 	}
661 
662 	return retval;
663 }
664 
665 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
666 {
667 	u32 usbcfg;
668 	int retval = 0;
669 
670 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
671 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
672 		/* If FS mode with FS PHY */
673 		retval = dwc2_fs_phy_init(hsotg, select_phy);
674 		if (retval)
675 			return retval;
676 	} else {
677 		/* High speed PHY */
678 		retval = dwc2_hs_phy_init(hsotg, select_phy);
679 		if (retval)
680 			return retval;
681 	}
682 
683 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
684 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
685 	    hsotg->core_params->ulpi_fs_ls > 0) {
686 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
687 		usbcfg = readl(hsotg->regs + GUSBCFG);
688 		usbcfg |= GUSBCFG_ULPI_FS_LS;
689 		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
690 		writel(usbcfg, hsotg->regs + GUSBCFG);
691 	} else {
692 		usbcfg = readl(hsotg->regs + GUSBCFG);
693 		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
694 		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
695 		writel(usbcfg, hsotg->regs + GUSBCFG);
696 	}
697 
698 	return retval;
699 }
700 
701 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
702 {
703 	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
704 
705 	switch (hsotg->hw_params.arch) {
706 	case GHWCFG2_EXT_DMA_ARCH:
707 		dev_err(hsotg->dev, "External DMA Mode not supported\n");
708 		return -EINVAL;
709 
710 	case GHWCFG2_INT_DMA_ARCH:
711 		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
712 		if (hsotg->core_params->ahbcfg != -1) {
713 			ahbcfg &= GAHBCFG_CTRL_MASK;
714 			ahbcfg |= hsotg->core_params->ahbcfg &
715 				  ~GAHBCFG_CTRL_MASK;
716 		}
717 		break;
718 
719 	case GHWCFG2_SLAVE_ONLY_ARCH:
720 	default:
721 		dev_dbg(hsotg->dev, "Slave Only Mode\n");
722 		break;
723 	}
724 
725 	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
726 		hsotg->core_params->dma_enable,
727 		hsotg->core_params->dma_desc_enable);
728 
729 	if (hsotg->core_params->dma_enable > 0) {
730 		if (hsotg->core_params->dma_desc_enable > 0)
731 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
732 		else
733 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
734 	} else {
735 		dev_dbg(hsotg->dev, "Using Slave mode\n");
736 		hsotg->core_params->dma_desc_enable = 0;
737 	}
738 
739 	if (hsotg->core_params->dma_enable > 0)
740 		ahbcfg |= GAHBCFG_DMA_EN;
741 
742 	writel(ahbcfg, hsotg->regs + GAHBCFG);
743 
744 	return 0;
745 }
746 
747 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
748 {
749 	u32 usbcfg;
750 
751 	usbcfg = readl(hsotg->regs + GUSBCFG);
752 	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
753 
754 	switch (hsotg->hw_params.op_mode) {
755 	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
756 		if (hsotg->core_params->otg_cap ==
757 				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
758 			usbcfg |= GUSBCFG_HNPCAP;
759 		if (hsotg->core_params->otg_cap !=
760 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
761 			usbcfg |= GUSBCFG_SRPCAP;
762 		break;
763 
764 	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
765 	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
766 	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
767 		if (hsotg->core_params->otg_cap !=
768 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
769 			usbcfg |= GUSBCFG_SRPCAP;
770 		break;
771 
772 	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
773 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
774 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
775 	default:
776 		break;
777 	}
778 
779 	writel(usbcfg, hsotg->regs + GUSBCFG);
780 }
781 
782 /**
783  * dwc2_core_init() - Initializes the DWC_otg controller registers and
784  * prepares the core for device mode or host mode operation
785  *
786  * @hsotg:      Programming view of the DWC_otg controller
787  * @select_phy: If true then also set the Phy type
788  * @irq:        If >= 0, the irq to register
789  */
790 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
791 {
792 	u32 usbcfg, otgctl;
793 	int retval;
794 
795 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
796 
797 	usbcfg = readl(hsotg->regs + GUSBCFG);
798 
799 	/* Set ULPI External VBUS bit if needed */
800 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
801 	if (hsotg->core_params->phy_ulpi_ext_vbus ==
802 				DWC2_PHY_ULPI_EXTERNAL_VBUS)
803 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
804 
805 	/* Set external TS Dline pulsing bit if needed */
806 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
807 	if (hsotg->core_params->ts_dline > 0)
808 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
809 
810 	writel(usbcfg, hsotg->regs + GUSBCFG);
811 
812 	/* Reset the Controller */
813 	retval = dwc2_core_reset(hsotg);
814 	if (retval) {
815 		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
816 				__func__);
817 		return retval;
818 	}
819 
820 	/*
821 	 * This needs to happen in FS mode before any other programming occurs
822 	 */
823 	retval = dwc2_phy_init(hsotg, select_phy);
824 	if (retval)
825 		return retval;
826 
827 	/* Program the GAHBCFG Register */
828 	retval = dwc2_gahbcfg_init(hsotg);
829 	if (retval)
830 		return retval;
831 
832 	/* Program the GUSBCFG register */
833 	dwc2_gusbcfg_init(hsotg);
834 
835 	/* Program the GOTGCTL register */
836 	otgctl = readl(hsotg->regs + GOTGCTL);
837 	otgctl &= ~GOTGCTL_OTGVER;
838 	if (hsotg->core_params->otg_ver > 0)
839 		otgctl |= GOTGCTL_OTGVER;
840 	writel(otgctl, hsotg->regs + GOTGCTL);
841 	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
842 
843 	/* Clear the SRP success bit for FS-I2c */
844 	hsotg->srp_success = 0;
845 
846 	/* Enable common interrupts */
847 	dwc2_enable_common_interrupts(hsotg);
848 
849 	/*
850 	 * Do device or host initialization based on mode during PCD and
851 	 * HCD initialization
852 	 */
853 	if (dwc2_is_host_mode(hsotg)) {
854 		dev_dbg(hsotg->dev, "Host Mode\n");
855 		hsotg->op_state = OTG_STATE_A_HOST;
856 	} else {
857 		dev_dbg(hsotg->dev, "Device Mode\n");
858 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
859 	}
860 
861 	return 0;
862 }
863 
864 /**
865  * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
866  *
867  * @hsotg: Programming view of DWC_otg controller
868  */
869 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
870 {
871 	u32 intmsk;
872 
873 	dev_dbg(hsotg->dev, "%s()\n", __func__);
874 
875 	/* Disable all interrupts */
876 	writel(0, hsotg->regs + GINTMSK);
877 	writel(0, hsotg->regs + HAINTMSK);
878 
879 	/* Enable the common interrupts */
880 	dwc2_enable_common_interrupts(hsotg);
881 
882 	/* Enable host mode interrupts without disturbing common interrupts */
883 	intmsk = readl(hsotg->regs + GINTMSK);
884 	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
885 	writel(intmsk, hsotg->regs + GINTMSK);
886 }
887 
888 /**
889  * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
890  *
891  * @hsotg: Programming view of DWC_otg controller
892  */
893 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
894 {
895 	u32 intmsk = readl(hsotg->regs + GINTMSK);
896 
897 	/* Disable host mode interrupts without disturbing common interrupts */
898 	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
899 		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
900 	writel(intmsk, hsotg->regs + GINTMSK);
901 }
902 
903 /*
904  * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes for
905  * systems that have a total FIFO depth smaller than the default
906  * RX + TX FIFO size.
907  *
908  * @hsotg: Programming view of DWC_otg controller
909  */
910 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
911 {
912 	struct dwc2_core_params *params = hsotg->core_params;
913 	struct dwc2_hw_params *hw = &hsotg->hw_params;
914 	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
915 
916 	total_fifo_size = hw->total_fifo_size;
917 	rxfsiz = params->host_rx_fifo_size;
918 	nptxfsiz = params->host_nperio_tx_fifo_size;
919 	ptxfsiz = params->host_perio_tx_fifo_size;
920 
921 	/*
922 	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
923 	 * allocation with support for high bandwidth endpoints. Synopsys
924 	 * defines MPS (Max Packet Size) as 1024 for a periodic EP and as 512
925 	 * for a non-periodic EP.
926 	 */
927 	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
928 		/*
929 		 * For Buffer DMA mode/Scatter Gather DMA mode
930 		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
931 		 * with n = number of host channels.
932 		 * 2 * ((1024/4) + 2) = 516
933 		 */
934 		rxfsiz = 516 + hw->host_channels;
935 
936 		/*
937 		 * min non-periodic tx fifo depth
938 		 * 2 * (largest non-periodic USB packet used / 4)
939 		 * 2 * (512/4) = 256
940 		 */
941 		nptxfsiz = 256;
942 
943 		/*
944 		 * min periodic tx fifo depth
945 		 * (largest packet size*MC)/4
946 		 * (1024 * 3)/4 = 768
947 		 */
948 		ptxfsiz = 768;
949 
950 		params->host_rx_fifo_size = rxfsiz;
951 		params->host_nperio_tx_fifo_size = nptxfsiz;
952 		params->host_perio_tx_fifo_size = ptxfsiz;
953 	}
954 
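	/*
	 * Worked example with hypothetical numbers (illustration only): if
	 * hw->total_fifo_size were 1920 and hw->host_channels were 16, the
	 * reduced sizes above would be rxfsiz = 516 + 16 = 532, nptxfsiz =
	 * 256 and ptxfsiz = 768, i.e. 532 + 256 + 768 = 1556 <= 1920, so the
	 * check below would not trigger.
	 */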
955 	/*
956 	 * If the summation of RX, NPTX and PTX fifo sizes is still
957 	 * bigger than the total_fifo_size, then we have a problem.
958 	 *
959 	 * We won't be able to allocate as many endpoints. Right now,
960 	 * we're just printing an error message, but ideally this FIFO
961 	 * allocation algorithm would be improved in the future.
962 	 *
963 	 * FIXME improve this FIFO allocation algorithm.
964 	 */
965 	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
966 		dev_err(hsotg->dev, "invalid fifo sizes\n");
967 }
968 
969 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
970 {
971 	struct dwc2_core_params *params = hsotg->core_params;
972 	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
973 
974 	if (!params->enable_dynamic_fifo)
975 		return;
976 
977 	dwc2_calculate_dynamic_fifo(hsotg);
978 
979 	/* Rx FIFO */
980 	grxfsiz = readl(hsotg->regs + GRXFSIZ);
981 	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
982 	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
983 	grxfsiz |= params->host_rx_fifo_size <<
984 		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
985 	writel(grxfsiz, hsotg->regs + GRXFSIZ);
986 	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
987 
988 	/* Non-periodic Tx FIFO */
989 	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
990 		readl(hsotg->regs + GNPTXFSIZ));
991 	nptxfsiz = params->host_nperio_tx_fifo_size <<
992 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
993 	nptxfsiz |= params->host_rx_fifo_size <<
994 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
995 	writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
996 	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
997 		readl(hsotg->regs + GNPTXFSIZ));
998 
999 	/* Periodic Tx FIFO */
1000 	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
1001 		readl(hsotg->regs + HPTXFSIZ));
1002 	hptxfsiz = params->host_perio_tx_fifo_size <<
1003 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
1004 	hptxfsiz |= (params->host_rx_fifo_size +
1005 		     params->host_nperio_tx_fifo_size) <<
1006 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
1007 	writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
1008 	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
1009 		readl(hsotg->regs + HPTXFSIZ));
1010 
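	/*
	 * Resulting layout of the shared FIFO RAM as programmed above, with
	 * rx/nptx/ptx denoting the host_*_fifo_size parameters:
	 *   [0, rx)                       Rx FIFO
	 *   [rx, rx + nptx)               non-periodic Tx FIFO
	 *   [rx + nptx, rx + nptx + ptx)  periodic Tx FIFO
	 */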
1011 	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
1012 	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
1013 		/*
1014 		 * Global DFIFOCFG calculation for Host mode -
1015 		 * include RxFIFO, NPTXFIFO and HPTXFIFO
1016 		 */
1017 		dfifocfg = readl(hsotg->regs + GDFIFOCFG);
1018 		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
1019 		dfifocfg |= (params->host_rx_fifo_size +
1020 			     params->host_nperio_tx_fifo_size +
1021 			     params->host_perio_tx_fifo_size) <<
1022 			    GDFIFOCFG_EPINFOBASE_SHIFT &
1023 			    GDFIFOCFG_EPINFOBASE_MASK;
1024 		writel(dfifocfg, hsotg->regs + GDFIFOCFG);
1025 	}
1026 }
1027 
1028 /**
1029  * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1030  * Host mode
1031  *
1032  * @hsotg: Programming view of DWC_otg controller
1033  *
1034  * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1035  * request queues. Host channels are reset to ensure that they are ready for
1036  * performing transfers.
1037  */
1038 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
1039 {
1040 	u32 hcfg, hfir, otgctl;
1041 
1042 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
1043 
1044 	/* Restart the Phy Clock */
1045 	writel(0, hsotg->regs + PCGCTL);
1046 
1047 	/* Initialize Host Configuration Register */
1048 	dwc2_init_fs_ls_pclk_sel(hsotg);
1049 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
1050 		hcfg = readl(hsotg->regs + HCFG);
1051 		hcfg |= HCFG_FSLSSUPP;
1052 		writel(hcfg, hsotg->regs + HCFG);
1053 	}
1054 
1055 	/*
1056 	 * This bit allows dynamic reloading of the HFIR register during
1057 	 * runtime. This bit needs to be programmed during initial configuration
1058 	 * and its value must not be changed during runtime.
1059 	 */
1060 	if (hsotg->core_params->reload_ctl > 0) {
1061 		hfir = readl(hsotg->regs + HFIR);
1062 		hfir |= HFIR_RLDCTRL;
1063 		writel(hfir, hsotg->regs + HFIR);
1064 	}
1065 
1066 	if (hsotg->core_params->dma_desc_enable > 0) {
1067 		u32 op_mode = hsotg->hw_params.op_mode;
1068 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
1069 		    !hsotg->hw_params.dma_desc_enable ||
1070 		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
1071 		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
1072 		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
1073 			dev_err(hsotg->dev,
1074 				"Hardware does not support descriptor DMA mode -\n");
1075 			dev_err(hsotg->dev,
1076 				"falling back to buffer DMA mode.\n");
1077 			hsotg->core_params->dma_desc_enable = 0;
1078 		} else {
1079 			hcfg = readl(hsotg->regs + HCFG);
1080 			hcfg |= HCFG_DESCDMA;
1081 			writel(hcfg, hsotg->regs + HCFG);
1082 		}
1083 	}
1084 
1085 	/* Configure data FIFO sizes */
1086 	dwc2_config_fifos(hsotg);
1087 
1088 	/* TODO - check this */
1089 	/* Clear Host Set HNP Enable in the OTG Control Register */
1090 	otgctl = readl(hsotg->regs + GOTGCTL);
1091 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
1092 	writel(otgctl, hsotg->regs + GOTGCTL);
1093 
1094 	/* Make sure the FIFOs are flushed */
1095 	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
1096 	dwc2_flush_rx_fifo(hsotg);
1097 
1098 	/* Clear Host Set HNP Enable in the OTG Control Register */
1099 	otgctl = readl(hsotg->regs + GOTGCTL);
1100 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
1101 	writel(otgctl, hsotg->regs + GOTGCTL);
1102 
1103 	if (hsotg->core_params->dma_desc_enable <= 0) {
1104 		int num_channels, i;
1105 		u32 hcchar;
1106 
1107 		/* Flush out any leftover queued requests */
1108 		num_channels = hsotg->core_params->host_channels;
1109 		for (i = 0; i < num_channels; i++) {
1110 			hcchar = readl(hsotg->regs + HCCHAR(i));
1111 			hcchar &= ~HCCHAR_CHENA;
1112 			hcchar |= HCCHAR_CHDIS;
1113 			hcchar &= ~HCCHAR_EPDIR;
1114 			writel(hcchar, hsotg->regs + HCCHAR(i));
1115 		}
1116 
1117 		/* Halt all channels to put them into a known state */
1118 		for (i = 0; i < num_channels; i++) {
1119 			int count = 0;
1120 
1121 			hcchar = readl(hsotg->regs + HCCHAR(i));
1122 			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
1123 			hcchar &= ~HCCHAR_EPDIR;
1124 			writel(hcchar, hsotg->regs + HCCHAR(i));
1125 			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
1126 				__func__, i);
1127 			do {
1128 				hcchar = readl(hsotg->regs + HCCHAR(i));
1129 				if (++count > 1000) {
1130 					dev_err(hsotg->dev,
1131 						"Unable to clear enable on channel %d\n",
1132 						i);
1133 					break;
1134 				}
1135 				udelay(1);
1136 			} while (hcchar & HCCHAR_CHENA);
1137 		}
1138 	}
1139 
1140 	/* Turn on the vbus power */
1141 	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
1142 	if (hsotg->op_state == OTG_STATE_A_HOST) {
1143 		u32 hprt0 = dwc2_read_hprt0(hsotg);
1144 
1145 		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
1146 			!!(hprt0 & HPRT0_PWR));
1147 		if (!(hprt0 & HPRT0_PWR)) {
1148 			hprt0 |= HPRT0_PWR;
1149 			writel(hprt0, hsotg->regs + HPRT0);
1150 		}
1151 	}
1152 
1153 	dwc2_enable_host_interrupts(hsotg);
1154 }
1155 
1156 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
1157 				      struct dwc2_host_chan *chan)
1158 {
1159 	u32 hcintmsk = HCINTMSK_CHHLTD;
1160 
1161 	switch (chan->ep_type) {
1162 	case USB_ENDPOINT_XFER_CONTROL:
1163 	case USB_ENDPOINT_XFER_BULK:
1164 		dev_vdbg(hsotg->dev, "control/bulk\n");
1165 		hcintmsk |= HCINTMSK_XFERCOMPL;
1166 		hcintmsk |= HCINTMSK_STALL;
1167 		hcintmsk |= HCINTMSK_XACTERR;
1168 		hcintmsk |= HCINTMSK_DATATGLERR;
1169 		if (chan->ep_is_in) {
1170 			hcintmsk |= HCINTMSK_BBLERR;
1171 		} else {
1172 			hcintmsk |= HCINTMSK_NAK;
1173 			hcintmsk |= HCINTMSK_NYET;
1174 			if (chan->do_ping)
1175 				hcintmsk |= HCINTMSK_ACK;
1176 		}
1177 
1178 		if (chan->do_split) {
1179 			hcintmsk |= HCINTMSK_NAK;
1180 			if (chan->complete_split)
1181 				hcintmsk |= HCINTMSK_NYET;
1182 			else
1183 				hcintmsk |= HCINTMSK_ACK;
1184 		}
1185 
1186 		if (chan->error_state)
1187 			hcintmsk |= HCINTMSK_ACK;
1188 		break;
1189 
1190 	case USB_ENDPOINT_XFER_INT:
1191 		if (dbg_perio())
1192 			dev_vdbg(hsotg->dev, "intr\n");
1193 		hcintmsk |= HCINTMSK_XFERCOMPL;
1194 		hcintmsk |= HCINTMSK_NAK;
1195 		hcintmsk |= HCINTMSK_STALL;
1196 		hcintmsk |= HCINTMSK_XACTERR;
1197 		hcintmsk |= HCINTMSK_DATATGLERR;
1198 		hcintmsk |= HCINTMSK_FRMOVRUN;
1199 
1200 		if (chan->ep_is_in)
1201 			hcintmsk |= HCINTMSK_BBLERR;
1202 		if (chan->error_state)
1203 			hcintmsk |= HCINTMSK_ACK;
1204 		if (chan->do_split) {
1205 			if (chan->complete_split)
1206 				hcintmsk |= HCINTMSK_NYET;
1207 			else
1208 				hcintmsk |= HCINTMSK_ACK;
1209 		}
1210 		break;
1211 
1212 	case USB_ENDPOINT_XFER_ISOC:
1213 		if (dbg_perio())
1214 			dev_vdbg(hsotg->dev, "isoc\n");
1215 		hcintmsk |= HCINTMSK_XFERCOMPL;
1216 		hcintmsk |= HCINTMSK_FRMOVRUN;
1217 		hcintmsk |= HCINTMSK_ACK;
1218 
1219 		if (chan->ep_is_in) {
1220 			hcintmsk |= HCINTMSK_XACTERR;
1221 			hcintmsk |= HCINTMSK_BBLERR;
1222 		}
1223 		break;
1224 	default:
1225 		dev_err(hsotg->dev, "## Unknown EP type ##\n");
1226 		break;
1227 	}
1228 
1229 	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1230 	if (dbg_hc(chan))
1231 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1232 }
1233 
1234 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
1235 				    struct dwc2_host_chan *chan)
1236 {
1237 	u32 hcintmsk = HCINTMSK_CHHLTD;
1238 
1239 	/*
1240 	 * For Descriptor DMA mode core halts the channel on AHB error.
1241 	 * Interrupt is not required.
1242 	 */
1243 	if (hsotg->core_params->dma_desc_enable <= 0) {
1244 		if (dbg_hc(chan))
1245 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1246 		hcintmsk |= HCINTMSK_AHBERR;
1247 	} else {
1248 		if (dbg_hc(chan))
1249 			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
1250 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1251 			hcintmsk |= HCINTMSK_XFERCOMPL;
1252 	}
1253 
1254 	if (chan->error_state && !chan->do_split &&
1255 	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1256 		if (dbg_hc(chan))
1257 			dev_vdbg(hsotg->dev, "setting ACK\n");
1258 		hcintmsk |= HCINTMSK_ACK;
1259 		if (chan->ep_is_in) {
1260 			hcintmsk |= HCINTMSK_DATATGLERR;
1261 			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
1262 				hcintmsk |= HCINTMSK_NAK;
1263 		}
1264 	}
1265 
1266 	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1267 	if (dbg_hc(chan))
1268 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1269 }
1270 
1271 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
1272 				struct dwc2_host_chan *chan)
1273 {
1274 	u32 intmsk;
1275 
1276 	if (hsotg->core_params->dma_enable > 0) {
1277 		if (dbg_hc(chan))
1278 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1279 		dwc2_hc_enable_dma_ints(hsotg, chan);
1280 	} else {
1281 		if (dbg_hc(chan))
1282 			dev_vdbg(hsotg->dev, "DMA disabled\n");
1283 		dwc2_hc_enable_slave_ints(hsotg, chan);
1284 	}
1285 
1286 	/* Enable the top level host channel interrupt */
1287 	intmsk = readl(hsotg->regs + HAINTMSK);
1288 	intmsk |= 1 << chan->hc_num;
1289 	writel(intmsk, hsotg->regs + HAINTMSK);
1290 	if (dbg_hc(chan))
1291 		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
1292 
1293 	/* Make sure host channel interrupts are enabled */
1294 	intmsk = readl(hsotg->regs + GINTMSK);
1295 	intmsk |= GINTSTS_HCHINT;
1296 	writel(intmsk, hsotg->regs + GINTMSK);
1297 	if (dbg_hc(chan))
1298 		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
1299 }
1300 
1301 /**
1302  * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1303  * a specific endpoint
1304  *
1305  * @hsotg: Programming view of DWC_otg controller
1306  * @chan:  Information needed to initialize the host channel
1307  *
1308  * The HCCHARn register is set up with the characteristics specified in chan.
1309  * Host channel interrupts that may need to be serviced while this transfer is
1310  * in progress are enabled.
1311  */
1312 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1313 {
1314 	u8 hc_num = chan->hc_num;
1315 	u32 hcintmsk;
1316 	u32 hcchar;
1317 	u32 hcsplt = 0;
1318 
1319 	if (dbg_hc(chan))
1320 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1321 
1322 	/* Clear old interrupt conditions for this host channel */
1323 	hcintmsk = 0xffffffff;
1324 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1325 	writel(hcintmsk, hsotg->regs + HCINT(hc_num));
1326 
1327 	/* Enable channel interrupts required for this transfer */
1328 	dwc2_hc_enable_ints(hsotg, chan);
1329 
1330 	/*
1331 	 * Program the HCCHARn register with the endpoint characteristics for
1332 	 * the current transfer
1333 	 */
1334 	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
1335 	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
1336 	if (chan->ep_is_in)
1337 		hcchar |= HCCHAR_EPDIR;
1338 	if (chan->speed == USB_SPEED_LOW)
1339 		hcchar |= HCCHAR_LSPDDEV;
1340 	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
1341 	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
1342 	writel(hcchar, hsotg->regs + HCCHAR(hc_num));
1343 	if (dbg_hc(chan)) {
1344 		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
1345 			 hc_num, hcchar);
1346 
1347 		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
1348 			 __func__, hc_num);
1349 		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
1350 			 chan->dev_addr);
1351 		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
1352 			 chan->ep_num);
1353 		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
1354 			 chan->ep_is_in);
1355 		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
1356 			 chan->speed == USB_SPEED_LOW);
1357 		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
1358 			 chan->ep_type);
1359 		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
1360 			 chan->max_packet);
1361 	}
1362 
1363 	/* Program the HCSPLT register for SPLITs */
1364 	if (chan->do_split) {
1365 		if (dbg_hc(chan))
1366 			dev_vdbg(hsotg->dev,
1367 				 "Programming HC %d with split --> %s\n",
1368 				 hc_num,
1369 				 chan->complete_split ? "CSPLIT" : "SSPLIT");
1370 		if (chan->complete_split)
1371 			hcsplt |= HCSPLT_COMPSPLT;
1372 		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
1373 			  HCSPLT_XACTPOS_MASK;
1374 		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
1375 			  HCSPLT_HUBADDR_MASK;
1376 		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
1377 			  HCSPLT_PRTADDR_MASK;
1378 		if (dbg_hc(chan)) {
1379 			dev_vdbg(hsotg->dev, "	  comp split %d\n",
1380 				 chan->complete_split);
1381 			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
1382 				 chan->xact_pos);
1383 			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
1384 				 chan->hub_addr);
1385 			dev_vdbg(hsotg->dev, "	  hub port %d\n",
1386 				 chan->hub_port);
1387 			dev_vdbg(hsotg->dev, "	  is_in %d\n",
1388 				 chan->ep_is_in);
1389 			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
1390 				 chan->max_packet);
1391 			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
1392 				 chan->xfer_len);
1393 		}
1394 	}
1395 
1396 	writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
1397 }
1398 
1399 /**
1400  * dwc2_hc_halt() - Attempts to halt a host channel
1401  *
1402  * @hsotg:       Controller register interface
1403  * @chan:        Host channel to halt
1404  * @halt_status: Reason for halting the channel
1405  *
1406  * This function should only be called in Slave mode or to abort a transfer in
1407  * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1408  * controller halts the channel when the transfer is complete or a condition
1409  * occurs that requires application intervention.
1410  *
1411  * In slave mode, checks for a free request queue entry, then sets the Channel
1412  * Enable and Channel Disable bits of the Host Channel Characteristics
1413  * register of the specified channel to initiate the halt. If there is no free
1414  * request queue entry, sets only the Channel Disable bit of the HCCHARn
1415  * register to flush requests for this channel. In the latter case, sets a
1416  * flag to indicate that the host channel needs to be halted when a request
1417  * queue slot is open.
1418  *
1419  * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1420  * HCCHARn register. The controller ensures there is space in the request
1421  * queue before submitting the halt request.
1422  *
1423  * Some time may elapse before the core flushes any posted requests for this
1424  * host channel and halts. The Channel Halted interrupt handler completes the
1425  * deactivation of the host channel.
1426  */
1427 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1428 		  enum dwc2_halt_status halt_status)
1429 {
1430 	u32 nptxsts, hptxsts, hcchar;
1431 
1432 	if (dbg_hc(chan))
1433 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1434 	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1435 		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1436 
1437 	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1438 	    halt_status == DWC2_HC_XFER_AHB_ERR) {
1439 		/*
1440 		 * Disable all channel interrupts except Ch Halted. The QTD
1441 		 * and QH state associated with this transfer has been cleared
1442 		 * (in the case of URB_DEQUEUE), so the channel needs to be
1443 		 * shut down carefully to prevent crashes.
1444 		 */
1445 		u32 hcintmsk = HCINTMSK_CHHLTD;
1446 
1447 		dev_vdbg(hsotg->dev, "dequeue/error\n");
1448 		writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1449 
1450 		/*
1451 		 * Make sure no other interrupts besides halt are currently
1452 		 * pending. Handling another interrupt could cause a crash due
1453 		 * to the QTD and QH state.
1454 		 */
1455 		writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1456 
1457 		/*
1458 		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1459 		 * even if the channel was already halted for some other
1460 		 * reason
1461 		 */
1462 		chan->halt_status = halt_status;
1463 
1464 		hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1465 		if (!(hcchar & HCCHAR_CHENA)) {
1466 			/*
1467 			 * The channel is either already halted or it hasn't
1468 			 * started yet. In DMA mode, the transfer may halt if
1469 			 * it finishes normally or a condition occurs that
1470 			 * requires driver intervention. Don't want to halt
1471 			 * the channel again. In either Slave or DMA mode,
1472 			 * it's possible that the transfer has been assigned
1473 			 * to a channel, but not started yet when an URB is
1474 			 * dequeued. Don't want to halt a channel that hasn't
1475 			 * started yet.
1476 			 */
1477 			return;
1478 		}
1479 	}
1480 	if (chan->halt_pending) {
1481 		/*
1482 		 * A halt has already been issued for this channel. This might
1483 		 * happen when a transfer is aborted by a higher level in
1484 		 * the stack.
1485 		 */
1486 		dev_vdbg(hsotg->dev,
1487 			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1488 			 __func__, chan->hc_num);
1489 		return;
1490 	}
1491 
1492 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1493 
1494 	/* No need to set the bit in DDMA for disabling the channel */
1495 	/* TODO check it everywhere channel is disabled */
1496 	if (hsotg->core_params->dma_desc_enable <= 0) {
1497 		if (dbg_hc(chan))
1498 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1499 		hcchar |= HCCHAR_CHENA;
1500 	} else {
1501 		if (dbg_hc(chan))
1502 			dev_dbg(hsotg->dev, "desc DMA enabled\n");
1503 	}
1504 	hcchar |= HCCHAR_CHDIS;
1505 
1506 	if (hsotg->core_params->dma_enable <= 0) {
1507 		if (dbg_hc(chan))
1508 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
1509 		hcchar |= HCCHAR_CHENA;
1510 
1511 		/* Check for space in the request queue to issue the halt */
1512 		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1513 		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1514 			dev_vdbg(hsotg->dev, "control/bulk\n");
1515 			nptxsts = readl(hsotg->regs + GNPTXSTS);
1516 			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1517 				dev_vdbg(hsotg->dev, "Disabling channel\n");
1518 				hcchar &= ~HCCHAR_CHENA;
1519 			}
1520 		} else {
1521 			if (dbg_perio())
1522 				dev_vdbg(hsotg->dev, "isoc/intr\n");
1523 			hptxsts = readl(hsotg->regs + HPTXSTS);
1524 			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1525 			    hsotg->queuing_high_bandwidth) {
1526 				if (dbg_perio())
1527 					dev_vdbg(hsotg->dev, "Disabling channel\n");
1528 				hcchar &= ~HCCHAR_CHENA;
1529 			}
1530 		}
1531 	} else {
1532 		if (dbg_hc(chan))
1533 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1534 	}
1535 
1536 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1537 	chan->halt_status = halt_status;
1538 
1539 	if (hcchar & HCCHAR_CHENA) {
1540 		if (dbg_hc(chan))
1541 			dev_vdbg(hsotg->dev, "Channel enabled\n");
1542 		chan->halt_pending = 1;
1543 		chan->halt_on_queue = 0;
1544 	} else {
1545 		if (dbg_hc(chan))
1546 			dev_vdbg(hsotg->dev, "Channel disabled\n");
1547 		chan->halt_on_queue = 1;
1548 	}
1549 
1550 	if (dbg_hc(chan)) {
1551 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1552 			 chan->hc_num);
1553 		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
1554 			 hcchar);
1555 		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
1556 			 chan->halt_pending);
1557 		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
1558 			 chan->halt_on_queue);
1559 		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
1560 			 chan->halt_status);
1561 	}
1562 }
1563 
1564 /**
1565  * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1566  *
1567  * @hsotg: Programming view of DWC_otg controller
1568  * @chan:  Identifies the host channel to clean up
1569  *
1570  * This function is normally called after a transfer is done and the host
1571  * channel is being released
1572  */
1573 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1574 {
1575 	u32 hcintmsk;
1576 
1577 	chan->xfer_started = 0;
1578 
1579 	/*
1580 	 * Clear channel interrupt enables and any unhandled channel interrupt
1581 	 * conditions
1582 	 */
1583 	writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1584 	hcintmsk = 0xffffffff;
1585 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1586 	writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1587 }
1588 
1589 /**
1590  * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1591  * which frame a periodic transfer should occur
1592  *
1593  * @hsotg:  Programming view of DWC_otg controller
1594  * @chan:   Identifies the host channel to set up and its properties
1595  * @hcchar: Current value of the HCCHAR register for the specified host channel
1596  *
1597  * This function has no effect on non-periodic transfers
1598  */
1599 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1600 				       struct dwc2_host_chan *chan, u32 *hcchar)
1601 {
1602 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1603 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1604 		/* 1 if _next_ frame is odd, 0 if it's even */
1605 		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1606 			*hcchar |= HCCHAR_ODDFRM;
1607 	}
1608 }
1609 
1610 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1611 {
1612 	/* Set up the initial PID for the transfer */
1613 	if (chan->speed == USB_SPEED_HIGH) {
1614 		if (chan->ep_is_in) {
1615 			if (chan->multi_count == 1)
1616 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1617 			else if (chan->multi_count == 2)
1618 				chan->data_pid_start = DWC2_HC_PID_DATA1;
1619 			else
1620 				chan->data_pid_start = DWC2_HC_PID_DATA2;
1621 		} else {
1622 			if (chan->multi_count == 1)
1623 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1624 			else
1625 				chan->data_pid_start = DWC2_HC_PID_MDATA;
1626 		}
1627 	} else {
1628 		chan->data_pid_start = DWC2_HC_PID_DATA0;
1629 	}
1630 }
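/*
 * Summary of the mapping implemented in dwc2_set_pid_isoc() above: for
 * high-speed IN endpoints a multi_count of 1/2/3 starts with DATA0/DATA1/
 * DATA2, high-speed OUT starts with DATA0 for multi_count 1 and MDATA
 * otherwise, and full/low-speed transfers always start with DATA0.
 */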
1631 
1632 /**
1633  * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1634  * the Host Channel
1635  *
1636  * @hsotg: Programming view of DWC_otg controller
1637  * @chan:  Information needed to initialize the host channel
1638  *
1639  * This function should only be called in Slave mode. For a channel associated
1640  * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1641  * associated with a periodic EP, the periodic Tx FIFO is written.
1642  *
1643  * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1644  * the number of bytes written to the Tx FIFO.
1645  */
1646 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1647 				 struct dwc2_host_chan *chan)
1648 {
1649 	u32 i;
1650 	u32 remaining_count;
1651 	u32 byte_count;
1652 	u32 dword_count;
1653 	u32 __iomem *data_fifo;
1654 	u32 *data_buf = (u32 *)chan->xfer_buf;
1655 
1656 	if (dbg_hc(chan))
1657 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1658 
1659 	data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1660 
1661 	remaining_count = chan->xfer_len - chan->xfer_count;
1662 	if (remaining_count > chan->max_packet)
1663 		byte_count = chan->max_packet;
1664 	else
1665 		byte_count = remaining_count;
1666 
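	/*
	 * Round the byte count up to whole 32-bit words; the FIFO is written
	 * one dword at a time below.
	 */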
1667 	dword_count = (byte_count + 3) / 4;
1668 
1669 	if (((unsigned long)data_buf & 0x3) == 0) {
1670 		/* xfer_buf is DWORD aligned */
1671 		for (i = 0; i < dword_count; i++, data_buf++)
1672 			writel(*data_buf, data_fifo);
1673 	} else {
1674 		/* xfer_buf is not DWORD aligned */
1675 		for (i = 0; i < dword_count; i++, data_buf++) {
1676 			u32 data = data_buf[0] | data_buf[1] << 8 |
1677 				   data_buf[2] << 16 | data_buf[3] << 24;
1678 			writel(data, data_fifo);
1679 		}
1680 	}
1681 
1682 	chan->xfer_count += byte_count;
1683 	chan->xfer_buf += byte_count;
1684 }
1685 
1686 /**
1687  * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1688  * channel and starts the transfer
1689  *
1690  * @hsotg: Programming view of DWC_otg controller
1691  * @chan:  Information needed to initialize the host channel. The xfer_len value
1692  *         may be reduced to accommodate the max widths of the XferSize and
1693  *         PktCnt fields in the HCTSIZn register. The multi_count value may be
1694  *         changed to reflect the final xfer_len value.
1695  *
1696  * This function may be called in either Slave mode or DMA mode. In Slave mode,
1697  * the caller must ensure that there is sufficient space in the request queue
1698  * and Tx Data FIFO.
1699  *
1700  * For an OUT transfer in Slave mode, it loads a data packet into the
1701  * appropriate FIFO. If necessary, additional data packets are loaded in the
1702  * Host ISR.
1703  *
1704  * For an IN transfer in Slave mode, a data packet is requested. The data
1705  * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1706  * additional data packets are requested in the Host ISR.
1707  *
1708  * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1709  * register along with a packet count of 1 and the channel is enabled. This
1710  * causes a single PING transaction to occur. Other fields in HCTSIZ are
1711  * simply set to 0 since no data transfer occurs in this case.
1712  *
1713  * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1714  * all the information required to perform the subsequent data transfer. In
1715  * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1716  * controller performs the entire PING protocol, then starts the data
1717  * transfer.
1718  */
1719 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1720 			    struct dwc2_host_chan *chan)
1721 {
1722 	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1723 	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1724 	u32 hcchar;
1725 	u32 hctsiz = 0;
1726 	u16 num_packets;
1727 
1728 	if (dbg_hc(chan))
1729 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1730 
1731 	if (chan->do_ping) {
1732 		if (hsotg->core_params->dma_enable <= 0) {
1733 			if (dbg_hc(chan))
1734 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
1735 			dwc2_hc_do_ping(hsotg, chan);
1736 			chan->xfer_started = 1;
1737 			return;
1738 		} else {
1739 			if (dbg_hc(chan))
1740 				dev_vdbg(hsotg->dev, "ping, DMA\n");
1741 			hctsiz |= TSIZ_DOPNG;
1742 		}
1743 	}
1744 
1745 	if (chan->do_split) {
1746 		if (dbg_hc(chan))
1747 			dev_vdbg(hsotg->dev, "split\n");
1748 		num_packets = 1;
1749 
1750 		if (chan->complete_split && !chan->ep_is_in)
1751 			/*
1752 			 * For CSPLIT OUT Transfer, set the size to 0 so the
1753 			 * core doesn't expect any data written to the FIFO
1754 			 */
1755 			chan->xfer_len = 0;
1756 		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1757 			chan->xfer_len = chan->max_packet;
1758 		else if (!chan->ep_is_in && chan->xfer_len > 188)
1759 			chan->xfer_len = 188;
1760 
1761 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1762 			  TSIZ_XFERSIZE_MASK;
1763 	} else {
1764 		if (dbg_hc(chan))
1765 			dev_vdbg(hsotg->dev, "no split\n");
1766 		/*
1767 		 * Ensure that the transfer length and packet count will fit
1768 		 * in the widths allocated for them in the HCTSIZn register
1769 		 */
1770 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1771 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1772 			/*
1773 			 * Make sure the transfer size is no larger than one
1774 			 * (micro)frame's worth of data. (A check was done
1775 			 * when the periodic transfer was accepted to ensure
1776 			 * that a (micro)frame's worth of data can be
1777 			 * programmed into a channel.)
1778 			 */
1779 			u32 max_periodic_len =
1780 				chan->multi_count * chan->max_packet;
1781 
1782 			if (chan->xfer_len > max_periodic_len)
1783 				chan->xfer_len = max_periodic_len;
1784 		} else if (chan->xfer_len > max_hc_xfer_size) {
1785 			/*
1786 			 * Make sure that xfer_len is a multiple of max packet
1787 			 * size
1788 			 */
1789 			chan->xfer_len =
1790 				max_hc_xfer_size - chan->max_packet + 1;
1791 		}
1792 
1793 		if (chan->xfer_len > 0) {
1794 			num_packets = (chan->xfer_len + chan->max_packet - 1) /
1795 					chan->max_packet;
1796 			if (num_packets > max_hc_pkt_count) {
1797 				num_packets = max_hc_pkt_count;
1798 				chan->xfer_len = num_packets * chan->max_packet;
1799 			}
1800 		} else {
1801 			/* Need 1 packet for transfer length of 0 */
1802 			num_packets = 1;
1803 		}
1804 
1805 		if (chan->ep_is_in)
1806 			/*
1807 			 * Always program an integral # of max packets for IN
1808 			 * transfers
1809 			 */
1810 			chan->xfer_len = num_packets * chan->max_packet;
1811 
1812 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1813 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1814 			/*
1815 			 * Make sure that the multi_count field matches the
1816 			 * actual transfer length
1817 			 */
1818 			chan->multi_count = num_packets;
1819 
1820 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1821 			dwc2_set_pid_isoc(chan);
1822 
1823 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1824 			  TSIZ_XFERSIZE_MASK;
1825 	}
1826 
1827 	chan->start_pkt_count = num_packets;
1828 	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1829 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1830 		  TSIZ_SC_MC_PID_MASK;
1831 	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1832 	if (dbg_hc(chan)) {
1833 		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1834 			 hctsiz, chan->hc_num);
1835 
1836 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1837 			 chan->hc_num);
1838 		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
1839 			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1840 			 TSIZ_XFERSIZE_SHIFT);
1841 		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
1842 			 (hctsiz & TSIZ_PKTCNT_MASK) >>
1843 			 TSIZ_PKTCNT_SHIFT);
1844 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1845 			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1846 			 TSIZ_SC_MC_PID_SHIFT);
1847 	}
1848 
1849 	if (hsotg->core_params->dma_enable > 0) {
1850 		dma_addr_t dma_addr;
1851 
1852 		if (chan->align_buf) {
1853 			if (dbg_hc(chan))
1854 				dev_vdbg(hsotg->dev, "align_buf\n");
1855 			dma_addr = chan->align_buf;
1856 		} else {
1857 			dma_addr = chan->xfer_dma;
1858 		}
1859 		writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1860 		if (dbg_hc(chan))
1861 			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1862 				 (unsigned long)dma_addr, chan->hc_num);
1863 	}
1864 
1865 	/* Start the split */
1866 	if (chan->do_split) {
1867 		u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
1868 
1869 		hcsplt |= HCSPLT_SPLTENA;
1870 		writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1871 	}
1872 
1873 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1874 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1875 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1876 		  HCCHAR_MULTICNT_MASK;
1877 	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1878 
1879 	if (hcchar & HCCHAR_CHDIS)
1880 		dev_warn(hsotg->dev,
1881 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1882 			 __func__, chan->hc_num, hcchar);
1883 
1884 	/* Set host channel enable after all other setup is complete */
1885 	hcchar |= HCCHAR_CHENA;
1886 	hcchar &= ~HCCHAR_CHDIS;
1887 
1888 	if (dbg_hc(chan))
1889 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1890 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1891 			 HCCHAR_MULTICNT_SHIFT);
1892 
1893 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1894 	if (dbg_hc(chan))
1895 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1896 			 chan->hc_num);
1897 
1898 	chan->xfer_started = 1;
1899 	chan->requests++;
1900 
1901 	if (hsotg->core_params->dma_enable <= 0 &&
1902 	    !chan->ep_is_in && chan->xfer_len > 0)
1903 		/* Load OUT packet into the appropriate Tx FIFO */
1904 		dwc2_hc_write_packet(hsotg, chan);
1905 }
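
/*
 * Usage sketch (illustrative only, with the channel assumed to be already
 * claimed and its channel number and endpoint addressing set up elsewhere):
 * a Slave-mode caller that has reserved request queue and Tx FIFO space can
 * start a 64-byte bulk OUT like this:
 *
 *	chan->ep_is_in = 0;
 *	chan->ep_type = USB_ENDPOINT_XFER_BULK;
 *	chan->max_packet = 64;
 *	chan->xfer_len = 64;
 *	chan->xfer_count = 0;
 *	chan->data_pid_start = DWC2_HC_PID_DATA0;
 *	chan->multi_count = 1;
 *	chan->do_ping = 0;
 *	chan->do_split = 0;
 *	dwc2_hc_start_transfer(hsotg, chan);
 *
 * With dma_enable off, the call also loads the first packet into the
 * non-periodic Tx FIFO via dwc2_hc_write_packet().
 */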
1906 
1907 /**
1908  * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1909  * host channel and starts the transfer in Descriptor DMA mode
1910  *
1911  * @hsotg: Programming view of DWC_otg controller
1912  * @chan:  Information needed to initialize the host channel
1913  *
1914  * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1915  * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1916  * with micro-frame bitmap.
1917  *
1918  * Initializes HCDMA register with descriptor list address and CTD value then
1919  * starts the transfer via enabling the channel.
1920  */
1921 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1922 				 struct dwc2_host_chan *chan)
1923 {
1924 	u32 hcchar;
1925 	u32 hc_dma;
1926 	u32 hctsiz = 0;
1927 
1928 	if (chan->do_ping)
1929 		hctsiz |= TSIZ_DOPNG;
1930 
1931 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1932 		dwc2_set_pid_isoc(chan);
1933 
1934 	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1935 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1936 		  TSIZ_SC_MC_PID_MASK;
1937 
1938 	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1939 	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1940 
1941 	/* Non-zero only for high-speed interrupt endpoints */
1942 	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1943 
1944 	if (dbg_hc(chan)) {
1945 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1946 			 chan->hc_num);
1947 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1948 			 chan->data_pid_start);
1949 		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
1950 	}
1951 
1952 	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1953 
1954 	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1955 
1956 	/* Always start from first descriptor */
1957 	hc_dma &= ~HCDMA_CTD_MASK;
1958 	writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
1959 	if (dbg_hc(chan))
1960 		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1961 			 hc_dma, chan->hc_num);
1962 
1963 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1964 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1965 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1966 		  HCCHAR_MULTICNT_MASK;
1967 
1968 	if (hcchar & HCCHAR_CHDIS)
1969 		dev_warn(hsotg->dev,
1970 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1971 			 __func__, chan->hc_num, hcchar);
1972 
1973 	/* Set host channel enable after all other setup is complete */
1974 	hcchar |= HCCHAR_CHENA;
1975 	hcchar &= ~HCCHAR_CHDIS;
1976 
1977 	if (dbg_hc(chan))
1978 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1979 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1980 			 HCCHAR_MULTICNT_SHIFT);
1981 
1982 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1983 	if (dbg_hc(chan))
1984 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1985 			 chan->hc_num);
1986 
1987 	chan->xfer_started = 1;
1988 	chan->requests++;
1989 }
1990 
1991 /**
1992  * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1993  * a previous call to dwc2_hc_start_transfer()
1994  *
1995  * @hsotg: Programming view of DWC_otg controller
1996  * @chan:  Information needed to initialize the host channel
1997  *
1998  * The caller must ensure there is sufficient space in the request queue and Tx
1999  * Data FIFO. This function should only be called in Slave mode. In DMA mode,
2000  * the controller acts autonomously to complete transfers programmed to a host
2001  * channel.
2002  *
2003  * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
2004  * if there is any data remaining to be queued. For an IN transfer, another
2005  * data packet is always requested. For the SETUP phase of a control transfer,
2006  * this function does nothing.
2007  *
2008  * Return: 1 if a new request is queued, 0 if no more requests are required
2009  * for this transfer
2010  */
2011 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
2012 			      struct dwc2_host_chan *chan)
2013 {
2014 	if (dbg_hc(chan))
2015 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2016 			 chan->hc_num);
2017 
2018 	if (chan->do_split)
2019 		/* SPLITs always queue just once per channel */
2020 		return 0;
2021 
2022 	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
2023 		/* SETUPs are queued only once since they can't be NAK'd */
2024 		return 0;
2025 
2026 	if (chan->ep_is_in) {
2027 		/*
2028 		 * Always queue another request for other IN transfers. If
2029 		 * back-to-back INs are issued and NAKs are received for both,
2030 		 * the driver may still be processing the first NAK when the
2031 		 * second NAK is received. When the interrupt handler clears
2032 		 * the NAK interrupt for the first NAK, the second NAK will
2033 		 * not be seen. So we can't depend on the NAK interrupt
2034 		 * handler to requeue a NAK'd request. Instead, IN requests
2035 		 * are issued each time this function is called. When the
2036 		 * transfer completes, the extra requests for the channel will
2037 		 * be flushed.
2038 		 */
2039 		u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
2040 
2041 		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
2042 		hcchar |= HCCHAR_CHENA;
2043 		hcchar &= ~HCCHAR_CHDIS;
2044 		if (dbg_hc(chan))
2045 			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
2046 				 hcchar);
2047 		writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
2048 		chan->requests++;
2049 		return 1;
2050 	}
2051 
2052 	/* OUT transfers */
2053 
2054 	if (chan->xfer_count < chan->xfer_len) {
2055 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
2056 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
2057 			u32 hcchar = readl(hsotg->regs +
2058 					   HCCHAR(chan->hc_num));
2059 
2060 			dwc2_hc_set_even_odd_frame(hsotg, chan,
2061 						   &hcchar);
2062 		}
2063 
2064 		/* Load OUT packet into the appropriate Tx FIFO */
2065 		dwc2_hc_write_packet(hsotg, chan);
2066 		chan->requests++;
2067 		return 1;
2068 	}
2069 
2070 	return 0;
2071 }
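
/*
 * Usage sketch (illustrative only): a Slave-mode interrupt path that has
 * already verified there is room in the request queue and Tx FIFO only
 * needs to act on the return value:
 *
 *	int queued = dwc2_hc_continue_transfer(hsotg, chan);
 *
 * A result of 1 means another request went out and a further channel
 * interrupt should be expected; 0 means nothing more needs to be queued
 * for this transfer and only completion handling remains.
 */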
2072 
2073 /**
2074  * dwc2_hc_do_ping() - Starts a PING transfer
2075  *
2076  * @hsotg: Programming view of DWC_otg controller
2077  * @chan:  Information needed to initialize the host channel
2078  *
2079  * This function should only be called in Slave mode. The Do Ping bit is set in
2080  * the HCTSIZ register, then the channel is enabled.
2081  */
2082 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2083 {
2084 	u32 hcchar;
2085 	u32 hctsiz;
2086 
2087 	if (dbg_hc(chan))
2088 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2089 			 chan->hc_num);
2090 
2091 
2092 	hctsiz = TSIZ_DOPNG;
2093 	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
2094 	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
2095 
2096 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
2097 	hcchar |= HCCHAR_CHENA;
2098 	hcchar &= ~HCCHAR_CHDIS;
2099 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
2100 }
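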
2101 
2102 /**
2103  * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2104  * the HFIR register according to PHY type and speed
2105  *
2106  * @hsotg: Programming view of DWC_otg controller
2107  *
2108  * NOTE: The caller can modify the value of the HFIR register only after the
2109  * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2110  * has been set
2111  */
2112 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
2113 {
2114 	u32 usbcfg;
2115 	u32 hprt0;
2116 	int clock = 60;	/* default value */
2117 
2118 	usbcfg = readl(hsotg->regs + GUSBCFG);
2119 	hprt0 = readl(hsotg->regs + HPRT0);
2120 
2121 	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
2122 	    !(usbcfg & GUSBCFG_PHYIF16))
2123 		clock = 60;
2124 	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
2125 	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
2126 		clock = 48;
2127 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2128 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2129 		clock = 30;
2130 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2131 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
2132 		clock = 60;
2133 	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2134 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2135 		clock = 48;
2136 	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
2137 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
2138 		clock = 48;
2139 	if ((usbcfg & GUSBCFG_PHYSEL) &&
2140 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2141 		clock = 48;
2142 
2143 	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
2144 		/* High speed case */
2145 		return 125 * clock;
2146 	else
2147 		/* FS/LS case */
2148 		return 1000 * clock;
2149 }
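
/*
 * Worked example (illustrative only): a 16-bit UTMI+ PHY without the
 * low-power clock select gives clock = 30, so a high-speed port gets
 * 125 * 30 = 3750 PHY clocks per microframe, while a dedicated FS PHY
 * gives clock = 48 and a full-/low-speed port gets 1000 * 48 = 48000 PHY
 * clocks per 1 ms frame. The caller is expected to program the result
 * into the frame interval field of HFIR once the port is enabled.
 */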
2150 
2151 /**
2152  * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2153  * buffer
2154  *
2155  * @hsotg:   Programming view of DWC_otg controller
2156  * @dest:    Destination buffer for the packet
2157  * @bytes:   Number of bytes to copy to the destination
2158  */
2159 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
2160 {
2161 	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
2162 	u32 *data_buf = (u32 *)dest;
2163 	int word_count = (bytes + 3) / 4;
2164 	int i;
2165 
2166 	/*
2167 	 * Todo: Account for the case where dest is not dword aligned. This
2168 	 * requires reading data from the FIFO into a u32 temp buffer, then
2169 	 * moving it into the data buffer.
2170 	 */
2171 
2172 	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
2173 
2174 	for (i = 0; i < word_count; i++, data_buf++)
2175 		*data_buf = readl(fifo);
2176 }
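
/*
 * Usage sketch (illustrative only): when an Rx FIFO level event reports
 * that 'bytes' bytes of an IN packet are waiting, a Slave-mode caller can
 * drain them into a DWORD-aligned buffer:
 *
 *	u8 buf[64] __aligned(4);
 *
 *	dwc2_read_packet(hsotg, buf, bytes);
 *
 * The alignment matters because, per the Todo above, the copy is done in
 * 32-bit words straight into 'dest'.
 */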
2177 
2178 /**
2179  * dwc2_dump_host_registers() - Prints the host registers
2180  *
2181  * @hsotg: Programming view of DWC_otg controller
2182  *
2183  * NOTE: This function will be removed once the peripheral controller code
2184  * is integrated and the driver is stable
2185  */
2186 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
2187 {
2188 #ifdef DEBUG
2189 	u32 __iomem *addr;
2190 	int i;
2191 
2192 	dev_dbg(hsotg->dev, "Host Global Registers\n");
2193 	addr = hsotg->regs + HCFG;
2194 	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
2195 		(unsigned long)addr, readl(addr));
2196 	addr = hsotg->regs + HFIR;
2197 	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
2198 		(unsigned long)addr, readl(addr));
2199 	addr = hsotg->regs + HFNUM;
2200 	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
2201 		(unsigned long)addr, readl(addr));
2202 	addr = hsotg->regs + HPTXSTS;
2203 	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
2204 		(unsigned long)addr, readl(addr));
2205 	addr = hsotg->regs + HAINT;
2206 	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
2207 		(unsigned long)addr, readl(addr));
2208 	addr = hsotg->regs + HAINTMSK;
2209 	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
2210 		(unsigned long)addr, readl(addr));
2211 	if (hsotg->core_params->dma_desc_enable > 0) {
2212 		addr = hsotg->regs + HFLBADDR;
2213 		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
2214 			(unsigned long)addr, readl(addr));
2215 	}
2216 
2217 	addr = hsotg->regs + HPRT0;
2218 	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
2219 		(unsigned long)addr, readl(addr));
2220 
2221 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
2222 		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
2223 		addr = hsotg->regs + HCCHAR(i);
2224 		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
2225 			(unsigned long)addr, readl(addr));
2226 		addr = hsotg->regs + HCSPLT(i);
2227 		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
2228 			(unsigned long)addr, readl(addr));
2229 		addr = hsotg->regs + HCINT(i);
2230 		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
2231 			(unsigned long)addr, readl(addr));
2232 		addr = hsotg->regs + HCINTMSK(i);
2233 		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
2234 			(unsigned long)addr, readl(addr));
2235 		addr = hsotg->regs + HCTSIZ(i);
2236 		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
2237 			(unsigned long)addr, readl(addr));
2238 		addr = hsotg->regs + HCDMA(i);
2239 		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
2240 			(unsigned long)addr, readl(addr));
2241 		if (hsotg->core_params->dma_desc_enable > 0) {
2242 			addr = hsotg->regs + HCDMAB(i);
2243 			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
2244 				(unsigned long)addr, readl(addr));
2245 		}
2246 	}
2247 #endif
2248 }
2249 
2250 /**
2251  * dwc2_dump_global_registers() - Prints the core global registers
2252  *
2253  * @hsotg: Programming view of DWC_otg controller
2254  *
2255  * NOTE: This function will be removed once the peripheral controller code
2256  * is integrated and the driver is stable
2257  */
2258 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
2259 {
2260 #ifdef DEBUG
2261 	u32 __iomem *addr;
2262 
2263 	dev_dbg(hsotg->dev, "Core Global Registers\n");
2264 	addr = hsotg->regs + GOTGCTL;
2265 	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
2266 		(unsigned long)addr, readl(addr));
2267 	addr = hsotg->regs + GOTGINT;
2268 	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
2269 		(unsigned long)addr, readl(addr));
2270 	addr = hsotg->regs + GAHBCFG;
2271 	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
2272 		(unsigned long)addr, readl(addr));
2273 	addr = hsotg->regs + GUSBCFG;
2274 	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
2275 		(unsigned long)addr, readl(addr));
2276 	addr = hsotg->regs + GRSTCTL;
2277 	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
2278 		(unsigned long)addr, readl(addr));
2279 	addr = hsotg->regs + GINTSTS;
2280 	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
2281 		(unsigned long)addr, readl(addr));
2282 	addr = hsotg->regs + GINTMSK;
2283 	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
2284 		(unsigned long)addr, readl(addr));
2285 	addr = hsotg->regs + GRXSTSR;
2286 	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
2287 		(unsigned long)addr, readl(addr));
2288 	addr = hsotg->regs + GRXFSIZ;
2289 	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
2290 		(unsigned long)addr, readl(addr));
2291 	addr = hsotg->regs + GNPTXFSIZ;
2292 	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
2293 		(unsigned long)addr, readl(addr));
2294 	addr = hsotg->regs + GNPTXSTS;
2295 	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
2296 		(unsigned long)addr, readl(addr));
2297 	addr = hsotg->regs + GI2CCTL;
2298 	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
2299 		(unsigned long)addr, readl(addr));
2300 	addr = hsotg->regs + GPVNDCTL;
2301 	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
2302 		(unsigned long)addr, readl(addr));
2303 	addr = hsotg->regs + GGPIO;
2304 	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
2305 		(unsigned long)addr, readl(addr));
2306 	addr = hsotg->regs + GUID;
2307 	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
2308 		(unsigned long)addr, readl(addr));
2309 	addr = hsotg->regs + GSNPSID;
2310 	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
2311 		(unsigned long)addr, readl(addr));
2312 	addr = hsotg->regs + GHWCFG1;
2313 	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
2314 		(unsigned long)addr, readl(addr));
2315 	addr = hsotg->regs + GHWCFG2;
2316 	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
2317 		(unsigned long)addr, readl(addr));
2318 	addr = hsotg->regs + GHWCFG3;
2319 	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
2320 		(unsigned long)addr, readl(addr));
2321 	addr = hsotg->regs + GHWCFG4;
2322 	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
2323 		(unsigned long)addr, readl(addr));
2324 	addr = hsotg->regs + GLPMCFG;
2325 	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
2326 		(unsigned long)addr, readl(addr));
2327 	addr = hsotg->regs + GPWRDN;
2328 	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
2329 		(unsigned long)addr, readl(addr));
2330 	addr = hsotg->regs + GDFIFOCFG;
2331 	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
2332 		(unsigned long)addr, readl(addr));
2333 	addr = hsotg->regs + HPTXFSIZ;
2334 	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
2335 		(unsigned long)addr, readl(addr));
2336 
2337 	addr = hsotg->regs + PCGCTL;
2338 	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
2339 		(unsigned long)addr, readl(addr));
2340 #endif
2341 }
2342 
2343 /**
2344  * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2345  *
2346  * @hsotg: Programming view of DWC_otg controller
2347  * @num:   Tx FIFO to flush
2348  */
2349 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
2350 {
2351 	u32 greset;
2352 	int count = 0;
2353 
2354 	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
2355 
2356 	greset = GRSTCTL_TXFFLSH;
2357 	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
2358 	writel(greset, hsotg->regs + GRSTCTL);
2359 
2360 	do {
2361 		greset = readl(hsotg->regs + GRSTCTL);
2362 		if (++count > 10000) {
2363 			dev_warn(hsotg->dev,
2364 				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2365 				 __func__, greset,
2366 				 readl(hsotg->regs + GNPTXSTS));
2367 			break;
2368 		}
2369 		udelay(1);
2370 	} while (greset & GRSTCTL_TXFFLSH);
2371 
2372 	/* Wait for at least 3 PHY Clocks */
2373 	udelay(1);
2374 }
2375 
2376 /**
2377  * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2378  *
2379  * @hsotg: Programming view of DWC_otg controller
2380  */
2381 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2382 {
2383 	u32 greset;
2384 	int count = 0;
2385 
2386 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
2387 
2388 	greset = GRSTCTL_RXFFLSH;
2389 	writel(greset, hsotg->regs + GRSTCTL);
2390 
2391 	do {
2392 		greset = readl(hsotg->regs + GRSTCTL);
2393 		if (++count > 10000) {
2394 			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2395 				 __func__, greset);
2396 			break;
2397 		}
2398 		udelay(1);
2399 	} while (greset & GRSTCTL_RXFFLSH);
2400 
2401 	/* Wait for at least 3 PHY Clocks */
2402 	udelay(1);
2403 }
2404 
2405 #define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
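
/*
 * Example (illustrative only): DWC2_OUT_OF_BOUNDS(2, 0, 1) and
 * DWC2_OUT_OF_BOUNDS(-1, 0, 1) both evaluate to true, which is why the
 * setters below treat any negative value as "pick the default" without
 * printing an error.
 */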
2406 
2407 /* Parameter access functions */
2408 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
2409 {
2410 	int valid = 1;
2411 
2412 	switch (val) {
2413 	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
2414 		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
2415 			valid = 0;
2416 		break;
2417 	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
2418 		switch (hsotg->hw_params.op_mode) {
2419 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2420 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2421 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2422 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2423 			break;
2424 		default:
2425 			valid = 0;
2426 			break;
2427 		}
2428 		break;
2429 	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2430 		/* always valid */
2431 		break;
2432 	default:
2433 		valid = 0;
2434 		break;
2435 	}
2436 
2437 	if (!valid) {
2438 		if (val >= 0)
2439 			dev_err(hsotg->dev,
2440 				"%d invalid for otg_cap parameter. Check HW configuration.\n",
2441 				val);
2442 		switch (hsotg->hw_params.op_mode) {
2443 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2444 			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2445 			break;
2446 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2447 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2448 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2449 			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2450 			break;
2451 		default:
2452 			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2453 			break;
2454 		}
2455 		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2456 	}
2457 
2458 	hsotg->core_params->otg_cap = val;
2459 }
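
/*
 * Example (illustrative only): passing the "auto" value -1 skips the
 * error message and derives the capability purely from the hardware:
 *
 *	dwc2_set_param_otg_cap(hsotg, -1);
 *
 * which selects HNP/SRP, SRP-only or no-HNP/SRP depending on
 * hsotg->hw_params.op_mode.
 */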
2460 
2461 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2462 {
2463 	int valid = 1;
2464 
2465 	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2466 		valid = 0;
2467 	if (val < 0)
2468 		valid = 0;
2469 
2470 	if (!valid) {
2471 		if (val >= 0)
2472 			dev_err(hsotg->dev,
2473 				"%d invalid for dma_enable parameter. Check HW configuration.\n",
2474 				val);
2475 		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2476 		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2477 	}
2478 
2479 	hsotg->core_params->dma_enable = val;
2480 }
2481 
2482 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2483 {
2484 	int valid = 1;
2485 
2486 	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2487 			!hsotg->hw_params.dma_desc_enable))
2488 		valid = 0;
2489 	if (val < 0)
2490 		valid = 0;
2491 
2492 	if (!valid) {
2493 		if (val >= 0)
2494 			dev_err(hsotg->dev,
2495 				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2496 				val);
2497 		val = (hsotg->core_params->dma_enable > 0 &&
2498 			hsotg->hw_params.dma_desc_enable);
2499 		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2500 	}
2501 
2502 	hsotg->core_params->dma_desc_enable = val;
2503 }
2504 
2505 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2506 						 int val)
2507 {
2508 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2509 		if (val >= 0) {
2510 			dev_err(hsotg->dev,
2511 				"Wrong value for host_support_fs_ls_low_power\n");
2512 			dev_err(hsotg->dev,
2513 				"host_support_fs_ls_low_power must be 0 or 1\n");
2514 		}
2515 		val = 0;
2516 		dev_dbg(hsotg->dev,
2517 			"Setting host_support_fs_ls_low_power to %d\n", val);
2518 	}
2519 
2520 	hsotg->core_params->host_support_fs_ls_low_power = val;
2521 }
2522 
2523 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2524 {
2525 	int valid = 1;
2526 
2527 	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2528 		valid = 0;
2529 	if (val < 0)
2530 		valid = 0;
2531 
2532 	if (!valid) {
2533 		if (val >= 0)
2534 			dev_err(hsotg->dev,
2535 				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2536 				val);
2537 		val = hsotg->hw_params.enable_dynamic_fifo;
2538 		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2539 	}
2540 
2541 	hsotg->core_params->enable_dynamic_fifo = val;
2542 }
2543 
2544 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2545 {
2546 	int valid = 1;
2547 
2548 	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2549 		valid = 0;
2550 
2551 	if (!valid) {
2552 		if (val >= 0)
2553 			dev_err(hsotg->dev,
2554 				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2555 				val);
2556 		val = hsotg->hw_params.host_rx_fifo_size;
2557 		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2558 	}
2559 
2560 	hsotg->core_params->host_rx_fifo_size = val;
2561 }
2562 
2563 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2564 {
2565 	int valid = 1;
2566 
2567 	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2568 		valid = 0;
2569 
2570 	if (!valid) {
2571 		if (val >= 0)
2572 			dev_err(hsotg->dev,
2573 				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2574 				val);
2575 		val = hsotg->hw_params.host_nperio_tx_fifo_size;
2576 		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2577 			val);
2578 	}
2579 
2580 	hsotg->core_params->host_nperio_tx_fifo_size = val;
2581 }
2582 
2583 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2584 {
2585 	int valid = 1;
2586 
2587 	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2588 		valid = 0;
2589 
2590 	if (!valid) {
2591 		if (val >= 0)
2592 			dev_err(hsotg->dev,
2593 				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2594 				val);
2595 		val = hsotg->hw_params.host_perio_tx_fifo_size;
2596 		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2597 			val);
2598 	}
2599 
2600 	hsotg->core_params->host_perio_tx_fifo_size = val;
2601 }
2602 
2603 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2604 {
2605 	int valid = 1;
2606 
2607 	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2608 		valid = 0;
2609 
2610 	if (!valid) {
2611 		if (val >= 0)
2612 			dev_err(hsotg->dev,
2613 				"%d invalid for max_transfer_size. Check HW configuration.\n",
2614 				val);
2615 		val = hsotg->hw_params.max_transfer_size;
2616 		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2617 	}
2618 
2619 	hsotg->core_params->max_transfer_size = val;
2620 }
2621 
2622 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2623 {
2624 	int valid = 1;
2625 
2626 	if (val < 15 || val > hsotg->hw_params.max_packet_count)
2627 		valid = 0;
2628 
2629 	if (!valid) {
2630 		if (val >= 0)
2631 			dev_err(hsotg->dev,
2632 				"%d invalid for max_packet_count. Check HW configuration.\n",
2633 				val);
2634 		val = hsotg->hw_params.max_packet_count;
2635 		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2636 	}
2637 
2638 	hsotg->core_params->max_packet_count = val;
2639 }
2640 
2641 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2642 {
2643 	int valid = 1;
2644 
2645 	if (val < 1 || val > hsotg->hw_params.host_channels)
2646 		valid = 0;
2647 
2648 	if (!valid) {
2649 		if (val >= 0)
2650 			dev_err(hsotg->dev,
2651 				"%d invalid for host_channels. Check HW configuration.\n",
2652 				val);
2653 		val = hsotg->hw_params.host_channels;
2654 		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2655 	}
2656 
2657 	hsotg->core_params->host_channels = val;
2658 }
2659 
2660 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2661 {
2662 	int valid = 0;
2663 	u32 hs_phy_type, fs_phy_type;
2664 
2665 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2666 			       DWC2_PHY_TYPE_PARAM_ULPI)) {
2667 		if (val >= 0) {
2668 			dev_err(hsotg->dev, "Wrong value for phy_type\n");
2669 			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2670 		}
2671 
2672 		valid = 0;
2673 	}
2674 
2675 	hs_phy_type = hsotg->hw_params.hs_phy_type;
2676 	fs_phy_type = hsotg->hw_params.fs_phy_type;
2677 	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2678 	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2679 	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2680 		valid = 1;
2681 	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2682 		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2683 		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2684 		valid = 1;
2685 	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2686 		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2687 		valid = 1;
2688 
2689 	if (!valid) {
2690 		if (val >= 0)
2691 			dev_err(hsotg->dev,
2692 				"%d invalid for phy_type. Check HW configuration.\n",
2693 				val);
2694 		val = DWC2_PHY_TYPE_PARAM_FS;
2695 		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2696 			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2697 			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2698 				val = DWC2_PHY_TYPE_PARAM_UTMI;
2699 			else
2700 				val = DWC2_PHY_TYPE_PARAM_ULPI;
2701 		}
2702 		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2703 	}
2704 
2705 	hsotg->core_params->phy_type = val;
2706 }
2707 
2708 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2709 {
2710 	return hsotg->core_params->phy_type;
2711 }
2712 
2713 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2714 {
2715 	int valid = 1;
2716 
2717 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2718 		if (val >= 0) {
2719 			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2720 			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2721 		}
2722 		valid = 0;
2723 	}
2724 
2725 	if (val == DWC2_SPEED_PARAM_HIGH &&
2726 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2727 		valid = 0;
2728 
2729 	if (!valid) {
2730 		if (val >= 0)
2731 			dev_err(hsotg->dev,
2732 				"%d invalid for speed parameter. Check HW configuration.\n",
2733 				val);
2734 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2735 				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2736 		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2737 	}
2738 
2739 	hsotg->core_params->speed = val;
2740 }
2741 
2742 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2743 {
2744 	int valid = 1;
2745 
2746 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2747 			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2748 		if (val >= 0) {
2749 			dev_err(hsotg->dev,
2750 				"Wrong value for host_ls_low_power_phy_clk parameter\n");
2751 			dev_err(hsotg->dev,
2752 				"host_ls_low_power_phy_clk must be 0 or 1\n");
2753 		}
2754 		valid = 0;
2755 	}
2756 
2757 	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2758 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2759 		valid = 0;
2760 
2761 	if (!valid) {
2762 		if (val >= 0)
2763 			dev_err(hsotg->dev,
2764 				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2765 				val);
2766 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2767 			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2768 			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2769 		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2770 			val);
2771 	}
2772 
2773 	hsotg->core_params->host_ls_low_power_phy_clk = val;
2774 }
2775 
2776 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2777 {
2778 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2779 		if (val >= 0) {
2780 			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2781 			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2782 		}
2783 		val = 0;
2784 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2785 	}
2786 
2787 	hsotg->core_params->phy_ulpi_ddr = val;
2788 }
2789 
2790 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2791 {
2792 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2793 		if (val >= 0) {
2794 			dev_err(hsotg->dev,
2795 				"Wrong value for phy_ulpi_ext_vbus\n");
2796 			dev_err(hsotg->dev,
2797 				"phy_ulpi_ext_vbus must be 0 or 1\n");
2798 		}
2799 		val = 0;
2800 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2801 	}
2802 
2803 	hsotg->core_params->phy_ulpi_ext_vbus = val;
2804 }
2805 
2806 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2807 {
2808 	int valid = 0;
2809 
2810 	switch (hsotg->hw_params.utmi_phy_data_width) {
2811 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2812 		valid = (val == 8);
2813 		break;
2814 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2815 		valid = (val == 16);
2816 		break;
2817 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2818 		valid = (val == 8 || val == 16);
2819 		break;
2820 	}
2821 
2822 	if (!valid) {
2823 		if (val >= 0) {
2824 			dev_err(hsotg->dev,
2825 				"%d invalid for phy_utmi_width. Check HW configuration.\n",
2826 				val);
2827 		}
2828 		val = (hsotg->hw_params.utmi_phy_data_width ==
2829 		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2830 		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2831 	}
2832 
2833 	hsotg->core_params->phy_utmi_width = val;
2834 }
2835 
2836 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2837 {
2838 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2839 		if (val >= 0) {
2840 			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2841 			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2842 		}
2843 		val = 0;
2844 		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2845 	}
2846 
2847 	hsotg->core_params->ulpi_fs_ls = val;
2848 }
2849 
2850 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2851 {
2852 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2853 		if (val >= 0) {
2854 			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2855 			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2856 		}
2857 		val = 0;
2858 		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2859 	}
2860 
2861 	hsotg->core_params->ts_dline = val;
2862 }
2863 
2864 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2865 {
2866 	int valid = 1;
2867 
2868 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2869 		if (val >= 0) {
2870 			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2871 			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2872 		}
2873 
2874 		valid = 0;
2875 	}
2876 
2877 	if (val == 1 && !(hsotg->hw_params.i2c_enable))
2878 		valid = 0;
2879 
2880 	if (!valid) {
2881 		if (val >= 0)
2882 			dev_err(hsotg->dev,
2883 				"%d invalid for i2c_enable. Check HW configuration.\n",
2884 				val);
2885 		val = hsotg->hw_params.i2c_enable;
2886 		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2887 	}
2888 
2889 	hsotg->core_params->i2c_enable = val;
2890 }
2891 
2892 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2893 {
2894 	int valid = 1;
2895 
2896 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2897 		if (val >= 0) {
2898 			dev_err(hsotg->dev,
2899 				"Wrong value for en_multiple_tx_fifo\n");
2900 			dev_err(hsotg->dev,
2901 				"en_multiple_tx_fifo must be 0 or 1\n");
2902 		}
2903 		valid = 0;
2904 	}
2905 
2906 	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2907 		valid = 0;
2908 
2909 	if (!valid) {
2910 		if (val >= 0)
2911 			dev_err(hsotg->dev,
2912 				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2913 				val);
2914 		val = hsotg->hw_params.en_multiple_tx_fifo;
2915 		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2916 	}
2917 
2918 	hsotg->core_params->en_multiple_tx_fifo = val;
2919 }
2920 
2921 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2922 {
2923 	int valid = 1;
2924 
2925 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2926 		if (val >= 0) {
2927 			dev_err(hsotg->dev,
2928 				"'%d' invalid for parameter reload_ctl\n", val);
2929 			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2930 		}
2931 		valid = 0;
2932 	}
2933 
2934 	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2935 		valid = 0;
2936 
2937 	if (!valid) {
2938 		if (val >= 0)
2939 			dev_err(hsotg->dev,
2940 				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
2941 				val);
2942 		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2943 		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2944 	}
2945 
2946 	hsotg->core_params->reload_ctl = val;
2947 }
2948 
2949 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2950 {
2951 	if (val != -1)
2952 		hsotg->core_params->ahbcfg = val;
2953 	else
2954 		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2955 						GAHBCFG_HBSTLEN_SHIFT;
2956 }
2957 
2958 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2959 {
2960 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2961 		if (val >= 0) {
2962 			dev_err(hsotg->dev,
2963 				"'%d' invalid for parameter otg_ver\n", val);
2964 			dev_err(hsotg->dev,
2965 				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2966 		}
2967 		val = 0;
2968 		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2969 	}
2970 
2971 	hsotg->core_params->otg_ver = val;
2972 }
2973 
2974 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2975 {
2976 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2977 		if (val >= 0) {
2978 			dev_err(hsotg->dev,
2979 				"'%d' invalid for parameter uframe_sched\n",
2980 				val);
2981 			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2982 		}
2983 		val = 1;
2984 		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2985 	}
2986 
2987 	hsotg->core_params->uframe_sched = val;
2988 }
2989 
2990 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
2991 		int val)
2992 {
2993 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2994 		if (val >= 0) {
2995 			dev_err(hsotg->dev,
2996 				"'%d' invalid for parameter external_id_pin_ctl\n",
2997 				val);
2998 			dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
2999 		}
3000 		val = 0;
3001 		dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
3002 	}
3003 
3004 	hsotg->core_params->external_id_pin_ctl = val;
3005 }
3006 
3007 static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
3008 		int val)
3009 {
3010 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3011 		if (val >= 0) {
3012 			dev_err(hsotg->dev,
3013 				"'%d' invalid for parameter hibernation\n",
3014 				val);
3015 			dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
3016 		}
3017 		val = 0;
3018 		dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
3019 	}
3020 
3021 	hsotg->core_params->hibernation = val;
3022 }
3023 
3024 /*
3025  * This function is called during module initialization to pass module
3026  * parameters for the DWC_otg core.
3027  */
3028 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
3029 			 const struct dwc2_core_params *params)
3030 {
3031 	dev_dbg(hsotg->dev, "%s()\n", __func__);
3032 
3033 	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
3034 	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
3035 	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
3036 	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
3037 			params->host_support_fs_ls_low_power);
3038 	dwc2_set_param_enable_dynamic_fifo(hsotg,
3039 			params->enable_dynamic_fifo);
3040 	dwc2_set_param_host_rx_fifo_size(hsotg,
3041 			params->host_rx_fifo_size);
3042 	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
3043 			params->host_nperio_tx_fifo_size);
3044 	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
3045 			params->host_perio_tx_fifo_size);
3046 	dwc2_set_param_max_transfer_size(hsotg,
3047 			params->max_transfer_size);
3048 	dwc2_set_param_max_packet_count(hsotg,
3049 			params->max_packet_count);
3050 	dwc2_set_param_host_channels(hsotg, params->host_channels);
3051 	dwc2_set_param_phy_type(hsotg, params->phy_type);
3052 	dwc2_set_param_speed(hsotg, params->speed);
3053 	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
3054 			params->host_ls_low_power_phy_clk);
3055 	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
3056 	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
3057 			params->phy_ulpi_ext_vbus);
3058 	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
3059 	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
3060 	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
3061 	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
3062 	dwc2_set_param_en_multiple_tx_fifo(hsotg,
3063 			params->en_multiple_tx_fifo);
3064 	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
3065 	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
3066 	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
3067 	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
3068 	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
3069 	dwc2_set_param_hibernation(hsotg, params->hibernation);
3070 }
3071 
3072 /**
3073  * dwc2_get_hwparams() - During device initialization, read various hardware
3074  * configuration registers and interpret the contents
3075  */
3076 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3077 {
3078 	struct dwc2_hw_params *hw = &hsotg->hw_params;
3079 	unsigned width;
3080 	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
3081 	u32 hptxfsiz, grxfsiz, gnptxfsiz;
3082 	u32 gusbcfg;
3083 
3084 	/*
3085 	 * Attempt to ensure this device is really a DWC_otg Controller.
3086 	 * Read and verify the GSNPSID register contents. The value should be
3087  * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
3088 	 * as in "OTG version 2.xx" or "OTG version 3.xx".
3089 	 */
3090 	hw->snpsid = readl(hsotg->regs + GSNPSID);
3091 	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
3092 	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
3093 		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
3094 			hw->snpsid);
3095 		return -ENODEV;
3096 	}
3097 
3098 	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
3099 		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
3100 		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
3101 
3102 	hwcfg1 = readl(hsotg->regs + GHWCFG1);
3103 	hwcfg2 = readl(hsotg->regs + GHWCFG2);
3104 	hwcfg3 = readl(hsotg->regs + GHWCFG3);
3105 	hwcfg4 = readl(hsotg->regs + GHWCFG4);
3106 	grxfsiz = readl(hsotg->regs + GRXFSIZ);
3107 
3108 	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
3109 	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
3110 	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
3111 	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
3112 	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
3113 
3114 	/* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
3115 	gusbcfg = readl(hsotg->regs + GUSBCFG);
3116 	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
3117 	writel(gusbcfg, hsotg->regs + GUSBCFG);
3118 	usleep_range(100000, 150000);
3119 
3120 	gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
3121 	hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
3122 	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
3123 	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
3124 	gusbcfg = readl(hsotg->regs + GUSBCFG);
3125 	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
3126 	writel(gusbcfg, hsotg->regs + GUSBCFG);
3127 	usleep_range(100000, 150000);
3128 
3129 	/* hwcfg2 */
3130 	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3131 		      GHWCFG2_OP_MODE_SHIFT;
3132 	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
3133 		   GHWCFG2_ARCHITECTURE_SHIFT;
3134 	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
3135 	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
3136 				GHWCFG2_NUM_HOST_CHAN_SHIFT);
3137 	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
3138 			  GHWCFG2_HS_PHY_TYPE_SHIFT;
3139 	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
3140 			  GHWCFG2_FS_PHY_TYPE_SHIFT;
3141 	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
3142 			 GHWCFG2_NUM_DEV_EP_SHIFT;
3143 	hw->nperio_tx_q_depth =
3144 		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
3145 		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
3146 	hw->host_perio_tx_q_depth =
3147 		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
3148 		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
3149 	hw->dev_token_q_depth =
3150 		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
3151 		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
3152 
3153 	/* hwcfg3 */
3154 	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
3155 		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
3156 	hw->max_transfer_size = (1 << (width + 11)) - 1;
3157 	/*
3158 	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
3159 	 * coherent buffers with this size, and if it's too large we can
3160 	 * exhaust the coherent DMA pool.
3161 	 */
3162 	if (hw->max_transfer_size > 65535)
3163 		hw->max_transfer_size = 65535;
3164 	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
3165 		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
3166 	hw->max_packet_count = (1 << (width + 4)) - 1;
3167 	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
3168 	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
3169 			      GHWCFG3_DFIFO_DEPTH_SHIFT;
3170 
3171 	/* hwcfg4 */
3172 	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
3173 	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
3174 				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
3175 	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
3176 	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
3177 	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
3178 				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
3179 
3180 	/* fifo sizes */
3181 	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
3182 				GRXFSIZ_DEPTH_SHIFT;
3183 	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3184 				       FIFOSIZE_DEPTH_SHIFT;
3185 	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3186 				      FIFOSIZE_DEPTH_SHIFT;
3187 
3188 	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
3189 	dev_dbg(hsotg->dev, "  op_mode=%d\n",
3190 		hw->op_mode);
3191 	dev_dbg(hsotg->dev, "  arch=%d\n",
3192 		hw->arch);
3193 	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
3194 		hw->dma_desc_enable);
3195 	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
3196 		hw->power_optimized);
3197 	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
3198 		hw->i2c_enable);
3199 	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
3200 		hw->hs_phy_type);
3201 	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
3202 		hw->fs_phy_type);
3203 	dev_dbg(hsotg->dev, "  utmi_phy_data_wdith=%d\n",
3204 		hw->utmi_phy_data_width);
3205 	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
3206 		hw->num_dev_ep);
3207 	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
3208 		hw->num_dev_perio_in_ep);
3209 	dev_dbg(hsotg->dev, "  host_channels=%d\n",
3210 		hw->host_channels);
3211 	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
3212 		hw->max_transfer_size);
3213 	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
3214 		hw->max_packet_count);
3215 	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
3216 		hw->nperio_tx_q_depth);
3217 	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
3218 		hw->host_perio_tx_q_depth);
3219 	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
3220 		hw->dev_token_q_depth);
3221 	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
3222 		hw->enable_dynamic_fifo);
3223 	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
3224 		hw->en_multiple_tx_fifo);
3225 	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
3226 		hw->total_fifo_size);
3227 	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
3228 		hw->host_rx_fifo_size);
3229 	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
3230 		hw->host_nperio_tx_fifo_size);
3231 	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
3232 		hw->host_perio_tx_fifo_size);
3233 	dev_dbg(hsotg->dev, "\n");
3234 
3235 	return 0;
3236 }
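
/*
 * Usage sketch (illustrative only): bus glue is expected to read the
 * hardware parameters before applying any core parameters, since the
 * dwc2_set_param_*() helpers above validate against hsotg->hw_params:
 *
 *	retval = dwc2_get_hwparams(hsotg);
 *	if (retval)
 *		return retval;
 *	dwc2_set_parameters(hsotg, params);
 */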
3237 
3238 /*
3239  * Sets all parameters to the given value.
3240  *
3241  * Assumes that the dwc2_core_params struct contains only integers.
3242  */
3243 void dwc2_set_all_params(struct dwc2_core_params *params, int value)
3244 {
3245 	int *p = (int *)params;
3246 	size_t size = sizeof(*params) / sizeof(*p);
3247 	int i;
3248 
3249 	for (i = 0; i < size; i++)
3250 		p[i] = value;
3251 }
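
/*
 * Usage sketch (illustrative only): a glue driver with no board-specific
 * tuning can request hardware-derived defaults everywhere by seeding each
 * field with the "auto" value before handing the struct over:
 *
 *	struct dwc2_core_params defaults;
 *
 *	dwc2_set_all_params(&defaults, -1);
 *	dwc2_set_parameters(hsotg, &defaults);
 */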
3252 
3253 
3254 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
3255 {
3256 	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
3257 }
3258 
3259 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
3260 {
3261 	if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
3262 		return false;
3263 	else
3264 		return true;
3265 }
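
/*
 * Usage sketch (illustrative only): interrupt handlers typically bail out
 * early when the controller has dropped off the bus (every register then
 * reads back as all ones), e.g.:
 *
 *	if (!dwc2_is_controller_alive(hsotg))
 *		return IRQ_NONE;
 */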
3266 
3267 /**
3268  * dwc2_enable_global_interrupts() - Enables the controller's Global
3269  * Interrupt in the AHB Config register
3270  *
3271  * @hsotg: Programming view of DWC_otg controller
3272  */
3273 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
3274 {
3275 	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
3276 
3277 	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
3278 	writel(ahbcfg, hsotg->regs + GAHBCFG);
3279 }
3280 
3281 /**
3282  * dwc2_disable_global_interrupts() - Disables the controller's Global
3283  * Interrupt in the AHB Config register
3284  *
3285  * @hsotg: Programming view of DWC_otg controller
3286  */
3287 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
3288 {
3289 	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
3290 
3291 	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
3292 	writel(ahbcfg, hsotg->regs + GAHBCFG);
3293 }
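
/*
 * Usage sketch (illustrative only, with dwc2_core_setup() as a hypothetical
 * stand-in for whatever reset/configuration the caller performs):
 * initialization paths keep the global interrupt gated until the handler
 * is in place:
 *
 *	dwc2_disable_global_interrupts(hsotg);
 *	dwc2_core_setup(hsotg);
 *	dwc2_enable_global_interrupts(hsotg);
 */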
3294 
3295 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
3296 MODULE_AUTHOR("Synopsys, Inc.");
3297 MODULE_LICENSE("Dual BSD/GPL");
3298