xref: /linux/drivers/usb/dwc2/core.c (revision 2ba9268dd603d23e17643437b2246acb6844953b)
1 /*
2  * core.c - DesignWare HS OTG Controller common routines
3  *
4  * Copyright (C) 2004-2013 Synopsys, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The names of the above-listed copyright holders may not be used
16  *    to endorse or promote products derived from this software without
17  *    specific prior written permission.
18  *
19  * ALTERNATIVELY, this software may be distributed under the terms of the
20  * GNU General Public License ("GPL") as published by the Free Software
21  * Foundation; either version 2 of the License, or (at your option) any
22  * later version.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 /*
38  * The Core code provides basic services for accessing and managing the
39  * DWC_otg hardware. These services are used by both the Host Controller
40  * Driver and the Peripheral Controller Driver.
41  */
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/delay.h>
49 #include <linux/io.h>
50 #include <linux/slab.h>
51 #include <linux/usb.h>
52 
53 #include <linux/usb/hcd.h>
54 #include <linux/usb/ch11.h>
55 
56 #include "core.h"
57 #include "hcd.h"
58 
59 /**
60  * dwc2_enable_common_interrupts() - Initializes the common interrupts,
61  * used in both device and host modes
62  *
63  * @hsotg: Programming view of the DWC_otg controller
64  */
65 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
66 {
67 	u32 intmsk;
68 
69 	/* Clear any pending OTG Interrupts */
70 	writel(0xffffffff, hsotg->regs + GOTGINT);
71 
72 	/* Clear any pending interrupts */
73 	writel(0xffffffff, hsotg->regs + GINTSTS);
74 
75 	/* Enable the interrupts in the GINTMSK */
76 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
77 
78 	if (hsotg->core_params->dma_enable <= 0)
79 		intmsk |= GINTSTS_RXFLVL;
80 
81 	intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
82 		  GINTSTS_SESSREQINT;
83 
84 	writel(intmsk, hsotg->regs + GINTMSK);
85 }
86 
87 /*
88  * Initializes the FSLSPClkSel field of the HCFG register depending on the
89  * PHY type
90  */
91 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
92 {
93 	u32 hcfg, val;
94 
95 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
96 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
97 	     hsotg->core_params->ulpi_fs_ls > 0) ||
98 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
99 		/* Full speed PHY */
100 		val = HCFG_FSLSPCLKSEL_48_MHZ;
101 	} else {
102 		/* High speed PHY running at full speed or high speed */
103 		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
104 	}
105 
106 	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
107 	hcfg = readl(hsotg->regs + HCFG);
108 	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
109 	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
110 	writel(hcfg, hsotg->regs + HCFG);
111 }
112 
113 /*
114  * Do a soft reset of the core. Be careful with this because it
115  * resets all the internal state machines of the core.
116  */
117 static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
118 {
119 	u32 greset;
120 	int count = 0;
121 	u32 gusbcfg;
122 
123 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
124 
125 	/* Wait for AHB master IDLE state */
126 	do {
127 		usleep_range(20000, 40000);
128 		greset = readl(hsotg->regs + GRSTCTL);
129 		if (++count > 50) {
130 			dev_warn(hsotg->dev,
131 				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
132 				 __func__, greset);
133 			return -EBUSY;
134 		}
135 	} while (!(greset & GRSTCTL_AHBIDLE));
136 
137 	/* Core Soft Reset */
138 	count = 0;
139 	greset |= GRSTCTL_CSFTRST;
140 	writel(greset, hsotg->regs + GRSTCTL);
141 	do {
142 		usleep_range(20000, 40000);
143 		greset = readl(hsotg->regs + GRSTCTL);
144 		if (++count > 50) {
145 			dev_warn(hsotg->dev,
146 				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
147 				 __func__, greset);
148 			return -EBUSY;
149 		}
150 	} while (greset & GRSTCTL_CSFTRST);
151 
152 	if (hsotg->dr_mode == USB_DR_MODE_HOST) {
153 		gusbcfg = readl(hsotg->regs + GUSBCFG);
154 		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
155 		gusbcfg |= GUSBCFG_FORCEHOSTMODE;
156 		writel(gusbcfg, hsotg->regs + GUSBCFG);
157 	} else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
158 		gusbcfg = readl(hsotg->regs + GUSBCFG);
159 		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
160 		gusbcfg |= GUSBCFG_FORCEDEVMODE;
161 		writel(gusbcfg, hsotg->regs + GUSBCFG);
162 	} else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
163 		gusbcfg = readl(hsotg->regs + GUSBCFG);
164 		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
165 		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
166 		writel(gusbcfg, hsotg->regs + GUSBCFG);
167 	}
168 
169 	/*
170 	 * NOTE: This long sleep is _very_ important, otherwise the core will
171 	 * not stay in host mode after a connector ID change!
172 	 */
173 	usleep_range(150000, 200000);
174 
175 	return 0;
176 }
177 
178 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
179 {
180 	u32 usbcfg, i2cctl;
181 	int retval = 0;
182 
183 	/*
184 	 * core_init() is now called on every switch so only call the
185 	 * following for the first time through
186 	 */
187 	if (select_phy) {
188 		dev_dbg(hsotg->dev, "FS PHY selected\n");
189 		usbcfg = readl(hsotg->regs + GUSBCFG);
190 		usbcfg |= GUSBCFG_PHYSEL;
191 		writel(usbcfg, hsotg->regs + GUSBCFG);
192 
193 		/* Reset after a PHY select */
194 		retval = dwc2_core_reset(hsotg);
195 		if (retval) {
196 			dev_err(hsotg->dev, "%s() Reset failed, aborting",
197 					__func__);
198 			return retval;
199 		}
200 	}
201 
202 	/*
203 	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
204 	 * do this on HNP Dev/Host mode switches (done in dev_init and
205 	 * host_init).
206 	 */
207 	if (dwc2_is_host_mode(hsotg))
208 		dwc2_init_fs_ls_pclk_sel(hsotg);
209 
210 	if (hsotg->core_params->i2c_enable > 0) {
211 		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
212 
213 		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
214 		usbcfg = readl(hsotg->regs + GUSBCFG);
215 		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
216 		writel(usbcfg, hsotg->regs + GUSBCFG);
217 
218 		/* Program GI2CCTL.I2CEn */
219 		i2cctl = readl(hsotg->regs + GI2CCTL);
220 		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
221 		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
222 		i2cctl &= ~GI2CCTL_I2CEN;
223 		writel(i2cctl, hsotg->regs + GI2CCTL);
224 		i2cctl |= GI2CCTL_I2CEN;
225 		writel(i2cctl, hsotg->regs + GI2CCTL);
226 	}
227 
228 	return retval;
229 }
230 
231 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
232 {
233 	u32 usbcfg;
234 	int retval = 0;
235 
236 	if (!select_phy)
237 		return 0;
238 
239 	usbcfg = readl(hsotg->regs + GUSBCFG);
240 
241 	/*
242 	 * HS PHY parameters. These parameters are preserved during soft reset
243 	 * so only program the first time. Do a soft reset immediately after
244 	 * setting phyif.
245 	 */
246 	switch (hsotg->core_params->phy_type) {
247 	case DWC2_PHY_TYPE_PARAM_ULPI:
248 		/* ULPI interface */
249 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
250 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
251 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
252 		if (hsotg->core_params->phy_ulpi_ddr > 0)
253 			usbcfg |= GUSBCFG_DDRSEL;
254 		break;
255 	case DWC2_PHY_TYPE_PARAM_UTMI:
256 		/* UTMI+ interface */
257 		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
258 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
259 		if (hsotg->core_params->phy_utmi_width == 16)
260 			usbcfg |= GUSBCFG_PHYIF16;
261 		break;
262 	default:
263 		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
264 		break;
265 	}
266 
267 	writel(usbcfg, hsotg->regs + GUSBCFG);
268 
269 	/* Reset after setting the PHY parameters */
270 	retval = dwc2_core_reset(hsotg);
271 	if (retval) {
272 		dev_err(hsotg->dev, "%s() Reset failed, aborting",
273 				__func__);
274 		return retval;
275 	}
276 
277 	return retval;
278 }
279 
280 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
281 {
282 	u32 usbcfg;
283 	int retval = 0;
284 
285 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
286 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
287 		/* If FS mode with FS PHY */
288 		retval = dwc2_fs_phy_init(hsotg, select_phy);
289 		if (retval)
290 			return retval;
291 	} else {
292 		/* High speed PHY */
293 		retval = dwc2_hs_phy_init(hsotg, select_phy);
294 		if (retval)
295 			return retval;
296 	}
297 
298 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
299 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
300 	    hsotg->core_params->ulpi_fs_ls > 0) {
301 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
302 		usbcfg = readl(hsotg->regs + GUSBCFG);
303 		usbcfg |= GUSBCFG_ULPI_FS_LS;
304 		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
305 		writel(usbcfg, hsotg->regs + GUSBCFG);
306 	} else {
307 		usbcfg = readl(hsotg->regs + GUSBCFG);
308 		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
309 		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
310 		writel(usbcfg, hsotg->regs + GUSBCFG);
311 	}
312 
313 	return retval;
314 }
315 
316 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
317 {
318 	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
319 
320 	switch (hsotg->hw_params.arch) {
321 	case GHWCFG2_EXT_DMA_ARCH:
322 		dev_err(hsotg->dev, "External DMA Mode not supported\n");
323 		return -EINVAL;
324 
325 	case GHWCFG2_INT_DMA_ARCH:
326 		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
327 		if (hsotg->core_params->ahbcfg != -1) {
328 			ahbcfg &= GAHBCFG_CTRL_MASK;
329 			ahbcfg |= hsotg->core_params->ahbcfg &
330 				  ~GAHBCFG_CTRL_MASK;
331 		}
332 		break;
333 
334 	case GHWCFG2_SLAVE_ONLY_ARCH:
335 	default:
336 		dev_dbg(hsotg->dev, "Slave Only Mode\n");
337 		break;
338 	}
339 
340 	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
341 		hsotg->core_params->dma_enable,
342 		hsotg->core_params->dma_desc_enable);
343 
344 	if (hsotg->core_params->dma_enable > 0) {
345 		if (hsotg->core_params->dma_desc_enable > 0)
346 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
347 		else
348 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
349 	} else {
350 		dev_dbg(hsotg->dev, "Using Slave mode\n");
351 		hsotg->core_params->dma_desc_enable = 0;
352 	}
353 
354 	if (hsotg->core_params->dma_enable > 0)
355 		ahbcfg |= GAHBCFG_DMA_EN;
356 
357 	writel(ahbcfg, hsotg->regs + GAHBCFG);
358 
359 	return 0;
360 }
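
/*
 * A note on the ahbcfg merge in dwc2_gahbcfg_init() above: GAHBCFG_CTRL_MASK
 * covers the bits the driver manages itself (global interrupt enable, DMA
 * enable and the Tx FIFO empty levels), so those are preserved from the live
 * register while the remaining bits - typically the AHB burst length - come
 * from the platform-supplied core_params->ahbcfg value. This is only a
 * summary of the intent; the exact fields are defined by the GAHBCFG_*
 * macros in hw.h.
 */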
361 
362 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
363 {
364 	u32 usbcfg;
365 
366 	usbcfg = readl(hsotg->regs + GUSBCFG);
367 	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
368 
369 	switch (hsotg->hw_params.op_mode) {
370 	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
371 		if (hsotg->core_params->otg_cap ==
372 				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
373 			usbcfg |= GUSBCFG_HNPCAP;
374 		if (hsotg->core_params->otg_cap !=
375 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
376 			usbcfg |= GUSBCFG_SRPCAP;
377 		break;
378 
379 	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
380 	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
381 	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
382 		if (hsotg->core_params->otg_cap !=
383 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
384 			usbcfg |= GUSBCFG_SRPCAP;
385 		break;
386 
387 	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
388 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
389 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
390 	default:
391 		break;
392 	}
393 
394 	writel(usbcfg, hsotg->regs + GUSBCFG);
395 }
396 
397 /**
398  * dwc2_core_init() - Initializes the DWC_otg controller registers and
399  * prepares the core for device mode or host mode operation
400  *
401  * @hsotg:      Programming view of the DWC_otg controller
402  * @select_phy: If true then also set the Phy type
403  * @irq:        If >= 0, the irq to register
404  */
405 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
406 {
407 	u32 usbcfg, otgctl;
408 	int retval;
409 
410 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
411 
412 	usbcfg = readl(hsotg->regs + GUSBCFG);
413 
414 	/* Set ULPI External VBUS bit if needed */
415 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
416 	if (hsotg->core_params->phy_ulpi_ext_vbus ==
417 				DWC2_PHY_ULPI_EXTERNAL_VBUS)
418 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
419 
420 	/* Set external TS Dline pulsing bit if needed */
421 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
422 	if (hsotg->core_params->ts_dline > 0)
423 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
424 
425 	writel(usbcfg, hsotg->regs + GUSBCFG);
426 
427 	/* Reset the Controller */
428 	retval = dwc2_core_reset(hsotg);
429 	if (retval) {
430 		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
431 				__func__);
432 		return retval;
433 	}
434 
435 	/*
436 	 * This needs to happen in FS mode before any other programming occurs
437 	 */
438 	retval = dwc2_phy_init(hsotg, select_phy);
439 	if (retval)
440 		return retval;
441 
442 	/* Program the GAHBCFG Register */
443 	retval = dwc2_gahbcfg_init(hsotg);
444 	if (retval)
445 		return retval;
446 
447 	/* Program the GUSBCFG register */
448 	dwc2_gusbcfg_init(hsotg);
449 
450 	/* Program the GOTGCTL register */
451 	otgctl = readl(hsotg->regs + GOTGCTL);
452 	otgctl &= ~GOTGCTL_OTGVER;
453 	if (hsotg->core_params->otg_ver > 0)
454 		otgctl |= GOTGCTL_OTGVER;
455 	writel(otgctl, hsotg->regs + GOTGCTL);
456 	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
457 
458 	/* Clear the SRP success bit for FS-I2c */
459 	hsotg->srp_success = 0;
460 
461 	/* Enable common interrupts */
462 	dwc2_enable_common_interrupts(hsotg);
463 
464 	/*
465 	 * Do device or host initialization based on mode during PCD and
466 	 * HCD initialization
467 	 */
468 	if (dwc2_is_host_mode(hsotg)) {
469 		dev_dbg(hsotg->dev, "Host Mode\n");
470 		hsotg->op_state = OTG_STATE_A_HOST;
471 	} else {
472 		dev_dbg(hsotg->dev, "Device Mode\n");
473 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
474 	}
475 
476 	return 0;
477 }
478 
479 /**
480  * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
481  *
482  * @hsotg: Programming view of DWC_otg controller
483  */
484 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
485 {
486 	u32 intmsk;
487 
488 	dev_dbg(hsotg->dev, "%s()\n", __func__);
489 
490 	/* Disable all interrupts */
491 	writel(0, hsotg->regs + GINTMSK);
492 	writel(0, hsotg->regs + HAINTMSK);
493 
494 	/* Enable the common interrupts */
495 	dwc2_enable_common_interrupts(hsotg);
496 
497 	/* Enable host mode interrupts without disturbing common interrupts */
498 	intmsk = readl(hsotg->regs + GINTMSK);
499 	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
500 	writel(intmsk, hsotg->regs + GINTMSK);
501 }
502 
503 /**
504  * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
505  *
506  * @hsotg: Programming view of DWC_otg controller
507  */
508 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
509 {
510 	u32 intmsk = readl(hsotg->regs + GINTMSK);
511 
512 	/* Disable host mode interrupts without disturbing common interrupts */
513 	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
514 		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
515 	writel(intmsk, hsotg->regs + GINTMSK);
516 }
517 
518 /*
519  * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO size
520  * for systems that have a total FIFO depth smaller than the default
521  * RX + TX FIFO size.
522  *
523  * @hsotg: Programming view of DWC_otg controller
524  */
525 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
526 {
527 	struct dwc2_core_params *params = hsotg->core_params;
528 	struct dwc2_hw_params *hw = &hsotg->hw_params;
529 	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
530 
531 	total_fifo_size = hw->total_fifo_size;
532 	rxfsiz = params->host_rx_fifo_size;
533 	nptxfsiz = params->host_nperio_tx_fifo_size;
534 	ptxfsiz = params->host_perio_tx_fifo_size;
535 
536 	/*
537 	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
538 	 * allocation with support for high-bandwidth endpoints. Synopsys
539 	 * defines MPS (max packet size) as 1024 for a periodic EP and 512
540 	 * for a non-periodic EP.
541 	 */
542 	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
543 		/*
544 		 * For Buffer DMA mode/Scatter Gather DMA mode
545 		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
546 		 * with n = number of host channel.
547 		 * 2 * ((1024/4) + 2) = 516
548 		 */
549 		rxfsiz = 516 + hw->host_channels;
550 
551 		/*
552 		 * min non-periodic tx fifo depth
553 		 * 2 * (largest non-periodic USB packet used / 4)
554 		 * 2 * (512/4) = 256
555 		 */
556 		nptxfsiz = 256;
557 
558 		/*
559 		 * min periodic tx fifo depth
560 		 * (largest packet size*MC)/4
561 		 * (1024 * 3)/4 = 768
562 		 */
563 		ptxfsiz = 768;
564 
565 		params->host_rx_fifo_size = rxfsiz;
566 		params->host_nperio_tx_fifo_size = nptxfsiz;
567 		params->host_perio_tx_fifo_size = ptxfsiz;
568 	}
569 
570 	/*
571 	 * If the summation of RX, NPTX and PTX fifo sizes is still
572 	 * bigger than the total_fifo_size, then we have a problem.
573 	 *
574 	 * We won't be able to allocate as many endpoints. Right now,
575 	 * we're just printing an error message, but ideally this FIFO
576 	 * allocation algorithm would be improved in the future.
577 	 *
578 	 * FIXME improve this FIFO allocation algorithm.
579 	 */
580 	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
581 		dev_err(hsotg->dev, "invalid fifo sizes\n");
582 }
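
/*
 * Worked example with illustrative numbers: on a core reporting 8 host
 * channels, the fallback values above become rxfsiz = 516 + 8 = 524,
 * nptxfsiz = 256 and ptxfsiz = 768, i.e. 1548 words in total. A core whose
 * hardware reports a total FIFO depth below 1548 words would still trip the
 * "invalid fifo sizes" warning, which is what the FIXME above is about.
 */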
583 
584 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
585 {
586 	struct dwc2_core_params *params = hsotg->core_params;
587 	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
588 
589 	if (!params->enable_dynamic_fifo)
590 		return;
591 
592 	dwc2_calculate_dynamic_fifo(hsotg);
593 
594 	/* Rx FIFO */
595 	grxfsiz = readl(hsotg->regs + GRXFSIZ);
596 	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
597 	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
598 	grxfsiz |= params->host_rx_fifo_size <<
599 		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
600 	writel(grxfsiz, hsotg->regs + GRXFSIZ);
601 	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
602 
603 	/* Non-periodic Tx FIFO */
604 	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
605 		readl(hsotg->regs + GNPTXFSIZ));
606 	nptxfsiz = params->host_nperio_tx_fifo_size <<
607 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
608 	nptxfsiz |= params->host_rx_fifo_size <<
609 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
610 	writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
611 	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
612 		readl(hsotg->regs + GNPTXFSIZ));
613 
614 	/* Periodic Tx FIFO */
615 	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
616 		readl(hsotg->regs + HPTXFSIZ));
617 	hptxfsiz = params->host_perio_tx_fifo_size <<
618 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
619 	hptxfsiz |= (params->host_rx_fifo_size +
620 		     params->host_nperio_tx_fifo_size) <<
621 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
622 	writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
623 	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
624 		readl(hsotg->regs + HPTXFSIZ));
625 
626 	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
627 	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
628 		/*
629 		 * Global DFIFOCFG calculation for Host mode -
630 		 * include RxFIFO, NPTXFIFO and HPTXFIFO
631 		 */
632 		dfifocfg = readl(hsotg->regs + GDFIFOCFG);
633 		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
634 		dfifocfg |= (params->host_rx_fifo_size +
635 			     params->host_nperio_tx_fifo_size +
636 			     params->host_perio_tx_fifo_size) <<
637 			    GDFIFOCFG_EPINFOBASE_SHIFT &
638 			    GDFIFOCFG_EPINFOBASE_MASK;
639 		writel(dfifocfg, hsotg->regs + GDFIFOCFG);
640 	}
641 }
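
/*
 * The resulting layout of the shared FIFO RAM programmed above is a simple
 * linear map (all sizes in 32-bit words):
 *
 *   Rx FIFO          start 0           depth host_rx_fifo_size
 *   Non-periodic Tx  start rx          depth host_nperio_tx_fifo_size
 *   Periodic Tx      start rx + nptx   depth host_perio_tx_fifo_size
 *
 * and, on the cores that need it, GDFIFOCFG.EPInfoBase is pointed just past
 * the three FIFOs (rx + nptx + ptx).
 */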
642 
643 /**
644  * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
645  * Host mode
646  *
647  * @hsotg: Programming view of DWC_otg controller
648  *
649  * This function flushes the Tx and Rx FIFOs and flushes any entries in the
650  * request queues. Host channels are reset to ensure that they are ready for
651  * performing transfers.
652  */
653 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
654 {
655 	u32 hcfg, hfir, otgctl;
656 
657 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
658 
659 	/* Restart the Phy Clock */
660 	writel(0, hsotg->regs + PCGCTL);
661 
662 	/* Initialize Host Configuration Register */
663 	dwc2_init_fs_ls_pclk_sel(hsotg);
664 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
665 		hcfg = readl(hsotg->regs + HCFG);
666 		hcfg |= HCFG_FSLSSUPP;
667 		writel(hcfg, hsotg->regs + HCFG);
668 	}
669 
670 	/*
671 	 * This bit allows dynamic reloading of the HFIR register during
672 	 * runtime. This bit needs to be programmed during initial configuration
673 	 * and its value must not be changed during runtime.
674 	 */
675 	if (hsotg->core_params->reload_ctl > 0) {
676 		hfir = readl(hsotg->regs + HFIR);
677 		hfir |= HFIR_RLDCTRL;
678 		writel(hfir, hsotg->regs + HFIR);
679 	}
680 
681 	if (hsotg->core_params->dma_desc_enable > 0) {
682 		u32 op_mode = hsotg->hw_params.op_mode;
683 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
684 		    !hsotg->hw_params.dma_desc_enable ||
685 		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
686 		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
687 		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
688 			dev_err(hsotg->dev,
689 				"Hardware does not support descriptor DMA mode -\n");
690 			dev_err(hsotg->dev,
691 				"falling back to buffer DMA mode.\n");
692 			hsotg->core_params->dma_desc_enable = 0;
693 		} else {
694 			hcfg = readl(hsotg->regs + HCFG);
695 			hcfg |= HCFG_DESCDMA;
696 			writel(hcfg, hsotg->regs + HCFG);
697 		}
698 	}
699 
700 	/* Configure data FIFO sizes */
701 	dwc2_config_fifos(hsotg);
702 
703 	/* TODO - check this */
704 	/* Clear Host Set HNP Enable in the OTG Control Register */
705 	otgctl = readl(hsotg->regs + GOTGCTL);
706 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
707 	writel(otgctl, hsotg->regs + GOTGCTL);
708 
709 	/* Make sure the FIFOs are flushed */
710 	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
711 	dwc2_flush_rx_fifo(hsotg);
712 
713 	/* Clear Host Set HNP Enable in the OTG Control Register */
714 	otgctl = readl(hsotg->regs + GOTGCTL);
715 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
716 	writel(otgctl, hsotg->regs + GOTGCTL);
717 
718 	if (hsotg->core_params->dma_desc_enable <= 0) {
719 		int num_channels, i;
720 		u32 hcchar;
721 
722 		/* Flush out any leftover queued requests */
723 		num_channels = hsotg->core_params->host_channels;
724 		for (i = 0; i < num_channels; i++) {
725 			hcchar = readl(hsotg->regs + HCCHAR(i));
726 			hcchar &= ~HCCHAR_CHENA;
727 			hcchar |= HCCHAR_CHDIS;
728 			hcchar &= ~HCCHAR_EPDIR;
729 			writel(hcchar, hsotg->regs + HCCHAR(i));
730 		}
731 
732 		/* Halt all channels to put them into a known state */
733 		for (i = 0; i < num_channels; i++) {
734 			int count = 0;
735 
736 			hcchar = readl(hsotg->regs + HCCHAR(i));
737 			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
738 			hcchar &= ~HCCHAR_EPDIR;
739 			writel(hcchar, hsotg->regs + HCCHAR(i));
740 			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
741 				__func__, i);
742 			do {
743 				hcchar = readl(hsotg->regs + HCCHAR(i));
744 				if (++count > 1000) {
745 					dev_err(hsotg->dev,
746 						"Unable to clear enable on channel %d\n",
747 						i);
748 					break;
749 				}
750 				udelay(1);
751 			} while (hcchar & HCCHAR_CHENA);
752 		}
753 	}
754 
755 	/* Turn on the vbus power */
756 	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
757 	if (hsotg->op_state == OTG_STATE_A_HOST) {
758 		u32 hprt0 = dwc2_read_hprt0(hsotg);
759 
760 		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
761 			!!(hprt0 & HPRT0_PWR));
762 		if (!(hprt0 & HPRT0_PWR)) {
763 			hprt0 |= HPRT0_PWR;
764 			writel(hprt0, hsotg->regs + HPRT0);
765 		}
766 	}
767 
768 	dwc2_enable_host_interrupts(hsotg);
769 }
770 
771 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
772 				      struct dwc2_host_chan *chan)
773 {
774 	u32 hcintmsk = HCINTMSK_CHHLTD;
775 
776 	switch (chan->ep_type) {
777 	case USB_ENDPOINT_XFER_CONTROL:
778 	case USB_ENDPOINT_XFER_BULK:
779 		dev_vdbg(hsotg->dev, "control/bulk\n");
780 		hcintmsk |= HCINTMSK_XFERCOMPL;
781 		hcintmsk |= HCINTMSK_STALL;
782 		hcintmsk |= HCINTMSK_XACTERR;
783 		hcintmsk |= HCINTMSK_DATATGLERR;
784 		if (chan->ep_is_in) {
785 			hcintmsk |= HCINTMSK_BBLERR;
786 		} else {
787 			hcintmsk |= HCINTMSK_NAK;
788 			hcintmsk |= HCINTMSK_NYET;
789 			if (chan->do_ping)
790 				hcintmsk |= HCINTMSK_ACK;
791 		}
792 
793 		if (chan->do_split) {
794 			hcintmsk |= HCINTMSK_NAK;
795 			if (chan->complete_split)
796 				hcintmsk |= HCINTMSK_NYET;
797 			else
798 				hcintmsk |= HCINTMSK_ACK;
799 		}
800 
801 		if (chan->error_state)
802 			hcintmsk |= HCINTMSK_ACK;
803 		break;
804 
805 	case USB_ENDPOINT_XFER_INT:
806 		if (dbg_perio())
807 			dev_vdbg(hsotg->dev, "intr\n");
808 		hcintmsk |= HCINTMSK_XFERCOMPL;
809 		hcintmsk |= HCINTMSK_NAK;
810 		hcintmsk |= HCINTMSK_STALL;
811 		hcintmsk |= HCINTMSK_XACTERR;
812 		hcintmsk |= HCINTMSK_DATATGLERR;
813 		hcintmsk |= HCINTMSK_FRMOVRUN;
814 
815 		if (chan->ep_is_in)
816 			hcintmsk |= HCINTMSK_BBLERR;
817 		if (chan->error_state)
818 			hcintmsk |= HCINTMSK_ACK;
819 		if (chan->do_split) {
820 			if (chan->complete_split)
821 				hcintmsk |= HCINTMSK_NYET;
822 			else
823 				hcintmsk |= HCINTMSK_ACK;
824 		}
825 		break;
826 
827 	case USB_ENDPOINT_XFER_ISOC:
828 		if (dbg_perio())
829 			dev_vdbg(hsotg->dev, "isoc\n");
830 		hcintmsk |= HCINTMSK_XFERCOMPL;
831 		hcintmsk |= HCINTMSK_FRMOVRUN;
832 		hcintmsk |= HCINTMSK_ACK;
833 
834 		if (chan->ep_is_in) {
835 			hcintmsk |= HCINTMSK_XACTERR;
836 			hcintmsk |= HCINTMSK_BBLERR;
837 		}
838 		break;
839 	default:
840 		dev_err(hsotg->dev, "## Unknown EP type ##\n");
841 		break;
842 	}
843 
844 	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
845 	if (dbg_hc(chan))
846 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
847 }
848 
849 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
850 				    struct dwc2_host_chan *chan)
851 {
852 	u32 hcintmsk = HCINTMSK_CHHLTD;
853 
854 	/*
855 	 * In Descriptor DMA mode the core halts the channel on an AHB error,
856 	 * so the AHB error interrupt is not required.
857 	 */
858 	if (hsotg->core_params->dma_desc_enable <= 0) {
859 		if (dbg_hc(chan))
860 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
861 		hcintmsk |= HCINTMSK_AHBERR;
862 	} else {
863 		if (dbg_hc(chan))
864 			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
865 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
866 			hcintmsk |= HCINTMSK_XFERCOMPL;
867 	}
868 
869 	if (chan->error_state && !chan->do_split &&
870 	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
871 		if (dbg_hc(chan))
872 			dev_vdbg(hsotg->dev, "setting ACK\n");
873 		hcintmsk |= HCINTMSK_ACK;
874 		if (chan->ep_is_in) {
875 			hcintmsk |= HCINTMSK_DATATGLERR;
876 			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
877 				hcintmsk |= HCINTMSK_NAK;
878 		}
879 	}
880 
881 	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
882 	if (dbg_hc(chan))
883 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
884 }
885 
886 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
887 				struct dwc2_host_chan *chan)
888 {
889 	u32 intmsk;
890 
891 	if (hsotg->core_params->dma_enable > 0) {
892 		if (dbg_hc(chan))
893 			dev_vdbg(hsotg->dev, "DMA enabled\n");
894 		dwc2_hc_enable_dma_ints(hsotg, chan);
895 	} else {
896 		if (dbg_hc(chan))
897 			dev_vdbg(hsotg->dev, "DMA disabled\n");
898 		dwc2_hc_enable_slave_ints(hsotg, chan);
899 	}
900 
901 	/* Enable the top level host channel interrupt */
902 	intmsk = readl(hsotg->regs + HAINTMSK);
903 	intmsk |= 1 << chan->hc_num;
904 	writel(intmsk, hsotg->regs + HAINTMSK);
905 	if (dbg_hc(chan))
906 		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
907 
908 	/* Make sure host channel interrupts are enabled */
909 	intmsk = readl(hsotg->regs + GINTMSK);
910 	intmsk |= GINTSTS_HCHINT;
911 	writel(intmsk, hsotg->regs + GINTMSK);
912 	if (dbg_hc(chan))
913 		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
914 }
915 
916 /**
917  * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
918  * a specific endpoint
919  *
920  * @hsotg: Programming view of DWC_otg controller
921  * @chan:  Information needed to initialize the host channel
922  *
923  * The HCCHARn register is set up with the characteristics specified in chan.
924  * Host channel interrupts that may need to be serviced while this transfer is
925  * in progress are enabled.
926  */
927 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
928 {
929 	u8 hc_num = chan->hc_num;
930 	u32 hcintmsk;
931 	u32 hcchar;
932 	u32 hcsplt = 0;
933 
934 	if (dbg_hc(chan))
935 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
936 
937 	/* Clear old interrupt conditions for this host channel */
938 	hcintmsk = 0xffffffff;
939 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
940 	writel(hcintmsk, hsotg->regs + HCINT(hc_num));
941 
942 	/* Enable channel interrupts required for this transfer */
943 	dwc2_hc_enable_ints(hsotg, chan);
944 
945 	/*
946 	 * Program the HCCHARn register with the endpoint characteristics for
947 	 * the current transfer
948 	 */
949 	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
950 	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
951 	if (chan->ep_is_in)
952 		hcchar |= HCCHAR_EPDIR;
953 	if (chan->speed == USB_SPEED_LOW)
954 		hcchar |= HCCHAR_LSPDDEV;
955 	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
956 	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
957 	writel(hcchar, hsotg->regs + HCCHAR(hc_num));
958 	if (dbg_hc(chan)) {
959 		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
960 			 hc_num, hcchar);
961 
962 		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
963 			 __func__, hc_num);
964 		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
965 			 chan->dev_addr);
966 		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
967 			 chan->ep_num);
968 		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
969 			 chan->ep_is_in);
970 		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
971 			 chan->speed == USB_SPEED_LOW);
972 		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
973 			 chan->ep_type);
974 		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
975 			 chan->max_packet);
976 	}
977 
978 	/* Program the HCSPLT register for SPLITs */
979 	if (chan->do_split) {
980 		if (dbg_hc(chan))
981 			dev_vdbg(hsotg->dev,
982 				 "Programming HC %d with split --> %s\n",
983 				 hc_num,
984 				 chan->complete_split ? "CSPLIT" : "SSPLIT");
985 		if (chan->complete_split)
986 			hcsplt |= HCSPLT_COMPSPLT;
987 		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
988 			  HCSPLT_XACTPOS_MASK;
989 		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
990 			  HCSPLT_HUBADDR_MASK;
991 		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
992 			  HCSPLT_PRTADDR_MASK;
993 		if (dbg_hc(chan)) {
994 			dev_vdbg(hsotg->dev, "	  comp split %d\n",
995 				 chan->complete_split);
996 			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
997 				 chan->xact_pos);
998 			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
999 				 chan->hub_addr);
1000 			dev_vdbg(hsotg->dev, "	  hub port %d\n",
1001 				 chan->hub_port);
1002 			dev_vdbg(hsotg->dev, "	  is_in %d\n",
1003 				 chan->ep_is_in);
1004 			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
1005 				 chan->max_packet);
1006 			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
1007 				 chan->xfer_len);
1008 		}
1009 	}
1010 
1011 	writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
1012 }
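
/*
 * Illustration of the HCCHAR packing done in dwc2_hc_init(), using made-up
 * values and assuming the usual HCCHARn field layout from hw.h (MPS at bit
 * 0, EpNum at 11, EpDir at 15, EpType at 18, DevAddr at 22): a bulk IN
 * endpoint with dev_addr = 3, ep_num = 1 and max_packet = 512 packs to
 *
 *   (3 << 22) | (2 << 18) | (1 << 15) | (1 << 11) | 512 = 0x00c88a00
 *
 * with USB_ENDPOINT_XFER_BULK being 2.
 */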
1013 
1014 /**
1015  * dwc2_hc_halt() - Attempts to halt a host channel
1016  *
1017  * @hsotg:       Controller register interface
1018  * @chan:        Host channel to halt
1019  * @halt_status: Reason for halting the channel
1020  *
1021  * This function should only be called in Slave mode or to abort a transfer in
1022  * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1023  * controller halts the channel when the transfer is complete or a condition
1024  * occurs that requires application intervention.
1025  *
1026  * In slave mode, checks for a free request queue entry, then sets the Channel
1027  * Enable and Channel Disable bits of the Host Channel Characteristics
1028  * register of the specified channel to initiate the halt. If there is no free
1029  * request queue entry, sets only the Channel Disable bit of the HCCHARn
1030  * register to flush requests for this channel. In the latter case, sets a
1031  * flag to indicate that the host channel needs to be halted when a request
1032  * queue slot is open.
1033  *
1034  * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1035  * HCCHARn register. The controller ensures there is space in the request
1036  * queue before submitting the halt request.
1037  *
1038  * Some time may elapse before the core flushes any posted requests for this
1039  * host channel and halts. The Channel Halted interrupt handler completes the
1040  * deactivation of the host channel.
1041  */
1042 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1043 		  enum dwc2_halt_status halt_status)
1044 {
1045 	u32 nptxsts, hptxsts, hcchar;
1046 
1047 	if (dbg_hc(chan))
1048 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1049 	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1050 		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1051 
1052 	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1053 	    halt_status == DWC2_HC_XFER_AHB_ERR) {
1054 		/*
1055 		 * Disable all channel interrupts except Ch Halted. The QTD
1056 		 * and QH state associated with this transfer has been cleared
1057 		 * (in the case of URB_DEQUEUE), so the channel needs to be
1058 		 * shut down carefully to prevent crashes.
1059 		 */
1060 		u32 hcintmsk = HCINTMSK_CHHLTD;
1061 
1062 		dev_vdbg(hsotg->dev, "dequeue/error\n");
1063 		writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1064 
1065 		/*
1066 		 * Make sure no other interrupts besides halt are currently
1067 		 * pending. Handling another interrupt could cause a crash due
1068 		 * to the QTD and QH state.
1069 		 */
1070 		writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1071 
1072 		/*
1073 		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1074 		 * even if the channel was already halted for some other
1075 		 * reason
1076 		 */
1077 		chan->halt_status = halt_status;
1078 
1079 		hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1080 		if (!(hcchar & HCCHAR_CHENA)) {
1081 			/*
1082 			 * The channel is either already halted or it hasn't
1083 			 * started yet. In DMA mode, the transfer may halt if
1084 			 * it finishes normally or a condition occurs that
1085 			 * requires driver intervention. Don't want to halt
1086 			 * the channel again. In either Slave or DMA mode,
1087 			 * it's possible that the transfer has been assigned
1088 			 * to a channel, but not started yet when an URB is
1089 			 * dequeued. Don't want to halt a channel that hasn't
1090 			 * started yet.
1091 			 */
1092 			return;
1093 		}
1094 	}
1095 	if (chan->halt_pending) {
1096 		/*
1097 		 * A halt has already been issued for this channel. This might
1098 		 * happen when a transfer is aborted by a higher level in
1099 		 * the stack.
1100 		 */
1101 		dev_vdbg(hsotg->dev,
1102 			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1103 			 __func__, chan->hc_num);
1104 		return;
1105 	}
1106 
1107 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1108 
1109 	/* No need to set the bit in DDMA for disabling the channel */
1110 	/* TODO check it everywhere channel is disabled */
1111 	if (hsotg->core_params->dma_desc_enable <= 0) {
1112 		if (dbg_hc(chan))
1113 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1114 		hcchar |= HCCHAR_CHENA;
1115 	} else {
1116 		if (dbg_hc(chan))
1117 			dev_dbg(hsotg->dev, "desc DMA enabled\n");
1118 	}
1119 	hcchar |= HCCHAR_CHDIS;
1120 
1121 	if (hsotg->core_params->dma_enable <= 0) {
1122 		if (dbg_hc(chan))
1123 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
1124 		hcchar |= HCCHAR_CHENA;
1125 
1126 		/* Check for space in the request queue to issue the halt */
1127 		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1128 		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1129 			dev_vdbg(hsotg->dev, "control/bulk\n");
1130 			nptxsts = readl(hsotg->regs + GNPTXSTS);
1131 			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1132 				dev_vdbg(hsotg->dev, "Disabling channel\n");
1133 				hcchar &= ~HCCHAR_CHENA;
1134 			}
1135 		} else {
1136 			if (dbg_perio())
1137 				dev_vdbg(hsotg->dev, "isoc/intr\n");
1138 			hptxsts = readl(hsotg->regs + HPTXSTS);
1139 			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1140 			    hsotg->queuing_high_bandwidth) {
1141 				if (dbg_perio())
1142 					dev_vdbg(hsotg->dev, "Disabling channel\n");
1143 				hcchar &= ~HCCHAR_CHENA;
1144 			}
1145 		}
1146 	} else {
1147 		if (dbg_hc(chan))
1148 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1149 	}
1150 
1151 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1152 	chan->halt_status = halt_status;
1153 
1154 	if (hcchar & HCCHAR_CHENA) {
1155 		if (dbg_hc(chan))
1156 			dev_vdbg(hsotg->dev, "Channel enabled\n");
1157 		chan->halt_pending = 1;
1158 		chan->halt_on_queue = 0;
1159 	} else {
1160 		if (dbg_hc(chan))
1161 			dev_vdbg(hsotg->dev, "Channel disabled\n");
1162 		chan->halt_on_queue = 1;
1163 	}
1164 
1165 	if (dbg_hc(chan)) {
1166 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1167 			 chan->hc_num);
1168 		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
1169 			 hcchar);
1170 		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
1171 			 chan->halt_pending);
1172 		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
1173 			 chan->halt_on_queue);
1174 		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
1175 			 chan->halt_status);
1176 	}
1177 }
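
/*
 * Summary of the HCCHAR write in dwc2_hc_halt(): in Slave mode the halt
 * (CHENA | CHDIS) is only issued when a request queue slot is free,
 * otherwise CHENA is left clear and halt_on_queue defers the halt to the
 * interrupt handler. In Buffer DMA mode both bits are written immediately,
 * while in Descriptor DMA mode setting CHDIS alone is sufficient. In every
 * case the Channel Halted interrupt completes the teardown.
 */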
1178 
1179 /**
1180  * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1181  *
1182  * @hsotg: Programming view of DWC_otg controller
1183  * @chan:  Identifies the host channel to clean up
1184  *
1185  * This function is normally called after a transfer is done and the host
1186  * channel is being released
1187  */
1188 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1189 {
1190 	u32 hcintmsk;
1191 
1192 	chan->xfer_started = 0;
1193 
1194 	/*
1195 	 * Clear channel interrupt enables and any unhandled channel interrupt
1196 	 * conditions
1197 	 */
1198 	writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1199 	hcintmsk = 0xffffffff;
1200 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1201 	writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1202 }
1203 
1204 /**
1205  * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1206  * which frame a periodic transfer should occur
1207  *
1208  * @hsotg:  Programming view of DWC_otg controller
1209  * @chan:   Identifies the host channel to set up and its properties
1210  * @hcchar: Current value of the HCCHAR register for the specified host channel
1211  *
1212  * This function has no effect on non-periodic transfers
1213  */
1214 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1215 				       struct dwc2_host_chan *chan, u32 *hcchar)
1216 {
1217 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1218 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1219 		/* 1 if _next_ frame is odd, 0 if it's even */
1220 		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1221 			*hcchar |= HCCHAR_ODDFRM;
1222 	}
1223 }
1224 
1225 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1226 {
1227 	/* Set up the initial PID for the transfer */
1228 	if (chan->speed == USB_SPEED_HIGH) {
1229 		if (chan->ep_is_in) {
1230 			if (chan->multi_count == 1)
1231 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1232 			else if (chan->multi_count == 2)
1233 				chan->data_pid_start = DWC2_HC_PID_DATA1;
1234 			else
1235 				chan->data_pid_start = DWC2_HC_PID_DATA2;
1236 		} else {
1237 			if (chan->multi_count == 1)
1238 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1239 			else
1240 				chan->data_pid_start = DWC2_HC_PID_MDATA;
1241 		}
1242 	} else {
1243 		chan->data_pid_start = DWC2_HC_PID_DATA0;
1244 	}
1245 }
1246 
1247 /**
1248  * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1249  * the Host Channel
1250  *
1251  * @hsotg: Programming view of DWC_otg controller
1252  * @chan:  Information needed to initialize the host channel
1253  *
1254  * This function should only be called in Slave mode. For a channel associated
1255  * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1256  * associated with a periodic EP, the periodic Tx FIFO is written.
1257  *
1258  * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1259  * the number of bytes written to the Tx FIFO.
1260  */
1261 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1262 				 struct dwc2_host_chan *chan)
1263 {
1264 	u32 i;
1265 	u32 remaining_count;
1266 	u32 byte_count;
1267 	u32 dword_count;
1268 	u32 __iomem *data_fifo;
1269 	u32 *data_buf = (u32 *)chan->xfer_buf;
1270 
1271 	if (dbg_hc(chan))
1272 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1273 
1274 	data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1275 
1276 	remaining_count = chan->xfer_len - chan->xfer_count;
1277 	if (remaining_count > chan->max_packet)
1278 		byte_count = chan->max_packet;
1279 	else
1280 		byte_count = remaining_count;
1281 
1282 	dword_count = (byte_count + 3) / 4;
1283 
1284 	if (((unsigned long)data_buf & 0x3) == 0) {
1285 		/* xfer_buf is DWORD aligned */
1286 		for (i = 0; i < dword_count; i++, data_buf++)
1287 			writel(*data_buf, data_fifo);
1288 	} else {
1289 		/* xfer_buf is not DWORD aligned; assemble each word from bytes */
1290 		for (i = 0; i < dword_count; i++, data_buf++) {
1291 			u8 *b = (u8 *)data_buf;
1292 			u32 data = b[0] | b[1] << 8 | b[2] << 16 | b[3] << 24;
1293 			writel(data, data_fifo);
1294 		}
1295 	}
1296 
1297 	chan->xfer_count += byte_count;
1298 	chan->xfer_buf += byte_count;
1299 }
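
/*
 * Note that dword_count rounds the packet up to whole 32-bit words, so a
 * 13-byte packet, for example, results in four FIFO writes (16 bytes pushed
 * to the core) while xfer_count and xfer_buf still advance by the real
 * byte_count of 13; the pad bytes in the last word are ignored by the core,
 * which goes by the transfer size programmed in HCTSIZ.
 */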
1300 
1301 /**
1302  * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1303  * channel and starts the transfer
1304  *
1305  * @hsotg: Programming view of DWC_otg controller
1306  * @chan:  Information needed to initialize the host channel. The xfer_len value
1307  *         may be reduced to accommodate the max widths of the XferSize and
1308  *         PktCnt fields in the HCTSIZn register. The multi_count value may be
1309  *         changed to reflect the final xfer_len value.
1310  *
1311  * This function may be called in either Slave mode or DMA mode. In Slave mode,
1312  * the caller must ensure that there is sufficient space in the request queue
1313  * and Tx Data FIFO.
1314  *
1315  * For an OUT transfer in Slave mode, it loads a data packet into the
1316  * appropriate FIFO. If necessary, additional data packets are loaded in the
1317  * Host ISR.
1318  *
1319  * For an IN transfer in Slave mode, a data packet is requested. The data
1320  * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1321  * additional data packets are requested in the Host ISR.
1322  *
1323  * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1324  * register along with a packet count of 1 and the channel is enabled. This
1325  * causes a single PING transaction to occur. Other fields in HCTSIZ are
1326  * simply set to 0 since no data transfer occurs in this case.
1327  *
1328  * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1329  * all the information required to perform the subsequent data transfer. In
1330  * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1331  * controller performs the entire PING protocol, then starts the data
1332  * transfer.
1333  */
1334 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1335 			    struct dwc2_host_chan *chan)
1336 {
1337 	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1338 	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1339 	u32 hcchar;
1340 	u32 hctsiz = 0;
1341 	u16 num_packets;
1342 
1343 	if (dbg_hc(chan))
1344 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1345 
1346 	if (chan->do_ping) {
1347 		if (hsotg->core_params->dma_enable <= 0) {
1348 			if (dbg_hc(chan))
1349 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
1350 			dwc2_hc_do_ping(hsotg, chan);
1351 			chan->xfer_started = 1;
1352 			return;
1353 		} else {
1354 			if (dbg_hc(chan))
1355 				dev_vdbg(hsotg->dev, "ping, DMA\n");
1356 			hctsiz |= TSIZ_DOPNG;
1357 		}
1358 	}
1359 
1360 	if (chan->do_split) {
1361 		if (dbg_hc(chan))
1362 			dev_vdbg(hsotg->dev, "split\n");
1363 		num_packets = 1;
1364 
1365 		if (chan->complete_split && !chan->ep_is_in)
1366 			/*
1367 			 * For CSPLIT OUT Transfer, set the size to 0 so the
1368 			 * core doesn't expect any data written to the FIFO
1369 			 */
1370 			chan->xfer_len = 0;
1371 		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1372 			chan->xfer_len = chan->max_packet;
1373 		else if (!chan->ep_is_in && chan->xfer_len > 188)
1374 			chan->xfer_len = 188;
1375 
1376 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1377 			  TSIZ_XFERSIZE_MASK;
1378 	} else {
1379 		if (dbg_hc(chan))
1380 			dev_vdbg(hsotg->dev, "no split\n");
1381 		/*
1382 		 * Ensure that the transfer length and packet count will fit
1383 		 * in the widths allocated for them in the HCTSIZn register
1384 		 */
1385 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1386 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1387 			/*
1388 			 * Make sure the transfer size is no larger than one
1389 			 * (micro)frame's worth of data. (A check was done
1390 			 * when the periodic transfer was accepted to ensure
1391 			 * that a (micro)frame's worth of data can be
1392 			 * programmed into a channel.)
1393 			 */
1394 			u32 max_periodic_len =
1395 				chan->multi_count * chan->max_packet;
1396 
1397 			if (chan->xfer_len > max_periodic_len)
1398 				chan->xfer_len = max_periodic_len;
1399 		} else if (chan->xfer_len > max_hc_xfer_size) {
1400 			/*
1401 			 * Make sure that xfer_len is a multiple of max packet
1402 			 * size
1403 			 */
1404 			chan->xfer_len =
1405 				max_hc_xfer_size - chan->max_packet + 1;
1406 		}
1407 
1408 		if (chan->xfer_len > 0) {
1409 			num_packets = (chan->xfer_len + chan->max_packet - 1) /
1410 					chan->max_packet;
1411 			if (num_packets > max_hc_pkt_count) {
1412 				num_packets = max_hc_pkt_count;
1413 				chan->xfer_len = num_packets * chan->max_packet;
1414 			}
1415 		} else {
1416 			/* Need 1 packet for transfer length of 0 */
1417 			num_packets = 1;
1418 		}
1419 
1420 		if (chan->ep_is_in)
1421 			/*
1422 			 * Always program an integral # of max packets for IN
1423 			 * transfers
1424 			 */
1425 			chan->xfer_len = num_packets * chan->max_packet;
1426 
1427 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1428 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1429 			/*
1430 			 * Make sure that the multi_count field matches the
1431 			 * actual transfer length
1432 			 */
1433 			chan->multi_count = num_packets;
1434 
1435 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1436 			dwc2_set_pid_isoc(chan);
1437 
1438 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1439 			  TSIZ_XFERSIZE_MASK;
1440 	}
1441 
1442 	chan->start_pkt_count = num_packets;
1443 	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1444 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1445 		  TSIZ_SC_MC_PID_MASK;
1446 	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1447 	if (dbg_hc(chan)) {
1448 		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1449 			 hctsiz, chan->hc_num);
1450 
1451 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1452 			 chan->hc_num);
1453 		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
1454 			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1455 			 TSIZ_XFERSIZE_SHIFT);
1456 		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
1457 			 (hctsiz & TSIZ_PKTCNT_MASK) >>
1458 			 TSIZ_PKTCNT_SHIFT);
1459 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1460 			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1461 			 TSIZ_SC_MC_PID_SHIFT);
1462 	}
1463 
1464 	if (hsotg->core_params->dma_enable > 0) {
1465 		dma_addr_t dma_addr;
1466 
1467 		if (chan->align_buf) {
1468 			if (dbg_hc(chan))
1469 				dev_vdbg(hsotg->dev, "align_buf\n");
1470 			dma_addr = chan->align_buf;
1471 		} else {
1472 			dma_addr = chan->xfer_dma;
1473 		}
1474 		writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1475 		if (dbg_hc(chan))
1476 			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1477 				 (unsigned long)dma_addr, chan->hc_num);
1478 	}
1479 
1480 	/* Start the split */
1481 	if (chan->do_split) {
1482 		u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
1483 
1484 		hcsplt |= HCSPLT_SPLTENA;
1485 		writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1486 	}
1487 
1488 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1489 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1490 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1491 		  HCCHAR_MULTICNT_MASK;
1492 	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1493 
1494 	if (hcchar & HCCHAR_CHDIS)
1495 		dev_warn(hsotg->dev,
1496 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1497 			 __func__, chan->hc_num, hcchar);
1498 
1499 	/* Set host channel enable after all other setup is complete */
1500 	hcchar |= HCCHAR_CHENA;
1501 	hcchar &= ~HCCHAR_CHDIS;
1502 
1503 	if (dbg_hc(chan))
1504 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1505 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1506 			 HCCHAR_MULTICNT_SHIFT);
1507 
1508 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1509 	if (dbg_hc(chan))
1510 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1511 			 chan->hc_num);
1512 
1513 	chan->xfer_started = 1;
1514 	chan->requests++;
1515 
1516 	if (hsotg->core_params->dma_enable <= 0 &&
1517 	    !chan->ep_is_in && chan->xfer_len > 0)
1518 		/* Load OUT packet into the appropriate Tx FIFO */
1519 		dwc2_hc_write_packet(hsotg, chan);
1520 }
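
/*
 * Worked example of the HCTSIZ programming in dwc2_hc_start_transfer(),
 * with illustrative values: a bulk OUT transfer of 3000 bytes with
 * max_packet = 512 gives num_packets = 6 and leaves xfer_len at 3000, so
 * HCTSIZ becomes 3000 | (6 << TSIZ_PKTCNT_SHIFT) | (pid << TSIZ_SC_MC_PID_SHIFT).
 * For a bulk IN the length is first rounded up to 6 * 512 = 3072 so that
 * the core always receives an integral number of max-size packets.
 */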
1521 
1522 /**
1523  * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1524  * host channel and starts the transfer in Descriptor DMA mode
1525  *
1526  * @hsotg: Programming view of DWC_otg controller
1527  * @chan:  Information needed to initialize the host channel
1528  *
1529  * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1530  * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1531  * with micro-frame bitmap.
1532  *
1533  * Initializes HCDMA register with descriptor list address and CTD value then
1534  * starts the transfer via enabling the channel.
1535  */
1536 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1537 				 struct dwc2_host_chan *chan)
1538 {
1539 	u32 hcchar;
1540 	u32 hc_dma;
1541 	u32 hctsiz = 0;
1542 
1543 	if (chan->do_ping)
1544 		hctsiz |= TSIZ_DOPNG;
1545 
1546 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1547 		dwc2_set_pid_isoc(chan);
1548 
1549 	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1550 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1551 		  TSIZ_SC_MC_PID_MASK;
1552 
1553 	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1554 	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1555 
1556 	/* Non-zero only for high-speed interrupt endpoints */
1557 	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1558 
1559 	if (dbg_hc(chan)) {
1560 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1561 			 chan->hc_num);
1562 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1563 			 chan->data_pid_start);
1564 		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
1565 	}
1566 
1567 	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1568 
1569 	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1570 
1571 	/* Always start from first descriptor */
1572 	hc_dma &= ~HCDMA_CTD_MASK;
1573 	writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
1574 	if (dbg_hc(chan))
1575 		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1576 			 hc_dma, chan->hc_num);
1577 
1578 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1579 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1580 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1581 		  HCCHAR_MULTICNT_MASK;
1582 
1583 	if (hcchar & HCCHAR_CHDIS)
1584 		dev_warn(hsotg->dev,
1585 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1586 			 __func__, chan->hc_num, hcchar);
1587 
1588 	/* Set host channel enable after all other setup is complete */
1589 	hcchar |= HCCHAR_CHENA;
1590 	hcchar &= ~HCCHAR_CHDIS;
1591 
1592 	if (dbg_hc(chan))
1593 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1594 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1595 			 HCCHAR_MULTICNT_SHIFT);
1596 
1597 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1598 	if (dbg_hc(chan))
1599 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1600 			 chan->hc_num);
1601 
1602 	chan->xfer_started = 1;
1603 	chan->requests++;
1604 }
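
/*
 * In Descriptor DMA mode the HCDMA register is split between the descriptor
 * list base address (the bits kept by HCDMA_DMA_ADDR_MASK, which is why the
 * list must be allocated with the alignment the mask implies) and the
 * Current Transfer Descriptor index in the HCDMA_CTD_MASK bits, cleared
 * above so that the channel always starts from descriptor 0.
 */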
1605 
1606 /**
1607  * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1608  * a previous call to dwc2_hc_start_transfer()
1609  *
1610  * @hsotg: Programming view of DWC_otg controller
1611  * @chan:  Information needed to initialize the host channel
1612  *
1613  * The caller must ensure there is sufficient space in the request queue and Tx
1614  * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1615  * the controller acts autonomously to complete transfers programmed to a host
1616  * channel.
1617  *
1618  * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1619  * if there is any data remaining to be queued. For an IN transfer, another
1620  * data packet is always requested. For the SETUP phase of a control transfer,
1621  * this function does nothing.
1622  *
1623  * Return: 1 if a new request is queued, 0 if no more requests are required
1624  * for this transfer
1625  */
1626 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1627 			      struct dwc2_host_chan *chan)
1628 {
1629 	if (dbg_hc(chan))
1630 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1631 			 chan->hc_num);
1632 
1633 	if (chan->do_split)
1634 		/* SPLITs always queue just once per channel */
1635 		return 0;
1636 
1637 	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1638 		/* SETUPs are queued only once since they can't be NAK'd */
1639 		return 0;
1640 
1641 	if (chan->ep_is_in) {
1642 		/*
1643 		 * Always queue another request for other IN transfers. If
1644 		 * back-to-back INs are issued and NAKs are received for both,
1645 		 * the driver may still be processing the first NAK when the
1646 		 * second NAK is received. When the interrupt handler clears
1647 		 * the NAK interrupt for the first NAK, the second NAK will
1648 		 * not be seen. So we can't depend on the NAK interrupt
1649 		 * handler to requeue a NAK'd request. Instead, IN requests
1650 		 * are issued each time this function is called. When the
1651 		 * transfer completes, the extra requests for the channel will
1652 		 * be flushed.
1653 		 */
1654 		u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1655 
1656 		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1657 		hcchar |= HCCHAR_CHENA;
1658 		hcchar &= ~HCCHAR_CHDIS;
1659 		if (dbg_hc(chan))
1660 			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
1661 				 hcchar);
1662 		writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1663 		chan->requests++;
1664 		return 1;
1665 	}
1666 
1667 	/* OUT transfers */
1668 
1669 	if (chan->xfer_count < chan->xfer_len) {
1670 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1671 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1672 			u32 hcchar = readl(hsotg->regs +
1673 					   HCCHAR(chan->hc_num));
1674 
1675 			dwc2_hc_set_even_odd_frame(hsotg, chan,
1676 						   &hcchar);
1677 		}
1678 
1679 		/* Load OUT packet into the appropriate Tx FIFO */
1680 		dwc2_hc_write_packet(hsotg, chan);
1681 		chan->requests++;
1682 		return 1;
1683 	}
1684 
1685 	return 0;
1686 }
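
/*
 * Example (illustrative sketch only, not part of this driver): in Slave
 * mode the HCD interrupt paths keep a transfer moving by pairing
 * dwc2_hc_start_transfer() with repeated dwc2_hc_continue_transfer()
 * calls until the latter returns 0. The "chan" below is a placeholder for
 * a channel already set up by the caller.
 *
 *	dwc2_hc_start_transfer(hsotg, chan);
 *	...
 *	if (!dwc2_hc_continue_transfer(hsotg, chan))
 *		... no further requests needed for this transfer ...
 */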
1687 
1688 /**
1689  * dwc2_hc_do_ping() - Starts a PING transfer
1690  *
1691  * @hsotg: Programming view of DWC_otg controller
1692  * @chan:  Information needed to initialize the host channel
1693  *
1694  * This function should only be called in Slave mode. The Do Ping bit is set in
1695  * the HCTSIZ register, then the channel is enabled.
1696  */
1697 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1698 {
1699 	u32 hcchar;
1700 	u32 hctsiz;
1701 
1702 	if (dbg_hc(chan))
1703 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1704 			 chan->hc_num);
1705 
1707 	hctsiz = TSIZ_DOPNG;
1708 	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1709 	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1710 
1711 	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1712 	hcchar |= HCCHAR_CHENA;
1713 	hcchar &= ~HCCHAR_CHDIS;
1714 	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1715 }
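
/*
 * Example (hedged sketch, not part of this driver): PING is only
 * meaningful for high-speed bulk/control OUT endpoints after the device
 * has responded with NYET, so a caller would typically gate it on the
 * channel state, e.g.:
 *
 *	if (chan->speed == USB_SPEED_HIGH && chan->do_ping)
 *		dwc2_hc_do_ping(hsotg, chan);
 *
 * The exact condition above is only an assumption for illustration.
 */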
1716 
1717 /**
1718  * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1719  * dwc2_calc_frame_interval() - Calculates the correct frame interval value for
1720  *
1721  * @hsotg: Programming view of DWC_otg controller
1722  *
1723  * NOTE: The caller can modify the value of the HFIR register only after the
1724  * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1725  * has been set
1726  */
1727 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1728 {
1729 	u32 usbcfg;
1730 	u32 hprt0;
1731 	int clock = 60;	/* default value */
1732 
1733 	usbcfg = readl(hsotg->regs + GUSBCFG);
1734 	hprt0 = readl(hsotg->regs + HPRT0);
1735 
1736 	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1737 	    !(usbcfg & GUSBCFG_PHYIF16))
1738 		clock = 60;
1739 	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
1740 	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1741 		clock = 48;
1742 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1743 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1744 		clock = 30;
1745 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1746 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1747 		clock = 60;
1748 	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1749 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1750 		clock = 48;
1751 	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1752 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1753 		clock = 48;
1754 	if ((usbcfg & GUSBCFG_PHYSEL) &&
1755 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
1756 		clock = 48;
1757 
1758 	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
1759 		/* High speed case */
1760 		return 125 * clock;
1761 	else
1762 		/* FS/LS case */
1763 		return 1000 * clock;
1764 }
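
/*
 * Worked example (illustrative only): a 16-bit UTMI+ PHY without the low
 * power clock select takes the "clock = 30" branch above, so a high-speed
 * port yields HFIR = 125 * 30 = 3750 PHY clocks per 125 us microframe,
 * while a dedicated full-speed PHY (clock = 48) at FS/LS yields
 * 1000 * 48 = 48000 clocks per 1 ms frame.
 */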
1765 
1766 /**
1767  * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1768  * buffer
1769  *
1770  * @hsotg:   Programming view of DWC_otg controller
1771  * @dest:    Destination buffer for the packet
1772  * @bytes:   Number of bytes to copy to the destination
1773  */
1774 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1775 {
1776 	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
1777 	u32 *data_buf = (u32 *)dest;
1778 	int word_count = (bytes + 3) / 4;
1779 	int i;
1780 
1781 	/*
1782 	 * Todo: Account for the case where dest is not dword aligned. This
1783 	 * requires reading data from the FIFO into a u32 temp buffer, then
1784 	 * moving it into the data buffer.
1785 	 */
1786 
1787 	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1788 
1789 	for (i = 0; i < word_count; i++, data_buf++)
1790 		*data_buf = readl(fifo);
1791 }
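
/*
 * Example (hedged sketch, not part of this driver): a Slave mode RXFLVL
 * handler would first pop GRXSTSP to learn the byte count, then drain the
 * FIFO with dwc2_read_packet(). The destination buffer below is a
 * placeholder.
 *
 *	grxsts = readl(hsotg->regs + GRXSTSP);
 *	byte_count = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
 *	dwc2_read_packet(hsotg, dest_buf, byte_count);
 */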
1792 
1793 /**
1794  * dwc2_dump_host_registers() - Prints the host registers
1795  *
1796  * @hsotg: Programming view of DWC_otg controller
1797  *
1798  * NOTE: This function will be removed once the peripheral controller code
1799  * is integrated and the driver is stable
1800  */
1801 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1802 {
1803 #ifdef DEBUG
1804 	u32 __iomem *addr;
1805 	int i;
1806 
1807 	dev_dbg(hsotg->dev, "Host Global Registers\n");
1808 	addr = hsotg->regs + HCFG;
1809 	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
1810 		(unsigned long)addr, readl(addr));
1811 	addr = hsotg->regs + HFIR;
1812 	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
1813 		(unsigned long)addr, readl(addr));
1814 	addr = hsotg->regs + HFNUM;
1815 	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
1816 		(unsigned long)addr, readl(addr));
1817 	addr = hsotg->regs + HPTXSTS;
1818 	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
1819 		(unsigned long)addr, readl(addr));
1820 	addr = hsotg->regs + HAINT;
1821 	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
1822 		(unsigned long)addr, readl(addr));
1823 	addr = hsotg->regs + HAINTMSK;
1824 	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
1825 		(unsigned long)addr, readl(addr));
1826 	if (hsotg->core_params->dma_desc_enable > 0) {
1827 		addr = hsotg->regs + HFLBADDR;
1828 		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1829 			(unsigned long)addr, readl(addr));
1830 	}
1831 
1832 	addr = hsotg->regs + HPRT0;
1833 	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
1834 		(unsigned long)addr, readl(addr));
1835 
1836 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
1837 		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1838 		addr = hsotg->regs + HCCHAR(i);
1839 		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
1840 			(unsigned long)addr, readl(addr));
1841 		addr = hsotg->regs + HCSPLT(i);
1842 		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
1843 			(unsigned long)addr, readl(addr));
1844 		addr = hsotg->regs + HCINT(i);
1845 		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
1846 			(unsigned long)addr, readl(addr));
1847 		addr = hsotg->regs + HCINTMSK(i);
1848 		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
1849 			(unsigned long)addr, readl(addr));
1850 		addr = hsotg->regs + HCTSIZ(i);
1851 		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
1852 			(unsigned long)addr, readl(addr));
1853 		addr = hsotg->regs + HCDMA(i);
1854 		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
1855 			(unsigned long)addr, readl(addr));
1856 		if (hsotg->core_params->dma_desc_enable > 0) {
1857 			addr = hsotg->regs + HCDMAB(i);
1858 			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
1859 				(unsigned long)addr, readl(addr));
1860 		}
1861 	}
1862 #endif
1863 }
1864 
1865 /**
1866  * dwc2_dump_global_registers() - Prints the core global registers
1867  *
1868  * @hsotg: Programming view of DWC_otg controller
1869  *
1870  * NOTE: This function will be removed once the peripheral controller code
1871  * is integrated and the driver is stable
1872  */
1873 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1874 {
1875 #ifdef DEBUG
1876 	u32 __iomem *addr;
1877 
1878 	dev_dbg(hsotg->dev, "Core Global Registers\n");
1879 	addr = hsotg->regs + GOTGCTL;
1880 	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
1881 		(unsigned long)addr, readl(addr));
1882 	addr = hsotg->regs + GOTGINT;
1883 	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
1884 		(unsigned long)addr, readl(addr));
1885 	addr = hsotg->regs + GAHBCFG;
1886 	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
1887 		(unsigned long)addr, readl(addr));
1888 	addr = hsotg->regs + GUSBCFG;
1889 	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
1890 		(unsigned long)addr, readl(addr));
1891 	addr = hsotg->regs + GRSTCTL;
1892 	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
1893 		(unsigned long)addr, readl(addr));
1894 	addr = hsotg->regs + GINTSTS;
1895 	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
1896 		(unsigned long)addr, readl(addr));
1897 	addr = hsotg->regs + GINTMSK;
1898 	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
1899 		(unsigned long)addr, readl(addr));
1900 	addr = hsotg->regs + GRXSTSR;
1901 	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
1902 		(unsigned long)addr, readl(addr));
1903 	addr = hsotg->regs + GRXFSIZ;
1904 	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
1905 		(unsigned long)addr, readl(addr));
1906 	addr = hsotg->regs + GNPTXFSIZ;
1907 	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
1908 		(unsigned long)addr, readl(addr));
1909 	addr = hsotg->regs + GNPTXSTS;
1910 	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
1911 		(unsigned long)addr, readl(addr));
1912 	addr = hsotg->regs + GI2CCTL;
1913 	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
1914 		(unsigned long)addr, readl(addr));
1915 	addr = hsotg->regs + GPVNDCTL;
1916 	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
1917 		(unsigned long)addr, readl(addr));
1918 	addr = hsotg->regs + GGPIO;
1919 	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
1920 		(unsigned long)addr, readl(addr));
1921 	addr = hsotg->regs + GUID;
1922 	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
1923 		(unsigned long)addr, readl(addr));
1924 	addr = hsotg->regs + GSNPSID;
1925 	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
1926 		(unsigned long)addr, readl(addr));
1927 	addr = hsotg->regs + GHWCFG1;
1928 	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
1929 		(unsigned long)addr, readl(addr));
1930 	addr = hsotg->regs + GHWCFG2;
1931 	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
1932 		(unsigned long)addr, readl(addr));
1933 	addr = hsotg->regs + GHWCFG3;
1934 	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
1935 		(unsigned long)addr, readl(addr));
1936 	addr = hsotg->regs + GHWCFG4;
1937 	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
1938 		(unsigned long)addr, readl(addr));
1939 	addr = hsotg->regs + GLPMCFG;
1940 	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
1941 		(unsigned long)addr, readl(addr));
1942 	addr = hsotg->regs + GPWRDN;
1943 	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
1944 		(unsigned long)addr, readl(addr));
1945 	addr = hsotg->regs + GDFIFOCFG;
1946 	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
1947 		(unsigned long)addr, readl(addr));
1948 	addr = hsotg->regs + HPTXFSIZ;
1949 	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
1950 		(unsigned long)addr, readl(addr));
1951 
1952 	addr = hsotg->regs + PCGCTL;
1953 	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
1954 		(unsigned long)addr, readl(addr));
1955 #endif
1956 }
1957 
1958 /**
1959  * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1960  *
1961  * @hsotg: Programming view of DWC_otg controller
1962  * @num:   Tx FIFO to flush
1963  */
1964 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1965 {
1966 	u32 greset;
1967 	int count = 0;
1968 
1969 	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1970 
1971 	greset = GRSTCTL_TXFFLSH;
1972 	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1973 	writel(greset, hsotg->regs + GRSTCTL);
1974 
1975 	do {
1976 		greset = readl(hsotg->regs + GRSTCTL);
1977 		if (++count > 10000) {
1978 			dev_warn(hsotg->dev,
1979 				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1980 				 __func__, greset,
1981 				 readl(hsotg->regs + GNPTXSTS));
1982 			break;
1983 		}
1984 		udelay(1);
1985 	} while (greset & GRSTCTL_TXFFLSH);
1986 
1987 	/* Wait for at least 3 PHY Clocks */
1988 	udelay(1);
1989 }
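
/*
 * Usage note (hedged): the num argument selects GRSTCTL.TxFNum. Passing
 * 0x10 requests a flush of all Tx FIFOs, which is what core/host
 * initialization typically does right after sizing the FIFOs, e.g.:
 *
 *	dwc2_flush_tx_fifo(hsotg, 0x10);	... 0x10 == flush all Tx FIFOs
 *	dwc2_flush_rx_fifo(hsotg);
 */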
1990 
1991 /**
1992  * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1993  *
1994  * @hsotg: Programming view of DWC_otg controller
1995  */
1996 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1997 {
1998 	u32 greset;
1999 	int count = 0;
2000 
2001 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
2002 
2003 	greset = GRSTCTL_RXFFLSH;
2004 	writel(greset, hsotg->regs + GRSTCTL);
2005 
2006 	do {
2007 		greset = readl(hsotg->regs + GRSTCTL);
2008 		if (++count > 10000) {
2009 			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2010 				 __func__, greset);
2011 			break;
2012 		}
2013 		udelay(1);
2014 	} while (greset & GRSTCTL_RXFFLSH);
2015 
2016 	/* Wait for at least 3 PHY Clocks */
2017 	udelay(1);
2018 }
2019 
2020 #define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
2021 
2022 /* Parameter access functions */
2023 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
2024 {
2025 	int valid = 1;
2026 
2027 	switch (val) {
2028 	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
2029 		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
2030 			valid = 0;
2031 		break;
2032 	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
2033 		switch (hsotg->hw_params.op_mode) {
2034 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2035 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2036 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2037 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2038 			break;
2039 		default:
2040 			valid = 0;
2041 			break;
2042 		}
2043 		break;
2044 	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2045 		/* always valid */
2046 		break;
2047 	default:
2048 		valid = 0;
2049 		break;
2050 	}
2051 
2052 	if (!valid) {
2053 		if (val >= 0)
2054 			dev_err(hsotg->dev,
2055 				"%d invalid for otg_cap parameter. Check HW configuration.\n",
2056 				val);
2057 		switch (hsotg->hw_params.op_mode) {
2058 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2059 			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2060 			break;
2061 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2062 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2063 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2064 			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2065 			break;
2066 		default:
2067 			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2068 			break;
2069 		}
2070 		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2071 	}
2072 
2073 	hsotg->core_params->otg_cap = val;
2074 }
2075 
2076 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2077 {
2078 	int valid = 1;
2079 
2080 	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2081 		valid = 0;
2082 	if (val < 0)
2083 		valid = 0;
2084 
2085 	if (!valid) {
2086 		if (val >= 0)
2087 			dev_err(hsotg->dev,
2088 				"%d invalid for dma_enable parameter. Check HW configuration.\n",
2089 				val);
2090 		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2091 		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2092 	}
2093 
2094 	hsotg->core_params->dma_enable = val;
2095 }
2096 
2097 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2098 {
2099 	int valid = 1;
2100 
2101 	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2102 			!hsotg->hw_params.dma_desc_enable))
2103 		valid = 0;
2104 	if (val < 0)
2105 		valid = 0;
2106 
2107 	if (!valid) {
2108 		if (val >= 0)
2109 			dev_err(hsotg->dev,
2110 				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2111 				val);
2112 		val = (hsotg->core_params->dma_enable > 0 &&
2113 			hsotg->hw_params.dma_desc_enable);
2114 		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2115 	}
2116 
2117 	hsotg->core_params->dma_desc_enable = val;
2118 }
2119 
2120 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2121 						 int val)
2122 {
2123 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2124 		if (val >= 0) {
2125 			dev_err(hsotg->dev,
2126 				"Wrong value for host_support_fs_ls_low_power\n");
2127 			dev_err(hsotg->dev,
2128 				"host_support_fs_ls_low_power must be 0 or 1\n");
2129 		}
2130 		val = 0;
2131 		dev_dbg(hsotg->dev,
2132 			"Setting host_support_fs_ls_low_power to %d\n", val);
2133 	}
2134 
2135 	hsotg->core_params->host_support_fs_ls_low_power = val;
2136 }
2137 
2138 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2139 {
2140 	int valid = 1;
2141 
2142 	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2143 		valid = 0;
2144 	if (val < 0)
2145 		valid = 0;
2146 
2147 	if (!valid) {
2148 		if (val >= 0)
2149 			dev_err(hsotg->dev,
2150 				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2151 				val);
2152 		val = hsotg->hw_params.enable_dynamic_fifo;
2153 		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2154 	}
2155 
2156 	hsotg->core_params->enable_dynamic_fifo = val;
2157 }
2158 
2159 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2160 {
2161 	int valid = 1;
2162 
2163 	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2164 		valid = 0;
2165 
2166 	if (!valid) {
2167 		if (val >= 0)
2168 			dev_err(hsotg->dev,
2169 				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2170 				val);
2171 		val = hsotg->hw_params.host_rx_fifo_size;
2172 		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2173 	}
2174 
2175 	hsotg->core_params->host_rx_fifo_size = val;
2176 }
2177 
2178 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2179 {
2180 	int valid = 1;
2181 
2182 	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2183 		valid = 0;
2184 
2185 	if (!valid) {
2186 		if (val >= 0)
2187 			dev_err(hsotg->dev,
2188 				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2189 				val);
2190 		val = hsotg->hw_params.host_nperio_tx_fifo_size;
2191 		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2192 			val);
2193 	}
2194 
2195 	hsotg->core_params->host_nperio_tx_fifo_size = val;
2196 }
2197 
2198 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2199 {
2200 	int valid = 1;
2201 
2202 	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2203 		valid = 0;
2204 
2205 	if (!valid) {
2206 		if (val >= 0)
2207 			dev_err(hsotg->dev,
2208 				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2209 				val);
2210 		val = hsotg->hw_params.host_perio_tx_fifo_size;
2211 		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2212 			val);
2213 	}
2214 
2215 	hsotg->core_params->host_perio_tx_fifo_size = val;
2216 }
2217 
2218 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2219 {
2220 	int valid = 1;
2221 
2222 	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2223 		valid = 0;
2224 
2225 	if (!valid) {
2226 		if (val >= 0)
2227 			dev_err(hsotg->dev,
2228 				"%d invalid for max_transfer_size. Check HW configuration.\n",
2229 				val);
2230 		val = hsotg->hw_params.max_transfer_size;
2231 		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2232 	}
2233 
2234 	hsotg->core_params->max_transfer_size = val;
2235 }
2236 
2237 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2238 {
2239 	int valid = 1;
2240 
2241 	if (val < 15 || val > hsotg->hw_params.max_packet_count)
2242 		valid = 0;
2243 
2244 	if (!valid) {
2245 		if (val >= 0)
2246 			dev_err(hsotg->dev,
2247 				"%d invalid for max_packet_count. Check HW configuration.\n",
2248 				val);
2249 		val = hsotg->hw_params.max_packet_count;
2250 		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2251 	}
2252 
2253 	hsotg->core_params->max_packet_count = val;
2254 }
2255 
2256 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2257 {
2258 	int valid = 1;
2259 
2260 	if (val < 1 || val > hsotg->hw_params.host_channels)
2261 		valid = 0;
2262 
2263 	if (!valid) {
2264 		if (val >= 0)
2265 			dev_err(hsotg->dev,
2266 				"%d invalid for host_channels. Check HW configuration.\n",
2267 				val);
2268 		val = hsotg->hw_params.host_channels;
2269 		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2270 	}
2271 
2272 	hsotg->core_params->host_channels = val;
2273 }
2274 
2275 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2276 {
2277 	int valid = 0;
2278 	u32 hs_phy_type, fs_phy_type;
2279 
2280 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2281 			       DWC2_PHY_TYPE_PARAM_ULPI)) {
2282 		if (val >= 0) {
2283 			dev_err(hsotg->dev, "Wrong value for phy_type\n");
2284 			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2285 		}
2286 
2287 		valid = 0;
2288 	}
2289 
2290 	hs_phy_type = hsotg->hw_params.hs_phy_type;
2291 	fs_phy_type = hsotg->hw_params.fs_phy_type;
2292 	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2293 	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2294 	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2295 		valid = 1;
2296 	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2297 		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2298 		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2299 		valid = 1;
2300 	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2301 		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2302 		valid = 1;
2303 
2304 	if (!valid) {
2305 		if (val >= 0)
2306 			dev_err(hsotg->dev,
2307 				"%d invalid for phy_type. Check HW configuration.\n",
2308 				val);
2309 		val = DWC2_PHY_TYPE_PARAM_FS;
2310 		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2311 			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2312 			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2313 				val = DWC2_PHY_TYPE_PARAM_UTMI;
2314 			else
2315 				val = DWC2_PHY_TYPE_PARAM_ULPI;
2316 		}
2317 		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2318 	}
2319 
2320 	hsotg->core_params->phy_type = val;
2321 }
2322 
2323 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2324 {
2325 	return hsotg->core_params->phy_type;
2326 }
2327 
2328 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2329 {
2330 	int valid = 1;
2331 
2332 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2333 		if (val >= 0) {
2334 			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2335 			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2336 		}
2337 		valid = 0;
2338 	}
2339 
2340 	if (val == DWC2_SPEED_PARAM_HIGH &&
2341 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2342 		valid = 0;
2343 
2344 	if (!valid) {
2345 		if (val >= 0)
2346 			dev_err(hsotg->dev,
2347 				"%d invalid for speed parameter. Check HW configuration.\n",
2348 				val);
2349 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2350 				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2351 		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2352 	}
2353 
2354 	hsotg->core_params->speed = val;
2355 }
2356 
2357 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2358 {
2359 	int valid = 1;
2360 
2361 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2362 			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2363 		if (val >= 0) {
2364 			dev_err(hsotg->dev,
2365 				"Wrong value for host_ls_low_power_phy_clk parameter\n");
2366 			dev_err(hsotg->dev,
2367 				"host_ls_low_power_phy_clk must be 0 or 1\n");
2368 		}
2369 		valid = 0;
2370 	}
2371 
2372 	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2373 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2374 		valid = 0;
2375 
2376 	if (!valid) {
2377 		if (val >= 0)
2378 			dev_err(hsotg->dev,
2379 				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2380 				val);
2381 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2382 			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2383 			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2384 		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2385 			val);
2386 	}
2387 
2388 	hsotg->core_params->host_ls_low_power_phy_clk = val;
2389 }
2390 
2391 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2392 {
2393 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2394 		if (val >= 0) {
2395 			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2396 			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2397 		}
2398 		val = 0;
2399 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2400 	}
2401 
2402 	hsotg->core_params->phy_ulpi_ddr = val;
2403 }
2404 
2405 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2406 {
2407 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2408 		if (val >= 0) {
2409 			dev_err(hsotg->dev,
2410 				"Wrong value for phy_ulpi_ext_vbus\n");
2411 			dev_err(hsotg->dev,
2412 				"phy_ulpi_ext_vbus must be 0 or 1\n");
2413 		}
2414 		val = 0;
2415 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2416 	}
2417 
2418 	hsotg->core_params->phy_ulpi_ext_vbus = val;
2419 }
2420 
2421 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2422 {
2423 	int valid = 0;
2424 
2425 	switch (hsotg->hw_params.utmi_phy_data_width) {
2426 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2427 		valid = (val == 8);
2428 		break;
2429 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2430 		valid = (val == 16);
2431 		break;
2432 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2433 		valid = (val == 8 || val == 16);
2434 		break;
2435 	}
2436 
2437 	if (!valid) {
2438 		if (val >= 0) {
2439 			dev_err(hsotg->dev,
2440 				"%d invalid for phy_utmi_width. Check HW configuration.\n",
2441 				val);
2442 		}
2443 		val = (hsotg->hw_params.utmi_phy_data_width ==
2444 		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2445 		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2446 	}
2447 
2448 	hsotg->core_params->phy_utmi_width = val;
2449 }
2450 
2451 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2452 {
2453 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2454 		if (val >= 0) {
2455 			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2456 			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2457 		}
2458 		val = 0;
2459 		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2460 	}
2461 
2462 	hsotg->core_params->ulpi_fs_ls = val;
2463 }
2464 
2465 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2466 {
2467 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2468 		if (val >= 0) {
2469 			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2470 			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2471 		}
2472 		val = 0;
2473 		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2474 	}
2475 
2476 	hsotg->core_params->ts_dline = val;
2477 }
2478 
2479 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2480 {
2481 	int valid = 1;
2482 
2483 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2484 		if (val >= 0) {
2485 			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2486 			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2487 		}
2488 
2489 		valid = 0;
2490 	}
2491 
2492 	if (val == 1 && !(hsotg->hw_params.i2c_enable))
2493 		valid = 0;
2494 
2495 	if (!valid) {
2496 		if (val >= 0)
2497 			dev_err(hsotg->dev,
2498 				"%d invalid for i2c_enable. Check HW configuration.\n",
2499 				val);
2500 		val = hsotg->hw_params.i2c_enable;
2501 		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2502 	}
2503 
2504 	hsotg->core_params->i2c_enable = val;
2505 }
2506 
2507 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2508 {
2509 	int valid = 1;
2510 
2511 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2512 		if (val >= 0) {
2513 			dev_err(hsotg->dev,
2514 				"Wrong value for en_multiple_tx_fifo\n");
2515 			dev_err(hsotg->dev,
2516 				"en_multiple_tx_fifo must be 0 or 1\n");
2517 		}
2518 		valid = 0;
2519 	}
2520 
2521 	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2522 		valid = 0;
2523 
2524 	if (!valid) {
2525 		if (val >= 0)
2526 			dev_err(hsotg->dev,
2527 				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2528 				val);
2529 		val = hsotg->hw_params.en_multiple_tx_fifo;
2530 		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2531 	}
2532 
2533 	hsotg->core_params->en_multiple_tx_fifo = val;
2534 }
2535 
2536 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2537 {
2538 	int valid = 1;
2539 
2540 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2541 		if (val >= 0) {
2542 			dev_err(hsotg->dev,
2543 				"'%d' invalid for parameter reload_ctl\n", val);
2544 			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2545 		}
2546 		valid = 0;
2547 	}
2548 
2549 	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2550 		valid = 0;
2551 
2552 	if (!valid) {
2553 		if (val >= 0)
2554 			dev_err(hsotg->dev,
2555 				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
2556 				val);
2557 		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2558 		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2559 	}
2560 
2561 	hsotg->core_params->reload_ctl = val;
2562 }
2563 
2564 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2565 {
2566 	if (val != -1)
2567 		hsotg->core_params->ahbcfg = val;
2568 	else
2569 		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2570 						GAHBCFG_HBSTLEN_SHIFT;
2571 }
2572 
2573 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2574 {
2575 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2576 		if (val >= 0) {
2577 			dev_err(hsotg->dev,
2578 				"'%d' invalid for parameter otg_ver\n", val);
2579 			dev_err(hsotg->dev,
2580 				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2581 		}
2582 		val = 0;
2583 		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2584 	}
2585 
2586 	hsotg->core_params->otg_ver = val;
2587 }
2588 
2589 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2590 {
2591 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2592 		if (val >= 0) {
2593 			dev_err(hsotg->dev,
2594 				"'%d' invalid for parameter uframe_sched\n",
2595 				val);
2596 			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2597 		}
2598 		val = 1;
2599 		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2600 	}
2601 
2602 	hsotg->core_params->uframe_sched = val;
2603 }
2604 
2605 /*
2606  * This function is called during module initialization to pass module parameters
2607  * for the DWC_otg core.
2608  */
2609 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2610 			 const struct dwc2_core_params *params)
2611 {
2612 	dev_dbg(hsotg->dev, "%s()\n", __func__);
2613 
2614 	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2615 	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2616 	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
2617 	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2618 			params->host_support_fs_ls_low_power);
2619 	dwc2_set_param_enable_dynamic_fifo(hsotg,
2620 			params->enable_dynamic_fifo);
2621 	dwc2_set_param_host_rx_fifo_size(hsotg,
2622 			params->host_rx_fifo_size);
2623 	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2624 			params->host_nperio_tx_fifo_size);
2625 	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2626 			params->host_perio_tx_fifo_size);
2627 	dwc2_set_param_max_transfer_size(hsotg,
2628 			params->max_transfer_size);
2629 	dwc2_set_param_max_packet_count(hsotg,
2630 			params->max_packet_count);
2631 	dwc2_set_param_host_channels(hsotg, params->host_channels);
2632 	dwc2_set_param_phy_type(hsotg, params->phy_type);
2633 	dwc2_set_param_speed(hsotg, params->speed);
2634 	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2635 			params->host_ls_low_power_phy_clk);
2636 	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2637 	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2638 			params->phy_ulpi_ext_vbus);
2639 	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2640 	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2641 	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2642 	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2643 	dwc2_set_param_en_multiple_tx_fifo(hsotg,
2644 			params->en_multiple_tx_fifo);
2645 	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2646 	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
2647 	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2648 	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
2649 }
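
/*
 * Example (hedged sketch, not taken from this file): bus glue typically
 * fills a struct dwc2_core_params where fields set to -1 are resolved by
 * the dwc2_set_param_*() helpers above from the autodetected
 * hsotg->hw_params values. The initializer name below is hypothetical.
 *
 *	static const struct dwc2_core_params params_example = {
 *		.otg_cap	 = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
 *		.dma_enable	 = -1,
 *		.dma_desc_enable = -1,
 *		.speed		 = -1,
 *	};
 *	dwc2_set_parameters(hsotg, &params_example);
 *
 * Negative values are rejected quietly and replaced with the detected
 * default, while out-of-range non-negative values additionally trigger a
 * dev_err() before falling back.
 */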
2650 
2651 /**
2652  * dwc2_get_hwparams() - During device initialization, read various
2653  * hardware configuration registers and interpret the contents
2654  */
2655 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2656 {
2657 	struct dwc2_hw_params *hw = &hsotg->hw_params;
2658 	unsigned width;
2659 	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
2660 	u32 hptxfsiz, grxfsiz, gnptxfsiz;
2661 	u32 gusbcfg;
2662 
2663 	/*
2664 	 * Attempt to ensure this device is really a DWC_otg Controller.
2665 	 * Read and verify the GSNPSID register contents. The value should be
2666 	 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
2667 	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
2668 	 */
2669 	hw->snpsid = readl(hsotg->regs + GSNPSID);
2670 	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2671 	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
2672 		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2673 			hw->snpsid);
2674 		return -ENODEV;
2675 	}
2676 
2677 	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2678 		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2679 		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2680 
2681 	hwcfg1 = readl(hsotg->regs + GHWCFG1);
2682 	hwcfg2 = readl(hsotg->regs + GHWCFG2);
2683 	hwcfg3 = readl(hsotg->regs + GHWCFG3);
2684 	hwcfg4 = readl(hsotg->regs + GHWCFG4);
2685 	grxfsiz = readl(hsotg->regs + GRXFSIZ);
2686 
2687 	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
2688 	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2689 	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2690 	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2691 	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2692 
2693 	/* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
2694 	gusbcfg = readl(hsotg->regs + GUSBCFG);
2695 	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2696 	writel(gusbcfg, hsotg->regs + GUSBCFG);
2697 	usleep_range(100000, 150000);
2698 
2699 	gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
2700 	hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
2701 	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2702 	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2703 	gusbcfg = readl(hsotg->regs + GUSBCFG);
2704 	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2705 	writel(gusbcfg, hsotg->regs + GUSBCFG);
2706 	usleep_range(100000, 150000);
2707 
2708 	/* hwcfg2 */
2709 	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2710 		      GHWCFG2_OP_MODE_SHIFT;
2711 	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2712 		   GHWCFG2_ARCHITECTURE_SHIFT;
2713 	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2714 	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2715 				GHWCFG2_NUM_HOST_CHAN_SHIFT);
2716 	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2717 			  GHWCFG2_HS_PHY_TYPE_SHIFT;
2718 	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2719 			  GHWCFG2_FS_PHY_TYPE_SHIFT;
2720 	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2721 			 GHWCFG2_NUM_DEV_EP_SHIFT;
2722 	hw->nperio_tx_q_depth =
2723 		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2724 		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2725 	hw->host_perio_tx_q_depth =
2726 		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2727 		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2728 	hw->dev_token_q_depth =
2729 		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2730 		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2731 
2732 	/* hwcfg3 */
2733 	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2734 		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2735 	hw->max_transfer_size = (1 << (width + 11)) - 1;
2736 	/*
2737 	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
2738 	 * coherent buffers with this size, and if it's too large we can
2739 	 * exhaust the coherent DMA pool.
2740 	 */
2741 	if (hw->max_transfer_size > 65535)
2742 		hw->max_transfer_size = 65535;
2743 	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2744 		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2745 	hw->max_packet_count = (1 << (width + 4)) - 1;
2746 	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2747 	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2748 			      GHWCFG3_DFIFO_DEPTH_SHIFT;
2749 
2750 	/* hwcfg4 */
2751 	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2752 	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2753 				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2754 	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2755 	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
2756 	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2757 				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
2758 
2759 	/* fifo sizes */
2760 	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2761 				GRXFSIZ_DEPTH_SHIFT;
2762 	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2763 				       FIFOSIZE_DEPTH_SHIFT;
2764 	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2765 				      FIFOSIZE_DEPTH_SHIFT;
2766 
2767 	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2768 	dev_dbg(hsotg->dev, "  op_mode=%d\n",
2769 		hw->op_mode);
2770 	dev_dbg(hsotg->dev, "  arch=%d\n",
2771 		hw->arch);
2772 	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
2773 		hw->dma_desc_enable);
2774 	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
2775 		hw->power_optimized);
2776 	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
2777 		hw->i2c_enable);
2778 	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
2779 		hw->hs_phy_type);
2780 	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
2781 		hw->fs_phy_type);
2782 	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
2783 		hw->utmi_phy_data_width);
2784 	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
2785 		hw->num_dev_ep);
2786 	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
2787 		hw->num_dev_perio_in_ep);
2788 	dev_dbg(hsotg->dev, "  host_channels=%d\n",
2789 		hw->host_channels);
2790 	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
2791 		hw->max_transfer_size);
2792 	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
2793 		hw->max_packet_count);
2794 	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
2795 		hw->nperio_tx_q_depth);
2796 	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
2797 		hw->host_perio_tx_q_depth);
2798 	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
2799 		hw->dev_token_q_depth);
2800 	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
2801 		hw->enable_dynamic_fifo);
2802 	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
2803 		hw->en_multiple_tx_fifo);
2804 	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
2805 		hw->total_fifo_size);
2806 	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
2807 		hw->host_rx_fifo_size);
2808 	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
2809 		hw->host_nperio_tx_fifo_size);
2810 	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
2811 		hw->host_perio_tx_fifo_size);
2812 	dev_dbg(hsotg->dev, "\n");
2813 
2814 	return 0;
2815 }
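
/*
 * Worked example (illustrative only): a GSNPSID reading of 0x4f54280a
 * passes the "OT2" check above and is printed by the dev_dbg() as core
 * release "2.80a" (nibbles 2, 8, 0, 0xa of the ID).
 */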
2816 
2817 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2818 {
2819 	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
2820 }
2821 
2822 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
2823 {
2824 	if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2825 		return false;
2826 	else
2827 		return true;
2828 }
2829 
2830 /**
2831  * dwc2_enable_global_interrupts() - Enables the controller's Global
2832  * Interrupt in the AHB Config register
2833  *
2834  * @hsotg: Programming view of DWC_otg controller
2835  */
2836 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2837 {
2838 	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2839 
2840 	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2841 	writel(ahbcfg, hsotg->regs + GAHBCFG);
2842 }
2843 
2844 /**
2845  * dwc2_disable_global_interrupts() - Disables the controller's Global
2846  * Interrupt in the AHB Config register
2847  *
2848  * @hsotg: Programming view of DWC_otg controller
2849  */
2850 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2851 {
2852 	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2853 
2854 	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2855 	writel(ahbcfg, hsotg->regs + GAHBCFG);
2856 }
2857 
2858 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2859 MODULE_AUTHOR("Synopsys, Inc.");
2860 MODULE_LICENSE("Dual BSD/GPL");
2861