1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2010 Google, Inc.
4  */
5 
6 #include <linux/delay.h>
7 #include <linux/err.h>
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/iopoll.h>
11 #include <linux/platform_device.h>
12 #include <linux/clk.h>
13 #include <linux/io.h>
14 #include <linux/of.h>
15 #include <linux/of_device.h>
16 #include <linux/pinctrl/consumer.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/reset.h>
19 #include <linux/mmc/card.h>
20 #include <linux/mmc/host.h>
21 #include <linux/mmc/mmc.h>
22 #include <linux/mmc/slot-gpio.h>
23 #include <linux/gpio/consumer.h>
24 #include <linux/ktime.h>
25 
26 #include "sdhci-pltfm.h"
27 #include "cqhci.h"
28 
29 /* Tegra SDHOST controller vendor register definitions */
30 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
31 #define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
32 #define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
33 #define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
34 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
35 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
36 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
37 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
38 
39 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
40 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
41 
42 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
43 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
44 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
45 
46 #define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
47 #define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
48 #define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
49 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
50 #define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
51 
52 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
53 #define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
54 
55 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
56 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
57 
58 #define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
59 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
60 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
61 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
62 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
63 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
64 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
65 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
66 #define TRIES_128					2
67 #define TRIES_256					4
68 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
69 
70 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
71 #define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
72 #define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
73 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
74 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
75 #define TUNING_WORD_BIT_SIZE				32
76 
77 #define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
78 #define SDHCI_AUTO_CAL_START				BIT(31)
79 #define SDHCI_AUTO_CAL_ENABLE				BIT(29)
80 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
81 
82 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
83 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
84 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
85 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
86 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
87 
88 #define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
89 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
90 
91 #define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
92 #define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
93 #define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
94 #define NVQUIRK_ENABLE_SDR50				BIT(3)
95 #define NVQUIRK_ENABLE_SDR104				BIT(4)
96 #define NVQUIRK_ENABLE_DDR50				BIT(5)
97 #define NVQUIRK_HAS_PADCALIB				BIT(6)
98 #define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
99 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
100 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
101 
102 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
103 #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
104 
105 struct sdhci_tegra_soc_data {
106 	const struct sdhci_pltfm_data *pdata;
107 	u32 nvquirks;
108 	u8 min_tap_delay;
109 	u8 max_tap_delay;
110 };
111 
112 /* Magic pull up and pull down pad calibration offsets */
113 struct sdhci_tegra_autocal_offsets {
114 	u32 pull_up_3v3;
115 	u32 pull_down_3v3;
116 	u32 pull_up_3v3_timeout;
117 	u32 pull_down_3v3_timeout;
118 	u32 pull_up_1v8;
119 	u32 pull_down_1v8;
120 	u32 pull_up_1v8_timeout;
121 	u32 pull_down_1v8_timeout;
122 	u32 pull_up_sdr104;
123 	u32 pull_down_sdr104;
124 	u32 pull_up_hs400;
125 	u32 pull_down_hs400;
126 };
127 
128 struct sdhci_tegra {
129 	const struct sdhci_tegra_soc_data *soc_data;
130 	struct gpio_desc *power_gpio;
131 	bool ddr_signaling;
132 	bool pad_calib_required;
133 	bool pad_control_available;
134 
135 	struct reset_control *rst;
136 	struct pinctrl *pinctrl_sdmmc;
137 	struct pinctrl_state *pinctrl_state_3v3;
138 	struct pinctrl_state *pinctrl_state_1v8;
139 	struct pinctrl_state *pinctrl_state_3v3_drv;
140 	struct pinctrl_state *pinctrl_state_1v8_drv;
141 
142 	struct sdhci_tegra_autocal_offsets autocal_offsets;
143 	ktime_t last_calib;
144 
145 	u32 default_tap;
146 	u32 default_trim;
147 	u32 dqs_trim;
148 	bool enable_hwcq;
149 	unsigned long curr_clk_rate;
150 	u8 tuned_tap_delay;
151 };
152 
153 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
154 {
155 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
156 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
157 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
158 
159 	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
160 			(reg == SDHCI_HOST_VERSION))) {
161 		/* Erratum: Version register is invalid in HW. */
162 		return SDHCI_SPEC_200;
163 	}
164 
165 	return readw(host->ioaddr + reg);
166 }
167 
168 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
169 {
170 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
171 
172 	switch (reg) {
173 	case SDHCI_TRANSFER_MODE:
174 		/*
175 		 * Postpone this write; it must be done together with the
176 		 * command write that follows below.
177 		 */
178 		pltfm_host->xfer_mode_shadow = val;
179 		return;
180 	case SDHCI_COMMAND:
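		/*
		 * The shadowed transfer mode fills the low 16 bits and the
		 * command the high 16 bits of this single 32-bit write, so
		 * both registers are programmed in one access.
		 */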
181 		writel((val << 16) | pltfm_host->xfer_mode_shadow,
182 			host->ioaddr + SDHCI_TRANSFER_MODE);
183 		return;
184 	}
185 
186 	writew(val, host->ioaddr + reg);
187 }
188 
189 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
190 {
191 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
192 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
193 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
194 
195 	/* Seems like we're getting spurious timeout and crc errors, so
196 	 * disable signalling of them. In case of real errors software
197 	 * timers should take care of eventually detecting them.
198 	 */
199 	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
200 		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
201 
202 	writel(val, host->ioaddr + reg);
203 
204 	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
205 			(reg == SDHCI_INT_ENABLE))) {
206 		/* Erratum: Must enable block gap interrupt detection */
207 		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
208 		if (val & SDHCI_INT_CARD_INT)
209 			gap_ctrl |= 0x8;
210 		else
211 			gap_ctrl &= ~0x8;
212 		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
213 	}
214 }
215 
216 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
217 {
218 	bool status;
219 	u32 reg;
220 
221 	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
222 	status = !!(reg & SDHCI_CLOCK_CARD_EN);
223 
224 	if (status == enable)
225 		return status;
226 
227 	if (enable)
228 		reg |= SDHCI_CLOCK_CARD_EN;
229 	else
230 		reg &= ~SDHCI_CLOCK_CARD_EN;
231 
232 	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
233 
234 	return status;
235 }
236 
237 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
238 {
239 	bool is_tuning_cmd = false;
240 	bool clk_enabled;
241 	u8 cmd;
242 
243 	if (reg == SDHCI_COMMAND) {
244 		cmd = SDHCI_GET_CMD(val);
245 		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
246 				cmd == MMC_SEND_TUNING_BLOCK_HS200;
247 	}
248 
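	/*
	 * Gate the card clock around tuning commands: issue the command with
	 * the card clock disabled, then reset the CMD and DATA lines and
	 * restore the previous clock state.
	 */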
249 	if (is_tuning_cmd)
250 		clk_enabled = tegra_sdhci_configure_card_clk(host, false);
251 
252 	writew(val, host->ioaddr + reg);
253 
254 	if (is_tuning_cmd) {
255 		udelay(1);
256 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
257 		tegra_sdhci_configure_card_clk(host, clk_enabled);
258 	}
259 }
260 
261 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
262 {
263 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
264 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
265 	int has_1v8, has_3v3;
266 
267 	/*
268 	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
269 	 * voltage configuration in order to perform voltage switching. This
270 	 * means that valid pinctrl info is required on SDHCI instances capable
271 	 * of performing voltage switching. Whether or not an SDHCI instance is
272 	 * capable of voltage switching is determined based on the regulator.
273 	 */
274 
275 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
276 		return true;
277 
278 	if (IS_ERR(host->mmc->supply.vqmmc))
279 		return false;
280 
281 	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
282 						 1700000, 1950000);
283 
284 	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
285 						 2700000, 3600000);
286 
287 	if (has_1v8 == 1 && has_3v3 == 1)
288 		return tegra_host->pad_control_available;
289 
290 	/* Fixed voltage, no pad control required. */
291 	return true;
292 }
293 
294 static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
295 {
296 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
297 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
298 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
299 	bool card_clk_enabled = false;
300 	u32 reg;
301 
302 	/*
303 	 * Touching the tap values is a bit tricky on some SoC generations.
304 	 * The quirk enables a workaround for a glitch that sometimes occurs if
305 	 * the tap values are changed.
306 	 */
307 
308 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
309 		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
310 
311 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
312 	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
313 	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
314 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
315 
316 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
317 	    card_clk_enabled) {
318 		udelay(1);
319 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
320 		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
321 	}
322 }
323 
324 static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
325 					      struct mmc_ios *ios)
326 {
327 	struct sdhci_host *host = mmc_priv(mmc);
328 	u32 val;
329 
330 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
331 
332 	if (ios->enhanced_strobe)
333 		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
334 	else
335 		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
336 
337 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
338 
339 }
340 
341 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
342 {
343 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
344 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
345 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
346 	u32 misc_ctrl, clk_ctrl, pad_ctrl;
347 
348 	sdhci_reset(host, mask);
349 
350 	if (!(mask & SDHCI_RESET_ALL))
351 		return;
352 
353 	tegra_sdhci_set_tap(host, tegra_host->default_tap);
354 
355 	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
356 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
357 
358 	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
359 		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
360 		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
361 		       SDHCI_MISC_CTRL_ENABLE_SDR104);
362 
363 	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
364 		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
365 
366 	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
367 		/* Erratum: Enable SDHCI spec v3.00 support */
368 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
369 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
370 		/* Advertise UHS modes as supported by host */
371 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
372 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
373 		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
374 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
375 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
376 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
377 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
378 			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
379 	}
380 
381 	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
382 
383 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
384 	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
385 
386 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
387 		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
388 		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
389 		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
390 		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
391 
392 		tegra_host->pad_calib_required = true;
393 	}
394 
395 	tegra_host->ddr_signaling = false;
396 }
397 
398 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
399 {
400 	u32 val;
401 
402 	/*
403 	 * Enable or disable the additional I/O pad used by the drive strength
404 	 * calibration process.
405 	 */
406 	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
407 
408 	if (enable)
409 		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
410 	else
411 		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
412 
413 	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
414 
415 	if (enable)
416 		usleep_range(1, 2);
417 }
418 
419 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
420 					       u16 pdpu)
421 {
422 	u32 reg;
423 
424 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
425 	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
426 	reg |= pdpu;
427 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
428 }
429 
430 static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
431 				   bool state_drvupdn)
432 {
433 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
434 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
435 	struct sdhci_tegra_autocal_offsets *offsets =
436 						&tegra_host->autocal_offsets;
437 	struct pinctrl_state *pinctrl_drvupdn = NULL;
438 	int ret = 0;
439 	u8 drvup = 0, drvdn = 0;
440 	u32 reg;
441 
442 	if (!state_drvupdn) {
443 		/* PADS Drive Strength */
444 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
445 			if (tegra_host->pinctrl_state_1v8_drv) {
446 				pinctrl_drvupdn =
447 					tegra_host->pinctrl_state_1v8_drv;
448 			} else {
449 				drvup = offsets->pull_up_1v8_timeout;
450 				drvdn = offsets->pull_down_1v8_timeout;
451 			}
452 		} else {
453 			if (tegra_host->pinctrl_state_3v3_drv) {
454 				pinctrl_drvupdn =
455 					tegra_host->pinctrl_state_3v3_drv;
456 			} else {
457 				drvup = offsets->pull_up_3v3_timeout;
458 				drvdn = offsets->pull_down_3v3_timeout;
459 			}
460 		}
461 
462 		if (pinctrl_drvupdn != NULL) {
463 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
464 							pinctrl_drvupdn);
465 			if (ret < 0)
466 				dev_err(mmc_dev(host->mmc),
467 					"failed pads drvupdn, ret: %d\n", ret);
468 		} else if ((drvup) || (drvdn)) {
469 			reg = sdhci_readl(host,
470 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
471 			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
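			/*
			 * Place the fixed pull-down drive strength at bit 12
			 * and the pull-up drive strength at bit 20 within the
			 * DRVUPDN offset field.
			 */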
472 			reg |= (drvup << 20) | (drvdn << 12);
473 			sdhci_writel(host, reg,
474 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
475 		}
476 
477 	} else {
478 		/* Dual Voltage PADS Voltage selection */
479 		if (!tegra_host->pad_control_available)
480 			return 0;
481 
482 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
483 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
484 						tegra_host->pinctrl_state_1v8);
485 			if (ret < 0)
486 				dev_err(mmc_dev(host->mmc),
487 					"setting 1.8V failed, ret: %d\n", ret);
488 		} else {
489 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
490 						tegra_host->pinctrl_state_3v3);
491 			if (ret < 0)
492 				dev_err(mmc_dev(host->mmc),
493 					"setting 3.3V failed, ret: %d\n", ret);
494 		}
495 	}
496 
497 	return ret;
498 }
499 
500 static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
501 {
502 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
503 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
504 	struct sdhci_tegra_autocal_offsets offsets =
505 			tegra_host->autocal_offsets;
506 	struct mmc_ios *ios = &host->mmc->ios;
507 	bool card_clk_enabled;
508 	u16 pdpu;
509 	u32 reg;
510 	int ret;
511 
512 	switch (ios->timing) {
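	/*
	 * pdpu packs the pull-down offset into bits [15:8] and the pull-up
	 * offset into bits [7:0], matching SDHCI_AUTO_CAL_PDPU_OFFSET_MASK.
	 */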
513 	case MMC_TIMING_UHS_SDR104:
514 		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
515 		break;
516 	case MMC_TIMING_MMC_HS400:
517 		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
518 		break;
519 	default:
520 		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
521 			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
522 		else
523 			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
524 	}
525 
526 	/* Set initial offset before auto-calibration */
527 	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
528 
529 	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
530 
531 	tegra_sdhci_configure_cal_pad(host, true);
532 
533 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
534 	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
535 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
536 
537 	usleep_range(1, 2);
538 	/* 10 ms timeout */
539 	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
540 				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
541 				 1000, 10000);
542 
543 	tegra_sdhci_configure_cal_pad(host, false);
544 
545 	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
546 
547 	if (ret) {
548 		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
549 
550 		/* Disable automatic cal and use fixed Drive Strengths */
551 		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
552 		reg &= ~SDHCI_AUTO_CAL_ENABLE;
553 		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
554 
555 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
556 		if (ret < 0)
557 			dev_err(mmc_dev(host->mmc),
558 				"Setting drive strengths failed: %d\n", ret);
559 	}
560 }
561 
562 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
563 {
564 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
565 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
566 	struct sdhci_tegra_autocal_offsets *autocal =
567 			&tegra_host->autocal_offsets;
568 	int err;
569 
570 	err = device_property_read_u32(host->mmc->parent,
571 			"nvidia,pad-autocal-pull-up-offset-3v3",
572 			&autocal->pull_up_3v3);
573 	if (err)
574 		autocal->pull_up_3v3 = 0;
575 
576 	err = device_property_read_u32(host->mmc->parent,
577 			"nvidia,pad-autocal-pull-down-offset-3v3",
578 			&autocal->pull_down_3v3);
579 	if (err)
580 		autocal->pull_down_3v3 = 0;
581 
582 	err = device_property_read_u32(host->mmc->parent,
583 			"nvidia,pad-autocal-pull-up-offset-1v8",
584 			&autocal->pull_up_1v8);
585 	if (err)
586 		autocal->pull_up_1v8 = 0;
587 
588 	err = device_property_read_u32(host->mmc->parent,
589 			"nvidia,pad-autocal-pull-down-offset-1v8",
590 			&autocal->pull_down_1v8);
591 	if (err)
592 		autocal->pull_down_1v8 = 0;
593 
594 	err = device_property_read_u32(host->mmc->parent,
595 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
596 			&autocal->pull_up_3v3_timeout);
597 	if (err) {
598 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
599 			(tegra_host->pinctrl_state_3v3_drv == NULL))
600 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
601 				mmc_hostname(host->mmc));
602 		autocal->pull_up_3v3_timeout = 0;
603 	}
604 
605 	err = device_property_read_u32(host->mmc->parent,
606 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
607 			&autocal->pull_down_3v3_timeout);
608 	if (err) {
609 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
610 			(tegra_host->pinctrl_state_3v3_drv == NULL))
611 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
612 				mmc_hostname(host->mmc));
613 		autocal->pull_down_3v3_timeout = 0;
614 	}
615 
616 	err = device_property_read_u32(host->mmc->parent,
617 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
618 			&autocal->pull_up_1v8_timeout);
619 	if (err) {
620 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
621 			(tegra_host->pinctrl_state_1v8_drv == NULL))
622 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
623 				mmc_hostname(host->mmc));
624 		autocal->pull_up_1v8_timeout = 0;
625 	}
626 
627 	err = device_property_read_u32(host->mmc->parent,
628 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
629 			&autocal->pull_down_1v8_timeout);
630 	if (err) {
631 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
632 			(tegra_host->pinctrl_state_1v8_drv == NULL))
633 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
634 				mmc_hostname(host->mmc));
635 		autocal->pull_down_1v8_timeout = 0;
636 	}
637 
638 	err = device_property_read_u32(host->mmc->parent,
639 			"nvidia,pad-autocal-pull-up-offset-sdr104",
640 			&autocal->pull_up_sdr104);
641 	if (err)
642 		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
643 
644 	err = device_property_read_u32(host->mmc->parent,
645 			"nvidia,pad-autocal-pull-down-offset-sdr104",
646 			&autocal->pull_down_sdr104);
647 	if (err)
648 		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
649 
650 	err = device_property_read_u32(host->mmc->parent,
651 			"nvidia,pad-autocal-pull-up-offset-hs400",
652 			&autocal->pull_up_hs400);
653 	if (err)
654 		autocal->pull_up_hs400 = autocal->pull_up_1v8;
655 
656 	err = device_property_read_u32(host->mmc->parent,
657 			"nvidia,pad-autocal-pull-down-offset-hs400",
658 			&autocal->pull_down_hs400);
659 	if (err)
660 		autocal->pull_down_hs400 = autocal->pull_down_1v8;
661 }
662 
663 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
664 {
665 	struct sdhci_host *host = mmc_priv(mmc);
666 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
667 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
668 	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
669 
670 	/* 100 ms calibration interval is specified in the TRM */
671 	if (ktime_to_ms(since_calib) > 100) {
672 		tegra_sdhci_pad_autocalib(host);
673 		tegra_host->last_calib = ktime_get();
674 	}
675 
676 	sdhci_request(mmc, mrq);
677 }
678 
679 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
680 {
681 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
682 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
683 	int err;
684 
685 	err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
686 				       &tegra_host->default_tap);
687 	if (err)
688 		tegra_host->default_tap = 0;
689 
690 	err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
691 				       &tegra_host->default_trim);
692 	if (err)
693 		tegra_host->default_trim = 0;
694 
695 	err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
696 				       &tegra_host->dqs_trim);
697 	if (err)
698 		tegra_host->dqs_trim = 0x11;
699 }
700 
701 static void tegra_sdhci_parse_dt(struct sdhci_host *host)
702 {
703 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
704 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
705 
706 	if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
707 		tegra_host->enable_hwcq = true;
708 	else
709 		tegra_host->enable_hwcq = false;
710 
711 	tegra_sdhci_parse_pad_autocal_dt(host);
712 	tegra_sdhci_parse_tap_and_trim(host);
713 }
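
/*
 * Illustrative device tree fragment using the properties parsed above; the
 * property names are the ones queried by this driver, while the node name
 * and values are made-up examples only:
 *
 *	sdhci@700b0000 {
 *		supports-cqe;
 *		nvidia,default-tap = <0x5>;
 *		nvidia,default-trim = <0x9>;
 *		nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
 *		nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
 *	};
 */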
714 
715 static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
716 {
717 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
718 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
719 	unsigned long host_clk;
720 
721 	if (!clock)
722 		return sdhci_set_clock(host, clock);
723 
724 	/*
725 	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
726 	 * divider to be configured to divide the host clock by two. The SDHCI
727 	 * clock divider is calculated as part of sdhci_set_clock() by
728 	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
729 	 * the requested clock rate.
730 	 *
731 	 * By setting the host->max_clk to clock * 2 the divider calculation
732 	 * will always result in the correct value for DDR50/52 modes,
733 	 * regardless of clock rate rounding, which may happen if the value
734 	 * from clk_get_rate() is used.
735 	 */
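	/*
	 * Illustrative example: for DDR52 at a requested 52 MHz bus clock,
	 * host_clk and host->max_clk become 104 MHz, so sdhci_calc_clk()
	 * selects a divider of two and the card still sees 52 MHz.
	 */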
736 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
737 	clk_set_rate(pltfm_host->clk, host_clk);
738 	tegra_host->curr_clk_rate = host_clk;
739 	if (tegra_host->ddr_signaling)
740 		host->max_clk = host_clk;
741 	else
742 		host->max_clk = clk_get_rate(pltfm_host->clk);
743 
744 	sdhci_set_clock(host, clock);
745 
746 	if (tegra_host->pad_calib_required) {
747 		tegra_sdhci_pad_autocalib(host);
748 		tegra_host->pad_calib_required = false;
749 	}
750 }
751 
752 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
753 {
754 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
755 
756 	return clk_round_rate(pltfm_host->clk, UINT_MAX);
757 }
758 
759 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
760 {
761 	u32 val;
762 
763 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
764 	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
765 	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
766 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
767 }
768 
769 static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
770 {
771 	u32 reg;
772 	int err;
773 
774 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
775 	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
776 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
777 
778 	/* 1 ms sleep, 5 ms timeout */
779 	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
780 				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
781 				 1000, 5000);
782 	if (err)
783 		dev_err(mmc_dev(host->mmc),
784 			"HS400 delay line calibration timed out\n");
785 }
786 
787 static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
788 				       u8 thd_low, u8 fixed_tap)
789 {
790 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
791 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
792 	u32 val, tun_status;
793 	u8 word, bit, edge1, tap, window;
794 	bool tap_result;
795 	bool start_fail = false;
796 	bool start_pass = false;
797 	bool end_pass = false;
798 	bool first_fail = false;
799 	bool first_pass = false;
800 	u8 start_pass_tap = 0;
801 	u8 end_pass_tap = 0;
802 	u8 first_fail_tap = 0;
803 	u8 first_pass_tap = 0;
804 	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
805 
806 	/*
807 	 * Read the auto-tuned results and extract a good, valid passing window
808 	 * by filtering out unwanted bubble/partial/merged windows.
809 	 */
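	/*
	 * Each bit of a tuning status word corresponds to one tap value; a set
	 * bit means that tap passed tuning, a cleared bit means it failed.
	 */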
810 	for (word = 0; word < total_tuning_words; word++) {
811 		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
812 		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
813 		val |= word;
814 		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
815 		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
816 		bit = 0;
817 		while (bit < TUNING_WORD_BIT_SIZE) {
818 			tap = word * TUNING_WORD_BIT_SIZE + bit;
819 			tap_result = tun_status & (1 << bit);
820 			if (!tap_result && !start_fail) {
821 				start_fail = true;
822 				if (!first_fail) {
823 					first_fail_tap = tap;
824 					first_fail = true;
825 				}
826 
827 			} else if (tap_result && start_fail && !start_pass) {
828 				start_pass_tap = tap;
829 				start_pass = true;
830 				if (!first_pass) {
831 					first_pass_tap = tap;
832 					first_pass = true;
833 				}
834 
835 			} else if (!tap_result && start_fail && start_pass &&
836 				   !end_pass) {
837 				end_pass_tap = tap - 1;
838 				end_pass = true;
839 			} else if (tap_result && start_pass && start_fail &&
840 				   end_pass) {
841 				window = end_pass_tap - start_pass_tap;
842 				/* discard merged window and bubble window */
843 				if (window >= thd_up || window < thd_low) {
844 					start_pass_tap = tap;
845 					end_pass = false;
846 				} else {
847 					/* set tap at middle of valid window */
848 					tap = start_pass_tap + window / 2;
849 					tegra_host->tuned_tap_delay = tap;
850 					return;
851 				}
852 			}
853 
854 			bit++;
855 		}
856 	}
857 
858 	if (!first_fail) {
859 		WARN(1, "no edge detected, continue with hw tuned delay.\n");
860 	} else if (first_pass) {
861 		/* set tap location at fixed tap relative to the first edge */
862 		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
863 		if (edge1 - 1 > fixed_tap)
864 			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
865 		else
866 			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
867 	}
868 }
869 
870 static void tegra_sdhci_post_tuning(struct sdhci_host *host)
871 {
872 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
873 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
874 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
875 	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
876 	u8 fixed_tap, start_tap, end_tap, window_width;
877 	u8 thdupper, thdlower;
878 	u8 num_iter;
879 	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
880 
881 	/* retain the HW-tuned tap to use in case no correction is needed */
882 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
883 	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
884 				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
885 	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
886 		min_tap_dly = soc_data->min_tap_delay;
887 		max_tap_dly = soc_data->max_tap_delay;
888 		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
889 		period_ps = USEC_PER_SEC / clk_rate_mhz;
890 		bestcase = period_ps / min_tap_dly;
891 		worstcase = period_ps / max_tap_dly;
892 		/*
893 		 * Upper and Lower bound thresholds used to detect merged and
894 		 * bubble windows
895 		 */
896 		thdupper = (2 * worstcase + bestcase) / 2;
897 		thdlower = worstcase / 4;
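		/*
		 * Worked example with illustrative numbers: at 200 MHz,
		 * period_ps = 1000000 / 200 = 5000; with Tegra210's min/max
		 * tap delays of 106/185 ps, bestcase = 47 and worstcase = 27,
		 * so thdupper = (2 * 27 + 47) / 2 = 50 and thdlower = 27 / 4 = 6.
		 */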
898 		/*
899 		 * fixed_tap is used when the HW tuning result contains a single edge;
900 		 * the tap is then set at a fixed delay relative to the first edge.
901 		 */
902 		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
903 		fixed_tap = avg_tap_dly / 2;
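		/*
		 * Continuing the illustrative numbers above:
		 * avg_tap_dly = (5000 * 2) / (106 + 185) = 34, fixed_tap = 17.
		 */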
904 
905 		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
906 		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
907 		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
908 			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
909 		window_width = end_tap - start_tap;
910 		num_iter = host->tuning_loop_count;
911 		/*
912 		 * A partial window includes an edge of the tuning range. A merged
913 		 * window spans more taps, so its width exceeds the upper
914 		 * threshold.
915 		 */
916 		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
917 		    (end_tap == num_iter - 2) || window_width >= thdupper) {
918 			pr_debug("%s: Apply tuning correction\n",
919 				 mmc_hostname(host->mmc));
920 			tegra_sdhci_tap_correction(host, thdupper, thdlower,
921 						   fixed_tap);
922 		}
923 	}
924 
925 	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
926 }
927 
928 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
929 {
930 	struct sdhci_host *host = mmc_priv(mmc);
931 	int err;
932 
933 	err = sdhci_execute_tuning(mmc, opcode);
934 	if (!err && !host->tuning_err)
935 		tegra_sdhci_post_tuning(host);
936 
937 	return err;
938 }
939 
940 static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
941 					  unsigned timing)
942 {
943 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
944 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
945 	bool set_default_tap = false;
946 	bool set_dqs_trim = false;
947 	bool do_hs400_dll_cal = false;
948 	u8 iter = TRIES_256;
949 	u32 val;
950 
951 	tegra_host->ddr_signaling = false;
952 	switch (timing) {
953 	case MMC_TIMING_UHS_SDR50:
954 		break;
955 	case MMC_TIMING_UHS_SDR104:
956 	case MMC_TIMING_MMC_HS200:
957 		/* Don't set default tap on tunable modes. */
958 		iter = TRIES_128;
959 		break;
960 	case MMC_TIMING_MMC_HS400:
961 		set_dqs_trim = true;
962 		do_hs400_dll_cal = true;
963 		iter = TRIES_128;
964 		break;
965 	case MMC_TIMING_MMC_DDR52:
966 	case MMC_TIMING_UHS_DDR50:
967 		tegra_host->ddr_signaling = true;
968 		set_default_tap = true;
969 		break;
970 	default:
971 		set_default_tap = true;
972 		break;
973 	}
974 
975 	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
976 	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
977 		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
978 		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
979 	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
980 		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
981 		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
982 	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
983 	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
984 
985 	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
986 
987 	sdhci_set_uhs_signaling(host, timing);
988 
989 	tegra_sdhci_pad_autocalib(host);
990 
991 	if (tegra_host->tuned_tap_delay && !set_default_tap)
992 		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
993 	else
994 		tegra_sdhci_set_tap(host, tegra_host->default_tap);
995 
996 	if (set_dqs_trim)
997 		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
998 
999 	if (do_hs400_dll_cal)
1000 		tegra_sdhci_hs400_dll_cal(host);
1001 }
1002 
1003 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1004 {
1005 	unsigned int min, max;
1006 
1007 	/*
1008 	 * Start search for minimum tap value at 10, as smaller values are
1009 	 * Start the search for the minimum tap value at 10, as smaller values
1010 	 * may wrongly be reported as working but fail at higher speeds,
1011 	 */
1012 	min = 10;
1013 	while (min < 255) {
1014 		tegra_sdhci_set_tap(host, min);
1015 		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1016 			break;
1017 		min++;
1018 	}
1019 
1020 	/* Find the maximum tap value that still passes. */
1021 	max = min + 1;
1022 	while (max < 255) {
1023 		tegra_sdhci_set_tap(host, max);
1024 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1025 			max--;
1026 			break;
1027 		}
1028 		max++;
1029 	}
1030 
1031 	/* The TRM states the ideal tap value is at 75% in the passing range. */
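	/*
	 * For example, if tap 15 is the first passing value and tap 80 the
	 * last, the final tap is 15 + ((80 - 15) * 3 / 4) = 63.
	 */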
1032 	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1033 
1034 	return mmc_send_tuning(host->mmc, opcode, NULL);
1035 }
1036 
1037 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1038 						   struct mmc_ios *ios)
1039 {
1040 	struct sdhci_host *host = mmc_priv(mmc);
1041 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1042 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1043 	int ret = 0;
1044 
1045 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1046 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1047 		if (ret < 0)
1048 			return ret;
1049 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1050 	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1051 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1052 		if (ret < 0)
1053 			return ret;
1054 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1055 	}
1056 
1057 	if (tegra_host->pad_calib_required)
1058 		tegra_sdhci_pad_autocalib(host);
1059 
1060 	return ret;
1061 }
1062 
1063 static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1064 					 struct sdhci_tegra *tegra_host)
1065 {
1066 	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1067 	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1068 		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1069 			PTR_ERR(tegra_host->pinctrl_sdmmc));
1070 		return -1;
1071 	}
1072 
1073 	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1074 				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1075 	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1076 		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1077 			tegra_host->pinctrl_state_1v8_drv = NULL;
1078 	}
1079 
1080 	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1081 				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1082 	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1083 		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1084 			tegra_host->pinctrl_state_3v3_drv = NULL;
1085 	}
1086 
1087 	tegra_host->pinctrl_state_3v3 =
1088 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1089 	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1090 		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1091 			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1092 		return -1;
1093 	}
1094 
1095 	tegra_host->pinctrl_state_1v8 =
1096 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1097 	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1098 		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1099 			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1100 		return -1;
1101 	}
1102 
1103 	tegra_host->pad_control_available = true;
1104 
1105 	return 0;
1106 }
1107 
1108 static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1109 {
1110 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1111 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1112 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1113 
1114 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1115 		tegra_host->pad_calib_required = true;
1116 }
1117 
1118 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1119 {
1120 	struct mmc_host *mmc = cq_host->mmc;
1121 	u8 ctrl;
1122 	ktime_t timeout;
1123 	bool timed_out;
1124 
1125 	/*
1126 	 * During CQE resume/unhalt, the CQHCI driver unhalts CQE before the
1127 	 * cqhci_host_ops enable callback, where the SDHCI DMA and BLOCK_SIZE
1128 	 * registers need to be re-configured.
1129 	 * Tegra CQHCI/SDHCI prevents write access to the block size register
1130 	 * while CQE is unhalted, so handle the CQE resume sequence here and
1131 	 * configure the SDHCI block registers before exiting the CQE halt state.
1132 	 */
1133 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1134 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1135 		sdhci_cqe_enable(mmc);
1136 		writel(val, cq_host->mmio + reg);
1137 		timeout = ktime_add_us(ktime_get(), 50);
1138 		while (1) {
1139 			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1140 			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1141 			if (!(ctrl & CQHCI_HALT) || timed_out)
1142 				break;
1143 		}
1144 		/*
1145 		 * CQE usually resumes very quickly, but in case the Tegra CQE
1146 		 * doesn't resume, retry the unhalt.
1147 		 */
1148 		if (timed_out)
1149 			writel(val, cq_host->mmio + reg);
1150 	} else {
1151 		writel(val, cq_host->mmio + reg);
1152 	}
1153 }
1154 
1155 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1156 					 struct mmc_request *mrq, u64 *data)
1157 {
1158 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1159 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1160 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1161 
1162 	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1163 	    mrq->cmd->flags & MMC_RSP_R1B)
1164 		*data |= CQHCI_CMD_TIMING(1);
1165 }
1166 
1167 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1168 {
1169 	struct cqhci_host *cq_host = mmc->cqe_private;
1170 	u32 val;
1171 
1172 	/*
1173 	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1174 	 * register when CQE is enabled and unhalted.
1175 	 * The CQHCI driver enables CQE prior to activation, so disable CQE before
1176 	 * programming the block size in the SDHCI controller, then re-enable it.
1177 	 */
1178 	if (!cq_host->activated) {
1179 		val = cqhci_readl(cq_host, CQHCI_CFG);
1180 		if (val & CQHCI_ENABLE)
1181 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1182 				     CQHCI_CFG);
1183 		sdhci_cqe_enable(mmc);
1184 		if (val & CQHCI_ENABLE)
1185 			cqhci_writel(cq_host, val, CQHCI_CFG);
1186 	}
1187 
1188 	/*
1189 	 * CMD CRC errors are sometimes seen with some eMMC devices when the
1190 	 * status command is sent during transfer of the last data block, which
1191 	 * is the default case since the send status command block counter (CBC)
1192 	 * is 1. The recommended fix is to set CBC to 0, allowing the status
1193 	 * command to be sent only when the data lines are idle.
1194 	 */
1195 	val = cqhci_readl(cq_host, CQHCI_SSC1);
1196 	val &= ~CQHCI_SSC1_CBC_MASK;
1197 	cqhci_writel(cq_host, val, CQHCI_SSC1);
1198 }
1199 
1200 static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1201 {
1202 	sdhci_dumpregs(mmc_priv(mmc));
1203 }
1204 
1205 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1206 {
1207 	int cmd_error = 0;
1208 	int data_error = 0;
1209 
1210 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1211 		return intmask;
1212 
1213 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1214 
1215 	return 0;
1216 }
1217 
1218 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1219 	.write_l    = tegra_cqhci_writel,
1220 	.enable	= sdhci_tegra_cqe_enable,
1221 	.disable = sdhci_cqe_disable,
1222 	.dumpregs = sdhci_tegra_dumpregs,
1223 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1224 };
1225 
1226 static const struct sdhci_ops tegra_sdhci_ops = {
1227 	.read_w     = tegra_sdhci_readw,
1228 	.write_l    = tegra_sdhci_writel,
1229 	.set_clock  = tegra_sdhci_set_clock,
1230 	.set_bus_width = sdhci_set_bus_width,
1231 	.reset      = tegra_sdhci_reset,
1232 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1233 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1234 	.voltage_switch = tegra_sdhci_voltage_switch,
1235 	.get_max_clock = tegra_sdhci_get_max_clock,
1236 };
1237 
1238 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1239 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1240 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1241 		  SDHCI_QUIRK_NO_HISPD_BIT |
1242 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1243 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1244 	.ops  = &tegra_sdhci_ops,
1245 };
1246 
1247 static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1248 	.pdata = &sdhci_tegra20_pdata,
1249 	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1250 		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1251 };
1252 
1253 static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1254 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1255 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1256 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1257 		  SDHCI_QUIRK_NO_HISPD_BIT |
1258 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1259 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1260 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1261 		   SDHCI_QUIRK2_BROKEN_HS200 |
1262 		   /*
1263 		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1264 		    * though no command operation was in progress."
1265 		    *
1266 		    * The exact reason is unknown, as the same hardware seems
1267 		    * to support Auto CMD23 on a downstream 3.1 kernel.
1268 		    */
1269 		   SDHCI_QUIRK2_ACMD23_BROKEN,
1270 	.ops  = &tegra_sdhci_ops,
1271 };
1272 
1273 static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1274 	.pdata = &sdhci_tegra30_pdata,
1275 	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1276 		    NVQUIRK_ENABLE_SDR50 |
1277 		    NVQUIRK_ENABLE_SDR104 |
1278 		    NVQUIRK_HAS_PADCALIB,
1279 };
1280 
1281 static const struct sdhci_ops tegra114_sdhci_ops = {
1282 	.read_w     = tegra_sdhci_readw,
1283 	.write_w    = tegra_sdhci_writew,
1284 	.write_l    = tegra_sdhci_writel,
1285 	.set_clock  = tegra_sdhci_set_clock,
1286 	.set_bus_width = sdhci_set_bus_width,
1287 	.reset      = tegra_sdhci_reset,
1288 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1289 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1290 	.voltage_switch = tegra_sdhci_voltage_switch,
1291 	.get_max_clock = tegra_sdhci_get_max_clock,
1292 };
1293 
1294 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1295 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1296 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1297 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1298 		  SDHCI_QUIRK_NO_HISPD_BIT |
1299 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1300 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1301 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1302 	.ops  = &tegra114_sdhci_ops,
1303 };
1304 
1305 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1306 	.pdata = &sdhci_tegra114_pdata,
1307 };
1308 
1309 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1310 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1311 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1312 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1313 		  SDHCI_QUIRK_NO_HISPD_BIT |
1314 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1315 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1316 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1317 		   /*
1318 		    * The TRM states that the SD/MMC controller found on
1319 		    * Tegra124 can address 34 bits (the maximum supported by
1320 		    * the Tegra memory controller), but tests show that DMA
1321 		    * to or from above 4 GiB doesn't work. This is possibly
1322 		    * caused by missing programming, though it's not obvious
1323 		    * what sequence is required. Mark 64-bit DMA broken for
1324 		    * now to fix this for existing users (e.g. Nyan boards).
1325 		    */
1326 		   SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
1327 	.ops  = &tegra114_sdhci_ops,
1328 };
1329 
1330 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1331 	.pdata = &sdhci_tegra124_pdata,
1332 };
1333 
1334 static const struct sdhci_ops tegra210_sdhci_ops = {
1335 	.read_w     = tegra_sdhci_readw,
1336 	.write_w    = tegra210_sdhci_writew,
1337 	.write_l    = tegra_sdhci_writel,
1338 	.set_clock  = tegra_sdhci_set_clock,
1339 	.set_bus_width = sdhci_set_bus_width,
1340 	.reset      = tegra_sdhci_reset,
1341 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1342 	.voltage_switch = tegra_sdhci_voltage_switch,
1343 	.get_max_clock = tegra_sdhci_get_max_clock,
1344 };
1345 
1346 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1347 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1348 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1349 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1350 		  SDHCI_QUIRK_NO_HISPD_BIT |
1351 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1352 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1353 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1354 	.ops  = &tegra210_sdhci_ops,
1355 };
1356 
1357 static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1358 	.pdata = &sdhci_tegra210_pdata,
1359 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1360 		    NVQUIRK_HAS_PADCALIB |
1361 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1362 		    NVQUIRK_ENABLE_SDR50 |
1363 		    NVQUIRK_ENABLE_SDR104,
1364 	.min_tap_delay = 106,
1365 	.max_tap_delay = 185,
1366 };
1367 
1368 static const struct sdhci_ops tegra186_sdhci_ops = {
1369 	.read_w     = tegra_sdhci_readw,
1370 	.write_l    = tegra_sdhci_writel,
1371 	.set_clock  = tegra_sdhci_set_clock,
1372 	.set_bus_width = sdhci_set_bus_width,
1373 	.reset      = tegra_sdhci_reset,
1374 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1375 	.voltage_switch = tegra_sdhci_voltage_switch,
1376 	.get_max_clock = tegra_sdhci_get_max_clock,
1377 	.irq = sdhci_tegra_cqhci_irq,
1378 };
1379 
1380 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1381 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1382 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1383 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1384 		  SDHCI_QUIRK_NO_HISPD_BIT |
1385 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1386 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1387 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1388 		   /* SDHCI controllers on Tegra186 support 40-bit addressing,
1389 		    * while IOVA addresses are 48 bits wide on Tegra186. With a
1390 		    * 64-bit DMA mask used for SDHCI, accesses can break, so
1391 		    * disable 64-bit DMA, which falls back to a 32-bit DMA mask.
1392 		    * Ideally a 40-bit DMA mask would work, but it is not
1393 		    * supported as of now.
1394 		    */
1395 		   SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
1396 	.ops  = &tegra186_sdhci_ops,
1397 };
1398 
1399 static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1400 	.pdata = &sdhci_tegra186_pdata,
1401 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1402 		    NVQUIRK_HAS_PADCALIB |
1403 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1404 		    NVQUIRK_ENABLE_SDR50 |
1405 		    NVQUIRK_ENABLE_SDR104 |
1406 		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1407 	.min_tap_delay = 84,
1408 	.max_tap_delay = 136,
1409 };
1410 
1411 static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1412 	.pdata = &sdhci_tegra186_pdata,
1413 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1414 		    NVQUIRK_HAS_PADCALIB |
1415 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1416 		    NVQUIRK_ENABLE_SDR50 |
1417 		    NVQUIRK_ENABLE_SDR104,
1418 	.min_tap_delay = 96,
1419 	.max_tap_delay = 139,
1420 };
1421 
1422 static const struct of_device_id sdhci_tegra_dt_match[] = {
1423 	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1424 	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1425 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1426 	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1427 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1428 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1429 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1430 	{}
1431 };
1432 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1433 
1434 static int sdhci_tegra_add_host(struct sdhci_host *host)
1435 {
1436 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1437 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1438 	struct cqhci_host *cq_host;
1439 	bool dma64;
1440 	int ret;
1441 
1442 	if (!tegra_host->enable_hwcq)
1443 		return sdhci_add_host(host);
1444 
1445 	sdhci_enable_v4_mode(host);
1446 
1447 	ret = sdhci_setup_host(host);
1448 	if (ret)
1449 		return ret;
1450 
1451 	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1452 
1453 	cq_host = devm_kzalloc(host->mmc->parent,
1454 				sizeof(*cq_host), GFP_KERNEL);
1455 	if (!cq_host) {
1456 		ret = -ENOMEM;
1457 		goto cleanup;
1458 	}
1459 
1460 	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1461 	cq_host->ops = &sdhci_tegra_cqhci_ops;
1462 
1463 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1464 	if (dma64)
1465 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1466 
1467 	ret = cqhci_init(cq_host, host->mmc, dma64);
1468 	if (ret)
1469 		goto cleanup;
1470 
1471 	ret = __sdhci_add_host(host);
1472 	if (ret)
1473 		goto cleanup;
1474 
1475 	return 0;
1476 
1477 cleanup:
1478 	sdhci_cleanup_host(host);
1479 	return ret;
1480 }
1481 
1482 static int sdhci_tegra_probe(struct platform_device *pdev)
1483 {
1484 	const struct of_device_id *match;
1485 	const struct sdhci_tegra_soc_data *soc_data;
1486 	struct sdhci_host *host;
1487 	struct sdhci_pltfm_host *pltfm_host;
1488 	struct sdhci_tegra *tegra_host;
1489 	struct clk *clk;
1490 	int rc;
1491 
1492 	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1493 	if (!match)
1494 		return -EINVAL;
1495 	soc_data = match->data;
1496 
1497 	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1498 	if (IS_ERR(host))
1499 		return PTR_ERR(host);
1500 	pltfm_host = sdhci_priv(host);
1501 
1502 	tegra_host = sdhci_pltfm_priv(pltfm_host);
1503 	tegra_host->ddr_signaling = false;
1504 	tegra_host->pad_calib_required = false;
1505 	tegra_host->pad_control_available = false;
1506 	tegra_host->soc_data = soc_data;
1507 
1508 	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1509 		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1510 		if (rc == 0)
1511 			host->mmc_host_ops.start_signal_voltage_switch =
1512 				sdhci_tegra_start_signal_voltage_switch;
1513 	}
1514 
1515 	/* Hook to periodically rerun pad calibration */
1516 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1517 		host->mmc_host_ops.request = tegra_sdhci_request;
1518 
1519 	host->mmc_host_ops.hs400_enhanced_strobe =
1520 			tegra_sdhci_hs400_enhanced_strobe;
1521 
1522 	if (!host->ops->platform_execute_tuning)
1523 		host->mmc_host_ops.execute_tuning =
1524 				tegra_sdhci_execute_hw_tuning;
1525 
1526 	rc = mmc_of_parse(host->mmc);
1527 	if (rc)
1528 		goto err_parse_dt;
1529 
1530 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1531 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1532 
1533 	tegra_sdhci_parse_dt(host);
1534 
1535 	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1536 							 GPIOD_OUT_HIGH);
1537 	if (IS_ERR(tegra_host->power_gpio)) {
1538 		rc = PTR_ERR(tegra_host->power_gpio);
1539 		goto err_power_req;
1540 	}
1541 
1542 	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1543 	if (IS_ERR(clk)) {
1544 		dev_err(mmc_dev(host->mmc), "clk err\n");
1545 		rc = PTR_ERR(clk);
1546 		goto err_clk_get;
1547 	}
1548 	clk_prepare_enable(clk);
1549 	pltfm_host->clk = clk;
1550 
1551 	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1552 							   "sdhci");
1553 	if (IS_ERR(tegra_host->rst)) {
1554 		rc = PTR_ERR(tegra_host->rst);
1555 		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1556 		goto err_rst_get;
1557 	}
1558 
1559 	rc = reset_control_assert(tegra_host->rst);
1560 	if (rc)
1561 		goto err_rst_get;
1562 
1563 	usleep_range(2000, 4000);
1564 
1565 	rc = reset_control_deassert(tegra_host->rst);
1566 	if (rc)
1567 		goto err_rst_get;
1568 
1569 	usleep_range(2000, 4000);
1570 
1571 	rc = sdhci_tegra_add_host(host);
1572 	if (rc)
1573 		goto err_add_host;
1574 
1575 	return 0;
1576 
1577 err_add_host:
1578 	reset_control_assert(tegra_host->rst);
1579 err_rst_get:
1580 	clk_disable_unprepare(pltfm_host->clk);
1581 err_clk_get:
1582 err_power_req:
1583 err_parse_dt:
1584 	sdhci_pltfm_free(pdev);
1585 	return rc;
1586 }
1587 
1588 static int sdhci_tegra_remove(struct platform_device *pdev)
1589 {
1590 	struct sdhci_host *host = platform_get_drvdata(pdev);
1591 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1592 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1593 
1594 	sdhci_remove_host(host, 0);
1595 
1596 	reset_control_assert(tegra_host->rst);
1597 	usleep_range(2000, 4000);
1598 	clk_disable_unprepare(pltfm_host->clk);
1599 
1600 	sdhci_pltfm_free(pdev);
1601 
1602 	return 0;
1603 }
1604 
1605 #ifdef CONFIG_PM_SLEEP
1606 static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
1607 {
1608 	struct sdhci_host *host = dev_get_drvdata(dev);
1609 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1610 	int ret;
1611 
1612 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1613 		ret = cqhci_suspend(host->mmc);
1614 		if (ret)
1615 			return ret;
1616 	}
1617 
1618 	ret = sdhci_suspend_host(host);
1619 	if (ret) {
1620 		cqhci_resume(host->mmc);
1621 		return ret;
1622 	}
1623 
1624 	clk_disable_unprepare(pltfm_host->clk);
1625 	return 0;
1626 }
1627 
1628 static int __maybe_unused sdhci_tegra_resume(struct device *dev)
1629 {
1630 	struct sdhci_host *host = dev_get_drvdata(dev);
1631 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1632 	int ret;
1633 
1634 	ret = clk_prepare_enable(pltfm_host->clk);
1635 	if (ret)
1636 		return ret;
1637 
1638 	ret = sdhci_resume_host(host);
1639 	if (ret)
1640 		goto disable_clk;
1641 
1642 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1643 		ret = cqhci_resume(host->mmc);
1644 		if (ret)
1645 			goto suspend_host;
1646 	}
1647 
1648 	return 0;
1649 
1650 suspend_host:
1651 	sdhci_suspend_host(host);
1652 disable_clk:
1653 	clk_disable_unprepare(pltfm_host->clk);
1654 	return ret;
1655 }
1656 #endif
1657 
1658 static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
1659 			 sdhci_tegra_resume);
1660 
1661 static struct platform_driver sdhci_tegra_driver = {
1662 	.driver		= {
1663 		.name	= "sdhci-tegra",
1664 		.of_match_table = sdhci_tegra_dt_match,
1665 		.pm	= &sdhci_tegra_dev_pm_ops,
1666 	},
1667 	.probe		= sdhci_tegra_probe,
1668 	.remove		= sdhci_tegra_remove,
1669 };
1670 
1671 module_platform_driver(sdhci_tegra_driver);
1672 
1673 MODULE_DESCRIPTION("SDHCI driver for Tegra");
1674 MODULE_AUTHOR("Google, Inc.");
1675 MODULE_LICENSE("GPL v2");
1676