1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2010 Google, Inc.
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/clk.h>
8 #include <linux/delay.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/err.h>
11 #include <linux/gpio/consumer.h>
12 #include <linux/init.h>
13 #include <linux/io.h>
14 #include <linux/iommu.h>
15 #include <linux/iopoll.h>
16 #include <linux/ktime.h>
17 #include <linux/mmc/card.h>
18 #include <linux/mmc/host.h>
19 #include <linux/mmc/mmc.h>
20 #include <linux/mmc/slot-gpio.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_opp.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/reset.h>
29 
30 #include <soc/tegra/common.h>
31 
32 #include "sdhci-cqhci.h"
33 #include "sdhci-pltfm.h"
34 #include "cqhci.h"
35 
36 /* Tegra SDHOST controller vendor register definitions */
37 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
38 #define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
39 #define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
40 #define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
41 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
42 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
43 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
44 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
45 
46 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
47 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
48 
49 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
50 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
51 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
52 
53 #define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
54 #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
55 #define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
56 #define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
57 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
58 #define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
59 
60 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
61 #define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
62 
63 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
64 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
65 
66 #define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
67 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
68 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
69 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
70 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
71 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
72 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
73 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
74 #define TRIES_128					2
75 #define TRIES_256					4
76 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
77 
78 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
79 #define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
80 #define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
81 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
82 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
83 #define TUNING_WORD_BIT_SIZE				32
84 
85 #define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
86 #define SDHCI_AUTO_CAL_START				BIT(31)
87 #define SDHCI_AUTO_CAL_ENABLE				BIT(29)
88 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
89 
90 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
91 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
92 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
93 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
94 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
95 
96 #define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
97 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
98 
99 #define SDHCI_TEGRA_CIF2AXI_CTRL_0			0x1fc
100 
101 #define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
102 #define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
103 #define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
104 #define NVQUIRK_ENABLE_SDR50				BIT(3)
105 #define NVQUIRK_ENABLE_SDR104				BIT(4)
106 #define NVQUIRK_ENABLE_DDR50				BIT(5)
107 /*
108  * HAS_PADCALIB NVQUIRK is for SoCs supporting auto-calibration of the pad
109  * drive strength.
110  */
111 #define NVQUIRK_HAS_PADCALIB				BIT(6)
112 /*
113  * NEEDS_PAD_CONTROL NVQUIRK is for SoCs having separate 3V3 and 1V8 pads.
114  * 3V3/1V8 pad selection happens through pinctrl state selection depending
115  * on the signaling mode.
116  */
117 #define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
118 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
119 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
120 
121 /*
122  * NVQUIRK_HAS_TMCLK is for SoCs having a separate timeout clock for the
123  * Tegra SDMMC hardware data timeout.
124  */
125 #define NVQUIRK_HAS_TMCLK				BIT(10)
126 
127 #define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
128 #define NVQUIRK_PROGRAM_STREAMID			BIT(12)
129 
130 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
131 #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
132 
133 #define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
134 					 SDHCI_TRNS_BLK_CNT_EN | \
135 					 SDHCI_TRNS_DMA)
136 
137 struct sdhci_tegra_soc_data {
138 	const struct sdhci_pltfm_data *pdata;
139 	u64 dma_mask;
140 	u32 nvquirks;
141 	u8 min_tap_delay;
142 	u8 max_tap_delay;
143 };
144 
145 /* Magic pull up and pull down pad calibration offsets */
146 struct sdhci_tegra_autocal_offsets {
147 	u32 pull_up_3v3;
148 	u32 pull_down_3v3;
149 	u32 pull_up_3v3_timeout;
150 	u32 pull_down_3v3_timeout;
151 	u32 pull_up_1v8;
152 	u32 pull_down_1v8;
153 	u32 pull_up_1v8_timeout;
154 	u32 pull_down_1v8_timeout;
155 	u32 pull_up_sdr104;
156 	u32 pull_down_sdr104;
157 	u32 pull_up_hs400;
158 	u32 pull_down_hs400;
159 };
160 
161 struct sdhci_tegra {
162 	const struct sdhci_tegra_soc_data *soc_data;
163 	struct gpio_desc *power_gpio;
164 	struct clk *tmclk;
165 	bool ddr_signaling;
166 	bool pad_calib_required;
167 	bool pad_control_available;
168 
169 	struct reset_control *rst;
170 	struct pinctrl *pinctrl_sdmmc;
171 	struct pinctrl_state *pinctrl_state_3v3;
172 	struct pinctrl_state *pinctrl_state_1v8;
173 	struct pinctrl_state *pinctrl_state_3v3_drv;
174 	struct pinctrl_state *pinctrl_state_1v8_drv;
175 
176 	struct sdhci_tegra_autocal_offsets autocal_offsets;
177 	ktime_t last_calib;
178 
179 	u32 default_tap;
180 	u32 default_trim;
181 	u32 dqs_trim;
182 	bool enable_hwcq;
183 	unsigned long curr_clk_rate;
184 	u8 tuned_tap_delay;
185 	u32 stream_id;
186 };
187 
188 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
189 {
190 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
191 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
192 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
193 
194 	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
195 			(reg == SDHCI_HOST_VERSION))) {
196 		/* Erratum: Version register is invalid in HW. */
197 		return SDHCI_SPEC_200;
198 	}
199 
200 	return readw(host->ioaddr + reg);
201 }
202 
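/*
 * Note: SDHCI_TRANSFER_MODE (0x0c) and SDHCI_COMMAND (0x0e) share a single
 * 32-bit register. The transfer mode half is therefore shadowed in
 * pltfm_host->xfer_mode_shadow and flushed together with the command half as
 * one 32-bit write, since writing the command field is what issues the
 * command.
 */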
203 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
204 {
205 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
206 
207 	switch (reg) {
208 	case SDHCI_TRANSFER_MODE:
209 		/*
210 		 * Postpone this write; it must be done together with the
211 		 * command write in the SDHCI_COMMAND case below.
212 		 */
213 		pltfm_host->xfer_mode_shadow = val;
214 		return;
215 	case SDHCI_COMMAND:
216 		writel((val << 16) | pltfm_host->xfer_mode_shadow,
217 			host->ioaddr + SDHCI_TRANSFER_MODE);
218 		return;
219 	}
220 
221 	writew(val, host->ioaddr + reg);
222 }
223 
224 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
225 {
226 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
227 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
228 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
229 
230 	/* Spurious timeout and CRC errors are seen, so disable signalling of
231 	 * them. In case of real errors, the software timers should eventually
232 	 * detect them.
233 	 */
234 	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
235 		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
236 
237 	writel(val, host->ioaddr + reg);
238 
239 	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
240 			(reg == SDHCI_INT_ENABLE))) {
241 		/* Erratum: Must enable block gap interrupt detection */
242 		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
243 		if (val & SDHCI_INT_CARD_INT)
244 			gap_ctrl |= 0x8;
245 		else
246 			gap_ctrl &= ~0x8;
247 		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
248 	}
249 }
250 
251 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
252 {
253 	bool status;
254 	u32 reg;
255 
256 	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
257 	status = !!(reg & SDHCI_CLOCK_CARD_EN);
258 
259 	if (status == enable)
260 		return status;
261 
262 	if (enable)
263 		reg |= SDHCI_CLOCK_CARD_EN;
264 	else
265 		reg &= ~SDHCI_CLOCK_CARD_EN;
266 
267 	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
268 
269 	return status;
270 }
271 
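/*
 * Tegra210 variant of the 16-bit register write: when the write issues a
 * tuning command, the card clock is gated around it and the CMD/DATA lines
 * are reset before the clock is restored to its previous state.
 */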
272 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
273 {
274 	bool is_tuning_cmd = false;
275 	bool clk_enabled;
276 
277 	if (reg == SDHCI_COMMAND)
278 		is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));
279 
280 	if (is_tuning_cmd)
281 		clk_enabled = tegra_sdhci_configure_card_clk(host, false);
282 
283 	writew(val, host->ioaddr + reg);
284 
285 	if (is_tuning_cmd) {
286 		udelay(1);
287 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
288 		tegra_sdhci_configure_card_clk(host, clk_enabled);
289 	}
290 }
291 
292 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
293 {
294 	/*
295 	 * Write-enable shall be assumed if GPIO is missing in a board's
296 	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
297 	 * Tegra.
298 	 */
299 	return mmc_gpio_get_ro(host->mmc);
300 }
301 
302 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
303 {
304 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
305 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
306 	int has_1v8, has_3v3;
307 
308 	/*
309 	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
310 	 * voltage configuration in order to perform voltage switching. This
311 	 * means that valid pinctrl info is required on SDHCI instances capable
312 	 * of performing voltage switching. Whether or not an SDHCI instance is
313 	 * capable of voltage switching is determined based on the regulator.
314 	 */
315 
316 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
317 		return true;
318 
319 	if (IS_ERR(host->mmc->supply.vqmmc))
320 		return false;
321 
322 	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
323 						 1700000, 1950000);
324 
325 	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
326 						 2700000, 3600000);
327 
328 	if (has_1v8 == 1 && has_3v3 == 1)
329 		return tegra_host->pad_control_available;
330 
331 	/* Fixed voltage, no pad control required. */
332 	return true;
333 }
334 
335 static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
336 {
337 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
338 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
339 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
340 	bool card_clk_enabled = false;
341 	u32 reg;
342 
343 	/*
344 	 * Touching the tap values is a bit tricky on some SoC generations.
345 	 * The quirk enables a workaround for a glitch that sometimes occurs if
346 	 * the tap values are changed.
347 	 */
348 
349 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
350 		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
351 
352 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
353 	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
354 	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
355 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
356 
357 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
358 	    card_clk_enabled) {
359 		udelay(1);
360 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
361 		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
362 	}
363 }
364 
365 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
366 {
367 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
368 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
369 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
370 	u32 misc_ctrl, clk_ctrl, pad_ctrl;
371 
372 	sdhci_and_cqhci_reset(host, mask);
373 
374 	if (!(mask & SDHCI_RESET_ALL))
375 		return;
376 
377 	tegra_sdhci_set_tap(host, tegra_host->default_tap);
378 
379 	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
380 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
381 
382 	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
383 		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
384 		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
385 		       SDHCI_MISC_CTRL_ENABLE_SDR104);
386 
387 	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
388 		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
389 
390 	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
391 		/* Erratum: Enable SDHCI spec v3.00 support */
392 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
393 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
394 		/* Advertise UHS modes as supported by host */
395 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
396 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
397 		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
398 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
399 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
400 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
401 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
402 			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
403 	}
404 
405 	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
406 
407 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
408 	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
409 
410 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
411 		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
412 		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
413 		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
414 		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
415 
416 		tegra_host->pad_calib_required = true;
417 	}
418 
419 	tegra_host->ddr_signaling = false;
420 }
421 
422 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
423 {
424 	u32 val;
425 
426 	/*
427 	 * Enable or disable the additional I/O pad used by the drive strength
428 	 * calibration process.
429 	 */
430 	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
431 
432 	if (enable)
433 		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
434 	else
435 		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
436 
437 	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
438 
439 	if (enable)
440 		usleep_range(1, 2);
441 }
442 
443 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
444 					       u16 pdpu)
445 {
446 	u32 reg;
447 
448 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
449 	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
450 	reg |= pdpu;
451 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
452 }
453 
454 static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
455 				   bool state_drvupdn)
456 {
457 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
458 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
459 	struct sdhci_tegra_autocal_offsets *offsets =
460 						&tegra_host->autocal_offsets;
461 	struct pinctrl_state *pinctrl_drvupdn = NULL;
462 	int ret = 0;
463 	u8 drvup = 0, drvdn = 0;
464 	u32 reg;
465 
466 	if (!state_drvupdn) {
467 		/* PADS Drive Strength */
468 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
469 			if (tegra_host->pinctrl_state_1v8_drv) {
470 				pinctrl_drvupdn =
471 					tegra_host->pinctrl_state_1v8_drv;
472 			} else {
473 				drvup = offsets->pull_up_1v8_timeout;
474 				drvdn = offsets->pull_down_1v8_timeout;
475 			}
476 		} else {
477 			if (tegra_host->pinctrl_state_3v3_drv) {
478 				pinctrl_drvupdn =
479 					tegra_host->pinctrl_state_3v3_drv;
480 			} else {
481 				drvup = offsets->pull_up_3v3_timeout;
482 				drvdn = offsets->pull_down_3v3_timeout;
483 			}
484 		}
485 
486 		if (pinctrl_drvupdn != NULL) {
487 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
488 							pinctrl_drvupdn);
489 			if (ret < 0)
490 				dev_err(mmc_dev(host->mmc),
491 					"failed pads drvupdn, ret: %d\n", ret);
492 		} else if ((drvup) || (drvdn)) {
493 			reg = sdhci_readl(host,
494 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
495 			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
496 			reg |= (drvup << 20) | (drvdn << 12);
497 			sdhci_writel(host, reg,
498 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
499 		}
500 
501 	} else {
502 		/* Dual Voltage PADS Voltage selection */
503 		if (!tegra_host->pad_control_available)
504 			return 0;
505 
506 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
507 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
508 						tegra_host->pinctrl_state_1v8);
509 			if (ret < 0)
510 				dev_err(mmc_dev(host->mmc),
511 					"setting 1.8V failed, ret: %d\n", ret);
512 		} else {
513 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
514 						tegra_host->pinctrl_state_3v3);
515 			if (ret < 0)
516 				dev_err(mmc_dev(host->mmc),
517 					"setting 3.3V failed, ret: %d\n", ret);
518 		}
519 	}
520 
521 	return ret;
522 }
523 
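/*
 * Pad drive strength auto-calibration sequence: pick the pull-down/pull-up
 * offset pair for the current timing and signal voltage (pull-down in bits
 * [15:8], pull-up in bits [7:0] of the PDPU offset field), gate the card
 * clock, power up the calibration pad, start auto-calibration and poll for
 * completion with a 10 ms timeout. On timeout, auto-calibration is disabled
 * and fixed drive strengths are programmed instead.
 */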
524 static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
525 {
526 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
527 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
528 	struct sdhci_tegra_autocal_offsets offsets =
529 			tegra_host->autocal_offsets;
530 	struct mmc_ios *ios = &host->mmc->ios;
531 	bool card_clk_enabled;
532 	u16 pdpu;
533 	u32 reg;
534 	int ret;
535 
536 	switch (ios->timing) {
537 	case MMC_TIMING_UHS_SDR104:
538 		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
539 		break;
540 	case MMC_TIMING_MMC_HS400:
541 		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
542 		break;
543 	default:
544 		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
545 			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
546 		else
547 			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
548 	}
549 
550 	/* Set initial offset before auto-calibration */
551 	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
552 
553 	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
554 
555 	tegra_sdhci_configure_cal_pad(host, true);
556 
557 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
558 	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
559 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
560 
561 	usleep_range(1, 2);
562 	/* 10 ms timeout */
563 	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
564 				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
565 				 1000, 10000);
566 
567 	tegra_sdhci_configure_cal_pad(host, false);
568 
569 	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
570 
571 	if (ret) {
572 		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
573 
574 		/* Disable automatic cal and use fixed Drive Strengths */
575 		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
576 		reg &= ~SDHCI_AUTO_CAL_ENABLE;
577 		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
578 
579 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
580 		if (ret < 0)
581 			dev_err(mmc_dev(host->mmc),
582 				"Setting drive strengths failed: %d\n", ret);
583 	}
584 }
585 
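/*
 * The auto-calibration offsets come from optional device tree properties;
 * missing 3v3/1v8 values default to 0 and the SDR104/HS400 values fall back
 * to the corresponding 1v8 ones. A purely illustrative fragment (property
 * values here are examples, not board-validated numbers):
 *
 *	mmc@700b0000 {
 *		nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
 *		nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
 *		nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
 *		nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
 *	};
 */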
586 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
587 {
588 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
589 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
590 	struct sdhci_tegra_autocal_offsets *autocal =
591 			&tegra_host->autocal_offsets;
592 	int err;
593 
594 	err = device_property_read_u32(mmc_dev(host->mmc),
595 			"nvidia,pad-autocal-pull-up-offset-3v3",
596 			&autocal->pull_up_3v3);
597 	if (err)
598 		autocal->pull_up_3v3 = 0;
599 
600 	err = device_property_read_u32(mmc_dev(host->mmc),
601 			"nvidia,pad-autocal-pull-down-offset-3v3",
602 			&autocal->pull_down_3v3);
603 	if (err)
604 		autocal->pull_down_3v3 = 0;
605 
606 	err = device_property_read_u32(mmc_dev(host->mmc),
607 			"nvidia,pad-autocal-pull-up-offset-1v8",
608 			&autocal->pull_up_1v8);
609 	if (err)
610 		autocal->pull_up_1v8 = 0;
611 
612 	err = device_property_read_u32(mmc_dev(host->mmc),
613 			"nvidia,pad-autocal-pull-down-offset-1v8",
614 			&autocal->pull_down_1v8);
615 	if (err)
616 		autocal->pull_down_1v8 = 0;
617 
618 	err = device_property_read_u32(mmc_dev(host->mmc),
619 			"nvidia,pad-autocal-pull-up-offset-sdr104",
620 			&autocal->pull_up_sdr104);
621 	if (err)
622 		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
623 
624 	err = device_property_read_u32(mmc_dev(host->mmc),
625 			"nvidia,pad-autocal-pull-down-offset-sdr104",
626 			&autocal->pull_down_sdr104);
627 	if (err)
628 		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
629 
630 	err = device_property_read_u32(mmc_dev(host->mmc),
631 			"nvidia,pad-autocal-pull-up-offset-hs400",
632 			&autocal->pull_up_hs400);
633 	if (err)
634 		autocal->pull_up_hs400 = autocal->pull_up_1v8;
635 
636 	err = device_property_read_u32(mmc_dev(host->mmc),
637 			"nvidia,pad-autocal-pull-down-offset-hs400",
638 			&autocal->pull_down_hs400);
639 	if (err)
640 		autocal->pull_down_hs400 = autocal->pull_down_1v8;
641 
642 	/*
643 	 * Different fail-safe drive strength values based on the signaling
644 	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
645 	 * So, avoid reading the device tree properties below for SoCs that don't
646 	 * have NVQUIRK_NEEDS_PAD_CONTROL.
647 	 */
648 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
649 		return;
650 
651 	err = device_property_read_u32(mmc_dev(host->mmc),
652 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
653 			&autocal->pull_up_3v3_timeout);
654 	if (err) {
655 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
656 			(tegra_host->pinctrl_state_3v3_drv == NULL))
657 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
658 				mmc_hostname(host->mmc));
659 		autocal->pull_up_3v3_timeout = 0;
660 	}
661 
662 	err = device_property_read_u32(mmc_dev(host->mmc),
663 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
664 			&autocal->pull_down_3v3_timeout);
665 	if (err) {
666 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
667 			(tegra_host->pinctrl_state_3v3_drv == NULL))
668 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
669 				mmc_hostname(host->mmc));
670 		autocal->pull_down_3v3_timeout = 0;
671 	}
672 
673 	err = device_property_read_u32(mmc_dev(host->mmc),
674 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
675 			&autocal->pull_up_1v8_timeout);
676 	if (err) {
677 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
678 			(tegra_host->pinctrl_state_1v8_drv == NULL))
679 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
680 				mmc_hostname(host->mmc));
681 		autocal->pull_up_1v8_timeout = 0;
682 	}
683 
684 	err = device_property_read_u32(mmc_dev(host->mmc),
685 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
686 			&autocal->pull_down_1v8_timeout);
687 	if (err) {
688 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
689 			(tegra_host->pinctrl_state_1v8_drv == NULL))
690 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
691 				mmc_hostname(host->mmc));
692 		autocal->pull_down_1v8_timeout = 0;
693 	}
694 }
695 
696 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
697 {
698 	struct sdhci_host *host = mmc_priv(mmc);
699 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
700 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
701 	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
702 
703 	/* 100 ms calibration interval is specified in the TRM */
704 	if (ktime_to_ms(since_calib) > 100) {
705 		tegra_sdhci_pad_autocalib(host);
706 		tegra_host->last_calib = ktime_get();
707 	}
708 
709 	sdhci_request(mmc, mrq);
710 }
711 
712 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
713 {
714 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
715 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
716 	int err;
717 
718 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
719 				       &tegra_host->default_tap);
720 	if (err)
721 		tegra_host->default_tap = 0;
722 
723 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
724 				       &tegra_host->default_trim);
725 	if (err)
726 		tegra_host->default_trim = 0;
727 
728 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
729 				       &tegra_host->dqs_trim);
730 	if (err)
731 		tegra_host->dqs_trim = 0x11;
732 }
733 
734 static void tegra_sdhci_parse_dt(struct sdhci_host *host)
735 {
736 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
737 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
738 
739 	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
740 		tegra_host->enable_hwcq = true;
741 	else
742 		tegra_host->enable_hwcq = false;
743 
744 	tegra_sdhci_parse_pad_autocal_dt(host);
745 	tegra_sdhci_parse_tap_and_trim(host);
746 }
747 
748 static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
749 {
750 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
751 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
752 	struct device *dev = mmc_dev(host->mmc);
753 	unsigned long host_clk;
754 	int err;
755 
756 	if (!clock)
757 		return sdhci_set_clock(host, clock);
758 
759 	/*
760 	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
761 	 * divider to be configured to divide the host clock by two. The SDHCI
762 	 * clock divider is calculated as part of sdhci_set_clock() by
763 	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
764 	 * the requested clock rate.
765 	 *
766 	 * By setting the host->max_clk to clock * 2 the divider calculation
767 	 * will always result in the correct value for DDR50/52 modes,
768 	 * regardless of clock rate rounding, which may happen if the value
769 	 * from clk_get_rate() is used.
770 	 */
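	/*
	 * Example: DDR52 at a 52 MHz bus clock requests host_clk = 104 MHz and
	 * sets host->max_clk = 104 MHz, so sdhci_calc_clk() selects a divider
	 * of two and the card still sees 52 MHz regardless of clock rounding.
	 */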
771 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
772 
773 	err = dev_pm_opp_set_rate(dev, host_clk);
774 	if (err)
775 		dev_err(dev, "failed to set clk rate to %luHz: %d\n",
776 			host_clk, err);
777 
778 	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
779 	if (tegra_host->ddr_signaling)
780 		host->max_clk = host_clk;
781 	else
782 		host->max_clk = clk_get_rate(pltfm_host->clk);
783 
784 	sdhci_set_clock(host, clock);
785 
786 	if (tegra_host->pad_calib_required) {
787 		tegra_sdhci_pad_autocalib(host);
788 		tegra_host->pad_calib_required = false;
789 	}
790 }
791 
792 static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
793 					      struct mmc_ios *ios)
794 {
795 	struct sdhci_host *host = mmc_priv(mmc);
796 	u32 val;
797 
798 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
799 
800 	if (ios->enhanced_strobe) {
801 		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
802 		/*
803 		 * When CMD13 is sent from mmc_select_hs400es() after
804 		 * switching to HS400ES mode, the bus is operating at
805 		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
806 		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
807 		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
808 		 * controller CAR clock and the interface clock are rate matched.
809 		 */
810 		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
811 	} else {
812 		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
813 	}
814 
815 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
816 }
817 
818 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
819 {
820 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
821 
822 	return clk_round_rate(pltfm_host->clk, UINT_MAX);
823 }
824 
825 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
826 {
827 	u32 val;
828 
829 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
830 	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
831 	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
832 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
833 }
834 
835 static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
836 {
837 	u32 reg;
838 	int err;
839 
840 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
841 	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
842 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
843 
844 	/* 1 ms sleep, 5 ms timeout */
845 	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
846 				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
847 				 1000, 5000);
848 	if (err)
849 		dev_err(mmc_dev(host->mmc),
850 			"HS400 delay line calibration timed out\n");
851 }
852 
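/*
 * Post-process the hardware tuning status words: each bit represents one tap
 * (pass/fail), so the words are scanned for a fail -> pass -> fail pattern
 * that delimits a passing window. Windows narrower than thd_low (bubbles) or
 * at least thd_up wide (merged) are discarded, and the tuned tap is placed in
 * the middle of the first valid window. If no valid window is found but a
 * fail-to-pass edge was seen, the tap is set fixed_tap away from the midpoint
 * of that first edge.
 */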
853 static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
854 				       u8 thd_low, u8 fixed_tap)
855 {
856 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
857 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
858 	u32 val, tun_status;
859 	u8 word, bit, edge1, tap, window;
860 	bool tap_result;
861 	bool start_fail = false;
862 	bool start_pass = false;
863 	bool end_pass = false;
864 	bool first_fail = false;
865 	bool first_pass = false;
866 	u8 start_pass_tap = 0;
867 	u8 end_pass_tap = 0;
868 	u8 first_fail_tap = 0;
869 	u8 first_pass_tap = 0;
870 	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
871 
872 	/*
873 	 * Read the auto-tuned results and extract a valid passing window by
874 	 * filtering out unwanted bubble/partial/merged windows.
875 	 */
876 	for (word = 0; word < total_tuning_words; word++) {
877 		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
878 		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
879 		val |= word;
880 		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
881 		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
882 		bit = 0;
883 		while (bit < TUNING_WORD_BIT_SIZE) {
884 			tap = word * TUNING_WORD_BIT_SIZE + bit;
885 			tap_result = tun_status & (1 << bit);
886 			if (!tap_result && !start_fail) {
887 				start_fail = true;
888 				if (!first_fail) {
889 					first_fail_tap = tap;
890 					first_fail = true;
891 				}
892 
893 			} else if (tap_result && start_fail && !start_pass) {
894 				start_pass_tap = tap;
895 				start_pass = true;
896 				if (!first_pass) {
897 					first_pass_tap = tap;
898 					first_pass = true;
899 				}
900 
901 			} else if (!tap_result && start_fail && start_pass &&
902 				   !end_pass) {
903 				end_pass_tap = tap - 1;
904 				end_pass = true;
905 			} else if (tap_result && start_pass && start_fail &&
906 				   end_pass) {
907 				window = end_pass_tap - start_pass_tap;
908 				/* discard merged window and bubble window */
909 				if (window >= thd_up || window < thd_low) {
910 					start_pass_tap = tap;
911 					end_pass = false;
912 				} else {
913 					/* set tap at middle of valid window */
914 					tap = start_pass_tap + window / 2;
915 					tegra_host->tuned_tap_delay = tap;
916 					return;
917 				}
918 			}
919 
920 			bit++;
921 		}
922 	}
923 
924 	if (!first_fail) {
925 		WARN(1, "no edge detected, continue with hw tuned delay.\n");
926 	} else if (first_pass) {
927 		/* set tap location at fixed tap relative to the first edge */
928 		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
929 		if (edge1 - 1 > fixed_tap)
930 			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
931 		else
932 			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
933 	}
934 }
935 
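/*
 * Worked example of the threshold math below, using the Tegra210 tap delay
 * range (min 106 ps, max 185 ps per tap) at a 200 MHz interface clock:
 * period_ps = 1000000 / 200 = 5000, bestcase = 5000 / 106 = 47,
 * worstcase = 5000 / 185 = 27, thdupper = (2 * 27 + 47) / 2 = 50,
 * thdlower = 27 / 4 = 6, avg_tap_dly = 10000 / 291 = 34, fixed_tap = 17.
 */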
936 static void tegra_sdhci_post_tuning(struct sdhci_host *host)
937 {
938 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
939 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
940 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
941 	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
942 	u8 fixed_tap, start_tap, end_tap, window_width;
943 	u8 thdupper, thdlower;
944 	u8 num_iter;
945 	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
946 
947 	/* Retain the HW-tuned tap to use in case no correction is needed */
948 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
949 	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
950 				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
951 	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
952 		min_tap_dly = soc_data->min_tap_delay;
953 		max_tap_dly = soc_data->max_tap_delay;
954 		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
955 		period_ps = USEC_PER_SEC / clk_rate_mhz;
956 		bestcase = period_ps / min_tap_dly;
957 		worstcase = period_ps / max_tap_dly;
958 		/*
959 		 * Upper and Lower bound thresholds used to detect merged and
960 		 * bubble windows
961 		 */
962 		thdupper = (2 * worstcase + bestcase) / 2;
963 		thdlower = worstcase / 4;
964 		/*
965 		 * A fixed tap is used when the HW tuning result contains a single
966 		 * edge; the tap is then set at a fixed delay relative to that edge.
967 		 */
968 		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
969 		fixed_tap = avg_tap_dly / 2;
970 
971 		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
972 		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
973 		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
974 			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
975 		window_width = end_tap - start_tap;
976 		num_iter = host->tuning_loop_count;
977 		/*
978 		 * A partial window includes the edges of the tuning range. A
979 		 * merged window includes more taps, so its width is higher than
980 		 * the upper threshold.
981 		 */
982 		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
983 		    (end_tap == num_iter - 2) || window_width >= thdupper) {
984 			pr_debug("%s: Apply tuning correction\n",
985 				 mmc_hostname(host->mmc));
986 			tegra_sdhci_tap_correction(host, thdupper, thdlower,
987 						   fixed_tap);
988 		}
989 	}
990 
991 	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
992 }
993 
994 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
995 {
996 	struct sdhci_host *host = mmc_priv(mmc);
997 	int err;
998 
999 	err = sdhci_execute_tuning(mmc, opcode);
1000 	if (!err && !host->tuning_err)
1001 		tegra_sdhci_post_tuning(host);
1002 
1003 	return err;
1004 }
1005 
1006 static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1007 					  unsigned timing)
1008 {
1009 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1010 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1011 	bool set_default_tap = false;
1012 	bool set_dqs_trim = false;
1013 	bool do_hs400_dll_cal = false;
1014 	u8 iter = TRIES_256;
1015 	u32 val;
1016 
1017 	tegra_host->ddr_signaling = false;
1018 	switch (timing) {
1019 	case MMC_TIMING_UHS_SDR50:
1020 		break;
1021 	case MMC_TIMING_UHS_SDR104:
1022 	case MMC_TIMING_MMC_HS200:
1023 		/* Don't set default tap on tunable modes. */
1024 		iter = TRIES_128;
1025 		break;
1026 	case MMC_TIMING_MMC_HS400:
1027 		set_dqs_trim = true;
1028 		do_hs400_dll_cal = true;
1029 		iter = TRIES_128;
1030 		break;
1031 	case MMC_TIMING_MMC_DDR52:
1032 	case MMC_TIMING_UHS_DDR50:
1033 		tegra_host->ddr_signaling = true;
1034 		set_default_tap = true;
1035 		break;
1036 	default:
1037 		set_default_tap = true;
1038 		break;
1039 	}
1040 
1041 	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1042 	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1043 		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1044 		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1045 	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1046 		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1047 		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1048 	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1049 	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1050 
1051 	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1052 
1053 	sdhci_set_uhs_signaling(host, timing);
1054 
1055 	tegra_sdhci_pad_autocalib(host);
1056 
1057 	if (tegra_host->tuned_tap_delay && !set_default_tap)
1058 		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1059 	else
1060 		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1061 
1062 	if (set_dqs_trim)
1063 		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1064 
1065 	if (do_hs400_dll_cal)
1066 		tegra_sdhci_hs400_dll_cal(host);
1067 }
1068 
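/*
 * Software tuning fallback: sweep the tap value upwards to find the first
 * passing tap (min) and the last passing tap (max), then program the tap at
 * 75% of the passing window. For example, min = 10 and max = 130 gives
 * 10 + (120 * 3 / 4) = 100.
 */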
1069 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1070 {
1071 	unsigned int min, max;
1072 
1073 	/*
1074 	 * Start the search for the minimum tap value at 10, as smaller values
1075 	 * may wrongly be reported as working but fail at higher speeds,
1076 	 * according to the TRM.
1077 	 */
1078 	min = 10;
1079 	while (min < 255) {
1080 		tegra_sdhci_set_tap(host, min);
1081 		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1082 			break;
1083 		min++;
1084 	}
1085 
1086 	/* Find the maximum tap value that still passes. */
1087 	max = min + 1;
1088 	while (max < 255) {
1089 		tegra_sdhci_set_tap(host, max);
1090 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1091 			max--;
1092 			break;
1093 		}
1094 		max++;
1095 	}
1096 
1097 	/* The TRM states the ideal tap value is at 75% in the passing range. */
1098 	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1099 
1100 	return mmc_send_tuning(host->mmc, opcode, NULL);
1101 }
1102 
1103 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1104 						   struct mmc_ios *ios)
1105 {
1106 	struct sdhci_host *host = mmc_priv(mmc);
1107 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1108 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1109 	int ret = 0;
1110 
1111 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1112 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1113 		if (ret < 0)
1114 			return ret;
1115 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1116 	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1117 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1118 		if (ret < 0)
1119 			return ret;
1120 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1121 	}
1122 
1123 	if (tegra_host->pad_calib_required)
1124 		tegra_sdhci_pad_autocalib(host);
1125 
1126 	return ret;
1127 }
1128 
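/*
 * Look up the pinctrl states used for pad control: "sdmmc-3v3" and
 * "sdmmc-1v8" select the pad voltage and are required for pad control,
 * while "sdmmc-3v3-drv" and "sdmmc-1v8-drv" optionally provide fail-safe
 * drive strength settings.
 */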
1129 static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1130 					 struct sdhci_tegra *tegra_host)
1131 {
1132 	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1133 	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1134 		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1135 			PTR_ERR(tegra_host->pinctrl_sdmmc));
1136 		return -1;
1137 	}
1138 
1139 	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1140 				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1141 	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1142 		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1143 			tegra_host->pinctrl_state_1v8_drv = NULL;
1144 	}
1145 
1146 	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1147 				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1148 	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1149 		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1150 			tegra_host->pinctrl_state_3v3_drv = NULL;
1151 	}
1152 
1153 	tegra_host->pinctrl_state_3v3 =
1154 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1155 	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1156 		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1157 			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1158 		return -1;
1159 	}
1160 
1161 	tegra_host->pinctrl_state_1v8 =
1162 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1163 	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1164 		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1165 			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1166 		return -1;
1167 	}
1168 
1169 	tegra_host->pad_control_available = true;
1170 
1171 	return 0;
1172 }
1173 
1174 static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1175 {
1176 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1177 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1178 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1179 
1180 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1181 		tegra_host->pad_calib_required = true;
1182 }
1183 
1184 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1185 {
1186 	struct mmc_host *mmc = cq_host->mmc;
1187 	struct sdhci_host *host = mmc_priv(mmc);
1188 	u8 ctrl;
1189 	ktime_t timeout;
1190 	bool timed_out;
1191 
1192 	/*
1193 	 * During CQE resume/unhalt, the CQHCI driver unhalts CQE before the
1194 	 * cqhci_host_ops enable callback, where the SDHCI DMA and BLOCK_SIZE
1195 	 * registers need to be re-configured.
1196 	 * Tegra CQHCI/SDHCI prevents write access to the block size register
1197 	 * while CQE is unhalted, so handle the CQE resume sequence here and
1198 	 * configure the SDHCI block registers before exiting the CQE halt state.
1199 	 */
1200 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1201 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1202 		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1203 		sdhci_cqe_enable(mmc);
1204 		writel(val, cq_host->mmio + reg);
1205 		timeout = ktime_add_us(ktime_get(), 50);
1206 		while (1) {
1207 			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1208 			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1209 			if (!(ctrl & CQHCI_HALT) || timed_out)
1210 				break;
1211 		}
1212 		/*
1213 		 * CQE usually resumes very quickly, but in case the Tegra CQE
1214 		 * doesn't resume, retry the unhalt.
1215 		 */
1216 		if (timed_out)
1217 			writel(val, cq_host->mmio + reg);
1218 	} else {
1219 		writel(val, cq_host->mmio + reg);
1220 	}
1221 }
1222 
1223 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1224 					 struct mmc_request *mrq, u64 *data)
1225 {
1226 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1227 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1228 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1229 
1230 	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1231 	    mrq->cmd->flags & MMC_RSP_R1B)
1232 		*data |= CQHCI_CMD_TIMING(1);
1233 }
1234 
1235 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1236 {
1237 	struct cqhci_host *cq_host = mmc->cqe_private;
1238 	struct sdhci_host *host = mmc_priv(mmc);
1239 	u32 val;
1240 
1241 	/*
1242 	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1243 	 * register when CQE is enabled and unhalted.
1244 	 * The CQHCI driver enables CQE prior to activation, so disable CQE before
1245 	 * programming the block size in the SDHCI controller, then re-enable it.
1246 	 */
1247 	if (!cq_host->activated) {
1248 		val = cqhci_readl(cq_host, CQHCI_CFG);
1249 		if (val & CQHCI_ENABLE)
1250 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1251 				     CQHCI_CFG);
1252 		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1253 		sdhci_cqe_enable(mmc);
1254 		if (val & CQHCI_ENABLE)
1255 			cqhci_writel(cq_host, val, CQHCI_CFG);
1256 	}
1257 
1258 	/*
1259 	 * CMD CRC errors are sometimes seen with some eMMC devices when a status
1260 	 * command is sent during the transfer of the last data block, which is the
1261 	 * default case since the send status command block counter (CBC) is 1.
1262 	 * The recommended fix is to set CBC to 0, allowing the status command only
1263 	 * when the data lines are idle.
1264 	 */
1265 	val = cqhci_readl(cq_host, CQHCI_SSC1);
1266 	val &= ~CQHCI_SSC1_CBC_MASK;
1267 	cqhci_writel(cq_host, val, CQHCI_SSC1);
1268 }
1269 
1270 static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1271 {
1272 	sdhci_dumpregs(mmc_priv(mmc));
1273 }
1274 
1275 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1276 {
1277 	int cmd_error = 0;
1278 	int data_error = 0;
1279 
1280 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1281 		return intmask;
1282 
1283 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1284 
1285 	return 0;
1286 }
1287 
1288 static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1289 				    struct mmc_command *cmd)
1290 {
1291 	u32 val;
1292 
1293 	/*
1294 	 * The HW busy detection timeout is based on the programmed data timeout
1295 	 * counter, and the maximum supported timeout is 11 s, which may not be
1296 	 * enough for long operations like cache flush, sleep/awake or erase.
1297 	 *
1298 	 * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register makes the
1299 	 * host controller wait for the busy state to end without applying the
1300 	 * HW timeout.
1301 	 *
1302 	 * So, use the infinite busy wait mode for operations that may take more
1303 	 * than the maximum HW busy timeout of 11 s, otherwise use the finite
1304 	 * busy wait mode.
1305 	 */
1306 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1307 	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1308 		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1309 	else
1310 		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1311 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1312 
1313 	__sdhci_set_timeout(host, cmd);
1314 }
1315 
1316 static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1317 {
1318 	struct cqhci_host *cq_host = mmc->cqe_private;
1319 	u32 reg;
1320 
1321 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1322 	reg |= CQHCI_ENABLE;
1323 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1324 }
1325 
1326 static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1327 {
1328 	struct cqhci_host *cq_host = mmc->cqe_private;
1329 	struct sdhci_host *host = mmc_priv(mmc);
1330 	u32 reg;
1331 
1332 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1333 	reg &= ~CQHCI_ENABLE;
1334 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1335 	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1336 }
1337 
1338 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1339 	.write_l    = tegra_cqhci_writel,
1340 	.enable	= sdhci_tegra_cqe_enable,
1341 	.disable = sdhci_cqe_disable,
1342 	.dumpregs = sdhci_tegra_dumpregs,
1343 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1344 	.pre_enable = sdhci_tegra_cqe_pre_enable,
1345 	.post_disable = sdhci_tegra_cqe_post_disable,
1346 };
1347 
1348 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1349 {
1350 	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1351 	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1352 	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1353 	struct device *dev = mmc_dev(host->mmc);
1354 
1355 	if (soc->dma_mask)
1356 		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1357 
1358 	return 0;
1359 }
1360 
1361 static const struct sdhci_ops tegra_sdhci_ops = {
1362 	.get_ro     = tegra_sdhci_get_ro,
1363 	.read_w     = tegra_sdhci_readw,
1364 	.write_l    = tegra_sdhci_writel,
1365 	.set_clock  = tegra_sdhci_set_clock,
1366 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1367 	.set_bus_width = sdhci_set_bus_width,
1368 	.reset      = tegra_sdhci_reset,
1369 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1370 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1371 	.voltage_switch = tegra_sdhci_voltage_switch,
1372 	.get_max_clock = tegra_sdhci_get_max_clock,
1373 };
1374 
1375 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1376 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1377 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1378 		  SDHCI_QUIRK_NO_HISPD_BIT |
1379 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1380 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1381 	.ops  = &tegra_sdhci_ops,
1382 };
1383 
1384 static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1385 	.pdata = &sdhci_tegra20_pdata,
1386 	.dma_mask = DMA_BIT_MASK(32),
1387 	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1388 		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1389 		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1390 };
1391 
1392 static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1393 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1394 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1395 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1396 		  SDHCI_QUIRK_NO_HISPD_BIT |
1397 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1398 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1399 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1400 		   SDHCI_QUIRK2_BROKEN_HS200 |
1401 		   /*
1402 		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1403 		    * though no command operation was in progress."
1404 		    *
1405 		    * The exact reason is unknown, as the same hardware seems
1406 		    * to support Auto CMD23 on a downstream 3.1 kernel.
1407 		    */
1408 		   SDHCI_QUIRK2_ACMD23_BROKEN,
1409 	.ops  = &tegra_sdhci_ops,
1410 };
1411 
1412 static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1413 	.pdata = &sdhci_tegra30_pdata,
1414 	.dma_mask = DMA_BIT_MASK(32),
1415 	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1416 		    NVQUIRK_ENABLE_SDR50 |
1417 		    NVQUIRK_ENABLE_SDR104 |
1418 		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1419 		    NVQUIRK_HAS_PADCALIB,
1420 };
1421 
1422 static const struct sdhci_ops tegra114_sdhci_ops = {
1423 	.get_ro     = tegra_sdhci_get_ro,
1424 	.read_w     = tegra_sdhci_readw,
1425 	.write_w    = tegra_sdhci_writew,
1426 	.write_l    = tegra_sdhci_writel,
1427 	.set_clock  = tegra_sdhci_set_clock,
1428 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1429 	.set_bus_width = sdhci_set_bus_width,
1430 	.reset      = tegra_sdhci_reset,
1431 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1432 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1433 	.voltage_switch = tegra_sdhci_voltage_switch,
1434 	.get_max_clock = tegra_sdhci_get_max_clock,
1435 };
1436 
1437 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1438 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1439 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1440 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1441 		  SDHCI_QUIRK_NO_HISPD_BIT |
1442 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1443 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1444 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1445 	.ops  = &tegra114_sdhci_ops,
1446 };
1447 
1448 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1449 	.pdata = &sdhci_tegra114_pdata,
1450 	.dma_mask = DMA_BIT_MASK(32),
1451 	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1452 };
1453 
1454 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1455 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1456 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1457 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1458 		  SDHCI_QUIRK_NO_HISPD_BIT |
1459 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1460 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1461 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1462 	.ops  = &tegra114_sdhci_ops,
1463 };
1464 
1465 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1466 	.pdata = &sdhci_tegra124_pdata,
1467 	.dma_mask = DMA_BIT_MASK(34),
1468 	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1469 };
1470 
1471 static const struct sdhci_ops tegra210_sdhci_ops = {
1472 	.get_ro     = tegra_sdhci_get_ro,
1473 	.read_w     = tegra_sdhci_readw,
1474 	.write_w    = tegra210_sdhci_writew,
1475 	.write_l    = tegra_sdhci_writel,
1476 	.set_clock  = tegra_sdhci_set_clock,
1477 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1478 	.set_bus_width = sdhci_set_bus_width,
1479 	.reset      = tegra_sdhci_reset,
1480 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1481 	.voltage_switch = tegra_sdhci_voltage_switch,
1482 	.get_max_clock = tegra_sdhci_get_max_clock,
1483 	.set_timeout = tegra_sdhci_set_timeout,
1484 };
1485 
1486 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1487 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1488 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1489 		  SDHCI_QUIRK_NO_HISPD_BIT |
1490 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1491 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1492 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1493 	.ops  = &tegra210_sdhci_ops,
1494 };
1495 
1496 static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1497 	.pdata = &sdhci_tegra210_pdata,
1498 	.dma_mask = DMA_BIT_MASK(34),
1499 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1500 		    NVQUIRK_HAS_PADCALIB |
1501 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1502 		    NVQUIRK_ENABLE_SDR50 |
1503 		    NVQUIRK_ENABLE_SDR104 |
1504 		    NVQUIRK_HAS_TMCLK,
1505 	.min_tap_delay = 106,
1506 	.max_tap_delay = 185,
1507 };
1508 
1509 static const struct sdhci_ops tegra186_sdhci_ops = {
1510 	.get_ro     = tegra_sdhci_get_ro,
1511 	.read_w     = tegra_sdhci_readw,
1512 	.write_l    = tegra_sdhci_writel,
1513 	.set_clock  = tegra_sdhci_set_clock,
1514 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1515 	.set_bus_width = sdhci_set_bus_width,
1516 	.reset      = tegra_sdhci_reset,
1517 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1518 	.voltage_switch = tegra_sdhci_voltage_switch,
1519 	.get_max_clock = tegra_sdhci_get_max_clock,
1520 	.irq = sdhci_tegra_cqhci_irq,
1521 	.set_timeout = tegra_sdhci_set_timeout,
1522 };
1523 
1524 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1525 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1526 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1527 		  SDHCI_QUIRK_NO_HISPD_BIT |
1528 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1529 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1530 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1531 		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
1532 	.ops  = &tegra186_sdhci_ops,
1533 };
1534 
1535 static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1536 	.pdata = &sdhci_tegra186_pdata,
1537 	.dma_mask = DMA_BIT_MASK(40),
1538 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1539 		    NVQUIRK_HAS_PADCALIB |
1540 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1541 		    NVQUIRK_ENABLE_SDR50 |
1542 		    NVQUIRK_ENABLE_SDR104 |
1543 		    NVQUIRK_HAS_TMCLK |
1544 		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1545 	.min_tap_delay = 84,
1546 	.max_tap_delay = 136,
1547 };
1548 
1549 static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1550 	.pdata = &sdhci_tegra186_pdata,
1551 	.dma_mask = DMA_BIT_MASK(39),
1552 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1553 		    NVQUIRK_HAS_PADCALIB |
1554 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1555 		    NVQUIRK_ENABLE_SDR50 |
1556 		    NVQUIRK_ENABLE_SDR104 |
1557 		    NVQUIRK_HAS_TMCLK,
1558 	.min_tap_delay = 96,
1559 	.max_tap_delay = 139,
1560 };
1561 
1562 static const struct sdhci_tegra_soc_data soc_data_tegra234 = {
1563 	.pdata = &sdhci_tegra186_pdata,
1564 	.dma_mask = DMA_BIT_MASK(39),
1565 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1566 		    NVQUIRK_HAS_PADCALIB |
1567 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1568 		    NVQUIRK_ENABLE_SDR50 |
1569 		    NVQUIRK_ENABLE_SDR104 |
1570 		    NVQUIRK_PROGRAM_STREAMID |
1571 		    NVQUIRK_HAS_TMCLK,
1572 	.min_tap_delay = 95,
1573 	.max_tap_delay = 111,
1574 };
1575 
1576 static const struct of_device_id sdhci_tegra_dt_match[] = {
1577 	{ .compatible = "nvidia,tegra234-sdhci", .data = &soc_data_tegra234 },
1578 	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1579 	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1580 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1581 	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1582 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1583 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1584 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1585 	{}
1586 };
1587 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1588 
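/*
 * If "supports-cqe" was found in the device tree, switch the controller to
 * SDHCI v4 mode and register the CQHCI engine at vendor offset 0xf000;
 * otherwise fall back to the plain sdhci_add_host() path.
 */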
1589 static int sdhci_tegra_add_host(struct sdhci_host *host)
1590 {
1591 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1592 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1593 	struct cqhci_host *cq_host;
1594 	bool dma64;
1595 	int ret;
1596 
1597 	if (!tegra_host->enable_hwcq)
1598 		return sdhci_add_host(host);
1599 
1600 	sdhci_enable_v4_mode(host);
1601 
1602 	ret = sdhci_setup_host(host);
1603 	if (ret)
1604 		return ret;
1605 
1606 	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1607 
1608 	cq_host = devm_kzalloc(mmc_dev(host->mmc),
1609 				sizeof(*cq_host), GFP_KERNEL);
1610 	if (!cq_host) {
1611 		ret = -ENOMEM;
1612 		goto cleanup;
1613 	}
1614 
1615 	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1616 	cq_host->ops = &sdhci_tegra_cqhci_ops;
1617 
1618 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1619 	if (dma64)
1620 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1621 
1622 	ret = cqhci_init(cq_host, host->mmc, dma64);
1623 	if (ret)
1624 		goto cleanup;
1625 
1626 	ret = __sdhci_add_host(host);
1627 	if (ret)
1628 		goto cleanup;
1629 
1630 	return 0;
1631 
1632 cleanup:
1633 	sdhci_cleanup_host(host);
1634 	return ret;
1635 }
1636 
1637 /* Program MC streamID for DMA transfers */
1638 static void sdhci_tegra_program_stream_id(struct sdhci_host *host)
1639 {
1640 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1641 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1642 
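	/*
	 * The same stream ID is written to both byte fields of CIF2AXI_CTRL_0,
	 * presumably covering the read and write DMA clients.
	 */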
1643 	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID) {
1644 		tegra_sdhci_writel(host, FIELD_PREP(GENMASK(15, 8), tegra_host->stream_id) |
1645 					 FIELD_PREP(GENMASK(7, 0), tegra_host->stream_id),
1646 					 SDHCI_TEGRA_CIF2AXI_CTRL_0);
1647 	}
1648 }
1649 
1650 static int sdhci_tegra_probe(struct platform_device *pdev)
1651 {
1652 	const struct sdhci_tegra_soc_data *soc_data;
1653 	struct sdhci_host *host;
1654 	struct sdhci_pltfm_host *pltfm_host;
1655 	struct sdhci_tegra *tegra_host;
1656 	struct clk *clk;
1657 	int rc;
1658 
1659 	soc_data = of_device_get_match_data(&pdev->dev);
1660 	if (!soc_data)
1661 		return -EINVAL;
1662 
1663 	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1664 	if (IS_ERR(host))
1665 		return PTR_ERR(host);
1666 	pltfm_host = sdhci_priv(host);
1667 
1668 	tegra_host = sdhci_pltfm_priv(pltfm_host);
1669 	tegra_host->ddr_signaling = false;
1670 	tegra_host->pad_calib_required = false;
1671 	tegra_host->pad_control_available = false;
1672 	tegra_host->soc_data = soc_data;
1673 
1674 	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
1675 		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
1676 
1677 	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1678 		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1679 		if (rc == 0)
1680 			host->mmc_host_ops.start_signal_voltage_switch =
1681 				sdhci_tegra_start_signal_voltage_switch;
1682 	}
1683 
1684 	/* Hook to periodically rerun pad calibration */
1685 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1686 		host->mmc_host_ops.request = tegra_sdhci_request;
1687 
1688 	host->mmc_host_ops.hs400_enhanced_strobe =
1689 			tegra_sdhci_hs400_enhanced_strobe;
1690 
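	/*
	 * Fall back to the Tegra hardware tuning sequence unless the SoC ops
	 * already provide platform_execute_tuning.
	 */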
1691 	if (!host->ops->platform_execute_tuning)
1692 		host->mmc_host_ops.execute_tuning =
1693 				tegra_sdhci_execute_hw_tuning;
1694 
1695 	rc = mmc_of_parse(host->mmc);
1696 	if (rc)
1697 		goto err_parse_dt;
1698 
1699 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1700 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1701 
1702 	/* HW busy detection is supported, but R1B responses are required. */
1703 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1704 
1705 	/* GPIO CD can be set as a wakeup source */
1706 	host->mmc->caps |= MMC_CAP_CD_WAKE;
1707 
1708 	tegra_sdhci_parse_dt(host);
1709 
1710 	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID &&
1711 	    !tegra_dev_iommu_get_stream_id(&pdev->dev, &tegra_host->stream_id)) {
1712 		dev_warn(mmc_dev(host->mmc), "missing IOMMU stream ID\n");
1713 		tegra_host->stream_id = 0x7f;
1714 	}
1715 
1716 	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1717 							 GPIOD_OUT_HIGH);
1718 	if (IS_ERR(tegra_host->power_gpio)) {
1719 		rc = PTR_ERR(tegra_host->power_gpio);
1720 		goto err_power_req;
1721 	}
1722 
1723 	/*
1724 	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used as the host
1725 	 * timeout clock; SW can choose either TMCLK or SDCLK for the hardware
1726 	 * data timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit of the
1727 	 * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
1728 	 *
1729 	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
1730 	 * 12 MHz TMCLK, which is advertised in the host capability register.
1731 	 * A 12 MHz TMCLK provides a maximum data timeout period of about 11 s,
1732 	 * which is better than what can be achieved with SDCLK.
1733 	 *
1734 	 * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs
1735 	 * supporting a separate TMCLK.
1736 	 */
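	/*
	 * Illustrative check (not taken from the code above): the SDHCI data
	 * timeout counter tops out at 2^27 timeout-clock cycles, so a 12 MHz
	 * TMCLK allows at most 2^27 / 12000000 ~= 11.2 seconds.
	 */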
1737 
1738 	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1739 		clk = devm_clk_get(&pdev->dev, "tmclk");
1740 		if (IS_ERR(clk)) {
1741 			rc = PTR_ERR(clk);
1742 			if (rc == -EPROBE_DEFER)
1743 				goto err_power_req;
1744 
1745 			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1746 			clk = NULL;
1747 		}
1748 
1749 		clk_set_rate(clk, 12000000);
1750 		rc = clk_prepare_enable(clk);
1751 		if (rc) {
1752 			dev_err(&pdev->dev,
1753 				"failed to enable tmclk: %d\n", rc);
1754 			goto err_power_req;
1755 		}
1756 
1757 		tegra_host->tmclk = clk;
1758 	}
1759 
1760 	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1761 	if (IS_ERR(clk)) {
1762 		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1763 				   "failed to get clock\n");
1764 		goto err_clk_get;
1765 	}
1766 	pltfm_host->clk = clk;
1767 
1768 	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1769 							   "sdhci");
1770 	if (IS_ERR(tegra_host->rst)) {
1771 		rc = PTR_ERR(tegra_host->rst);
1772 		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1773 		goto err_rst_get;
1774 	}
1775 
1776 	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1777 	if (rc)
1778 		goto err_rst_get;
1779 
1780 	pm_runtime_enable(&pdev->dev);
1781 	rc = pm_runtime_resume_and_get(&pdev->dev);
1782 	if (rc)
1783 		goto err_pm_get;
1784 
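	/*
	 * Pulse the module reset, giving the controller a couple of
	 * milliseconds to settle after each edge.
	 */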
1785 	rc = reset_control_assert(tegra_host->rst);
1786 	if (rc)
1787 		goto err_rst_assert;
1788 
1789 	usleep_range(2000, 4000);
1790 
1791 	rc = reset_control_deassert(tegra_host->rst);
1792 	if (rc)
1793 		goto err_rst_assert;
1794 
1795 	usleep_range(2000, 4000);
1796 
1797 	rc = sdhci_tegra_add_host(host);
1798 	if (rc)
1799 		goto err_add_host;
1800 
1801 	sdhci_tegra_program_stream_id(host);
1802 
1803 	return 0;
1804 
1805 err_add_host:
1806 	reset_control_assert(tegra_host->rst);
1807 err_rst_assert:
1808 	pm_runtime_put_sync_suspend(&pdev->dev);
1809 err_pm_get:
1810 	pm_runtime_disable(&pdev->dev);
1811 err_rst_get:
1812 err_clk_get:
1813 	clk_disable_unprepare(tegra_host->tmclk);
1814 err_power_req:
1815 err_parse_dt:
1816 	sdhci_pltfm_free(pdev);
1817 	return rc;
1818 }
1819 
1820 static void sdhci_tegra_remove(struct platform_device *pdev)
1821 {
1822 	struct sdhci_host *host = platform_get_drvdata(pdev);
1823 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1824 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1825 
1826 	sdhci_remove_host(host, 0);
1827 
1828 	reset_control_assert(tegra_host->rst);
1829 	usleep_range(2000, 4000);
1830 
1831 	pm_runtime_put_sync_suspend(&pdev->dev);
1832 	pm_runtime_force_suspend(&pdev->dev);
1833 
1834 	clk_disable_unprepare(tegra_host->tmclk);
1835 	sdhci_pltfm_free(pdev);
1836 }
1837 
1838 static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
1839 {
1840 	struct sdhci_host *host = dev_get_drvdata(dev);
1841 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1842 
1843 	clk_disable_unprepare(pltfm_host->clk);
1844 
1845 	return 0;
1846 }
1847 
1848 static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
1849 {
1850 	struct sdhci_host *host = dev_get_drvdata(dev);
1851 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1852 
1853 	return clk_prepare_enable(pltfm_host->clk);
1854 }
1855 
1856 #ifdef CONFIG_PM_SLEEP
1857 static int sdhci_tegra_suspend(struct device *dev)
1858 {
1859 	struct sdhci_host *host = dev_get_drvdata(dev);
1860 	int ret;
1861 
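	/* Quiesce the command queue engine before suspending the SDHCI host. */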
1862 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1863 		ret = cqhci_suspend(host->mmc);
1864 		if (ret)
1865 			return ret;
1866 	}
1867 
1868 	ret = sdhci_suspend_host(host);
1869 	if (ret) {
1870 		cqhci_resume(host->mmc);
1871 		return ret;
1872 	}
1873 
1874 	ret = pm_runtime_force_suspend(dev);
1875 	if (ret) {
1876 		sdhci_resume_host(host);
1877 		cqhci_resume(host->mmc);
1878 		return ret;
1879 	}
1880 
1881 	return mmc_gpio_set_cd_wake(host->mmc, true);
1882 }
1883 
1884 static int sdhci_tegra_resume(struct device *dev)
1885 {
1886 	struct sdhci_host *host = dev_get_drvdata(dev);
1887 	int ret;
1888 
1889 	ret = mmc_gpio_set_cd_wake(host->mmc, false);
1890 	if (ret)
1891 		return ret;
1892 
1893 	ret = pm_runtime_force_resume(dev);
1894 	if (ret)
1895 		return ret;
1896 
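	/*
	 * Reprogram the MC stream ID; the vendor register contents may have
	 * been lost while the controller was powered down.
	 */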
1897 	sdhci_tegra_program_stream_id(host);
1898 
1899 	ret = sdhci_resume_host(host);
1900 	if (ret)
1901 		goto disable_clk;
1902 
1903 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1904 		ret = cqhci_resume(host->mmc);
1905 		if (ret)
1906 			goto suspend_host;
1907 	}
1908 
1909 	return 0;
1910 
1911 suspend_host:
1912 	sdhci_suspend_host(host);
1913 disable_clk:
1914 	pm_runtime_force_suspend(dev);
1915 	return ret;
1916 }
1917 #endif
1918 
1919 static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
1920 	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
1921 			   NULL)
1922 	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
1923 };
1924 
1925 static struct platform_driver sdhci_tegra_driver = {
1926 	.driver		= {
1927 		.name	= "sdhci-tegra",
1928 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1929 		.of_match_table = sdhci_tegra_dt_match,
1930 		.pm	= &sdhci_tegra_dev_pm_ops,
1931 	},
1932 	.probe		= sdhci_tegra_probe,
1933 	.remove_new	= sdhci_tegra_remove,
1934 };
1935 
1936 module_platform_driver(sdhci_tegra_driver);
1937 
1938 MODULE_DESCRIPTION("SDHCI driver for Tegra");
1939 MODULE_AUTHOR("Google, Inc.");
1940 MODULE_LICENSE("GPL v2");
1941