xref: /linux/drivers/mmc/host/sdhci-uhs2.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  linux/drivers/mmc/host/sdhci_uhs2.c - Secure Digital Host Controller
4  *  Interface driver
5  *
6  *  Copyright (C) 2014 Intel Corp, All Rights Reserved.
7  *  Copyright (C) 2020 Genesys Logic, Inc.
8  *  Authors: Ben Chuang <ben.chuang@genesyslogic.com.tw>
9  *  Copyright (C) 2020 Linaro Limited
10  *  Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/module.h>
15 #include <linux/iopoll.h>
16 #include <linux/bitfield.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/mmc/mmc.h>
19 #include <linux/mmc/host.h>
20 
21 #include "sdhci.h"
22 #include "sdhci-uhs2.h"
23 
24 #define DRIVER_NAME "sdhci_uhs2"
25 #define DBG(f, x...) \
26 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
27 #define SDHCI_UHS2_DUMP(f, x...) \
28 	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
29 
30 #define UHS2_RESET_TIMEOUT_100MS		100000
31 #define UHS2_CHECK_DORMANT_TIMEOUT_100MS	100000
32 #define UHS2_INTERFACE_DETECT_TIMEOUT_100MS	100000
33 #define UHS2_LANE_SYNC_TIMEOUT_150MS		150000
34 
35 #define UHS2_ARG_IOADR_MASK 0xfff
36 
/*
 * sdhci_uhs2_dump_regs - dump the UHS-II register set to the kernel log
 * @host: SDHCI host
 *
 * Does nothing unless the attached card is operating in UHS-II mode.
 * Output goes through SDHCI_UHS2_DUMP (pr_err), so it is visible even
 * when debug logging is disabled.
 */
void sdhci_uhs2_dump_regs(struct sdhci_host *host)
{
	if (!(mmc_card_uhs2(host->mmc)))
		return;

	SDHCI_UHS2_DUMP("==================== UHS2 ==================\n");
	SDHCI_UHS2_DUMP("Blk Size:  0x%08x | Blk Cnt:  0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_BLOCK_SIZE),
			sdhci_readl(host, SDHCI_UHS2_BLOCK_COUNT));
	SDHCI_UHS2_DUMP("Cmd:       0x%08x | Trn mode: 0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_CMD),
			sdhci_readw(host, SDHCI_UHS2_TRANS_MODE));
	SDHCI_UHS2_DUMP("Int Stat:  0x%08x | Dev Sel : 0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_DEV_INT_STATUS),
			sdhci_readb(host, SDHCI_UHS2_DEV_SELECT));
	SDHCI_UHS2_DUMP("Dev Int Code:  0x%08x\n",
			sdhci_readb(host, SDHCI_UHS2_DEV_INT_CODE));
	SDHCI_UHS2_DUMP("Reset:     0x%08x | Timer:    0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_SW_RESET),
			sdhci_readw(host, SDHCI_UHS2_TIMER_CTRL));
	SDHCI_UHS2_DUMP("ErrInt:    0x%08x | ErrIntEn: 0x%08x\n",
			sdhci_readl(host, SDHCI_UHS2_INT_STATUS),
			sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE));
	SDHCI_UHS2_DUMP("ErrSigEn:  0x%08x\n",
			sdhci_readl(host, SDHCI_UHS2_INT_SIGNAL_ENABLE));
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_dump_regs);
64 
65 /*****************************************************************************\
66  *                                                                           *
67  * Low level functions                                                       *
68  *                                                                           *
69 \*****************************************************************************/
70 
71 static inline u16 uhs2_dev_cmd(struct mmc_command *cmd)
72 {
73 	return be16_to_cpu((__force __be16)cmd->uhs2_cmd->arg) & UHS2_ARG_IOADR_MASK;
74 }
75 
/* Like mmc_regulator_set_ocr() but silently succeeds when no supply exists */
static inline int mmc_opt_regulator_set_ocr(struct mmc_host *mmc,
					    struct regulator *supply,
					    unsigned short vdd_bit)
{
	if (IS_ERR_OR_NULL(supply))
		return 0;

	return mmc_regulator_set_ocr(mmc, supply, vdd_bit);
}
82 
83 /**
84  * sdhci_uhs2_reset - invoke SW reset
85  * @host: SDHCI host
86  * @mask: Control mask
87  *
88  * Invoke SW reset, depending on a bit in @mask and wait for completion.
89  */
90 void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask)
91 {
92 	u32 val;
93 
94 	sdhci_writew(host, mask, SDHCI_UHS2_SW_RESET);
95 
96 	if (mask & SDHCI_UHS2_SW_RESET_FULL)
97 		host->clock = 0;
98 
99 	/* hw clears the bit when it's done */
100 	if (read_poll_timeout_atomic(sdhci_readw, val, !(val & mask), 10,
101 				     UHS2_RESET_TIMEOUT_100MS, true, host, SDHCI_UHS2_SW_RESET)) {
102 		pr_warn("%s: %s: Reset 0x%x never completed. %s: clean reset bit.\n", __func__,
103 			mmc_hostname(host->mmc), (int)mask, mmc_hostname(host->mmc));
104 		sdhci_writeb(host, 0, SDHCI_UHS2_SW_RESET);
105 		return;
106 	}
107 }
108 EXPORT_SYMBOL_GPL(sdhci_uhs2_reset);
109 
/*
 * Reset the SD command/data circuits and, when a UHS-II link is active
 * (uhs2_sd_tran), also the UHS-II SD block.
 */
static void sdhci_uhs2_reset_cmd_data(struct sdhci_host *host)
{
	sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	if (host->mmc->uhs2_sd_tran) {
		sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);

		/*
		 * SDHCI_UHS2_SW_RESET_SD clears SDHCI_INT_ENABLE and
		 * SDHCI_SIGNAL_ENABLE (see note in sdhci_uhs2_do_detect_init),
		 * so restore them and re-arm the UHS-II error interrupts.
		 */
		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
	}
}
122 
/*
 * sdhci_uhs2_set_power - program VDD1/VDD2 power for a UHS-II host
 * @host: SDHCI host
 * @mode: MMC power mode (MMC_POWER_OFF / _UP / _ON)
 * @vdd:  VDD1 voltage bit from the ios
 *
 * VDD2 (the UHS-II supply) is hard-wired to 1.8 V here.  The power-on
 * sequence is order-critical: VDD1 is raised first, then VDD2, with a
 * settle delay after each write.
 */
void sdhci_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		pwr = sdhci_get_vdd_value(vdd);
		if (!pwr)
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
		/* VDD2 fixed at 1.8 V */
		pwr |= SDHCI_VDD2_POWER_180;
	}

	/* Nothing to do if the requested power state is unchanged */
	if (host->pwr == pwr)
		return;
	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		mmc_regulator_set_vqmmc2(mmc, &mmc->ios);
	} else {
		mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/* support 1.8v only for now */
		mmc_regulator_set_vqmmc2(mmc, &mmc->ios);

		/* Clear the power reg before setting a new value */
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/* vdd first */
		pwr |= SDHCI_POWER_ON;
		sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
		mdelay(5);

		/* then VDD2 */
		pwr |= SDHCI_VDD2_POWER_ON;
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
		mdelay(5);
	}
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_set_power);
164 
165 static u8 sdhci_calc_timeout_uhs2(struct sdhci_host *host, u8 *cmd_res, u8 *dead_lock)
166 {
167 	/* timeout in us */
168 	unsigned int dead_lock_timeout = 1 * 1000 * 1000;
169 	unsigned int cmd_res_timeout = 5 * 1000;
170 	unsigned int current_timeout;
171 	u8 count;
172 
173 	/*
174 	 * Figure out needed cycles.
175 	 * We do this in steps in order to fit inside a 32 bit int.
176 	 * The first step is the minimum timeout, which will have a
177 	 * minimum resolution of 6 bits:
178 	 * (1) 2^13*1000 > 2^22,
179 	 * (2) host->timeout_clk < 2^16
180 	 *     =>
181 	 *     (1) / (2) > 2^6
182 	 */
183 	count = 0;
184 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
185 	while (current_timeout < cmd_res_timeout) {
186 		count++;
187 		current_timeout <<= 1;
188 		if (count >= 0xF)
189 			break;
190 	}
191 
192 	if (count >= 0xF) {
193 		DBG("%s: Too large timeout 0x%x requested for CMD_RES!\n",
194 		    mmc_hostname(host->mmc), count);
195 		count = 0xE;
196 	}
197 	*cmd_res = count;
198 
199 	count = 0;
200 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
201 	while (current_timeout < dead_lock_timeout) {
202 		count++;
203 		current_timeout <<= 1;
204 		if (count >= 0xF)
205 			break;
206 	}
207 
208 	if (count >= 0xF) {
209 		DBG("%s: Too large timeout 0x%x requested for DEADLOCK!\n",
210 		    mmc_hostname(host->mmc), count);
211 		count = 0xE;
212 	}
213 	*dead_lock = count;
214 
215 	return count;
216 }
217 
218 static void __sdhci_uhs2_set_timeout(struct sdhci_host *host)
219 {
220 	u8 cmd_res, dead_lock;
221 
222 	sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);
223 	cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);
224 	sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
225 }
226 
227 void sdhci_uhs2_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
228 {
229 	__sdhci_set_timeout(host, cmd);
230 
231 	if (mmc_card_uhs2(host->mmc))
232 		__sdhci_uhs2_set_timeout(host);
233 }
234 EXPORT_SYMBOL_GPL(sdhci_uhs2_set_timeout);
235 
236 /**
237  * sdhci_uhs2_clear_set_irqs - set Error Interrupt Status Enable register
238  * @host:	SDHCI host
239  * @clear:	bit-wise clear mask
240  * @set:	bit-wise set mask
241  *
242  * Set/unset bits in UHS-II Error Interrupt Status Enable register
243  */
244 void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
245 {
246 	u32 ier;
247 
248 	ier = sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE);
249 	ier &= ~clear;
250 	ier |= set;
251 	sdhci_writel(host, ier, SDHCI_UHS2_INT_STATUS_ENABLE);
252 	sdhci_writel(host, ier, SDHCI_UHS2_INT_SIGNAL_ENABLE);
253 }
254 EXPORT_SYMBOL_GPL(sdhci_uhs2_clear_set_irqs);
255 
/*
 * Apply UHS-II specific ios settings: timeout timer, UHS-II timing mode,
 * power and clock.  Assumes the common ios handling has already run.
 */
static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 cmd_res, dead_lock;
	u16 ctrl_2;

	/* UHS2 Timeout Control */
	sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);

	/* change to use calculate value */
	cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);

	/* Mask the timeout irqs while the timer register is reprogrammed */
	sdhci_uhs2_clear_set_irqs(host,
				  SDHCI_UHS2_INT_CMD_TIMEOUT |
				  SDHCI_UHS2_INT_DEADLOCK_TIMEOUT,
				  0);
	sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
	sdhci_uhs2_clear_set_irqs(host, 0,
				  SDHCI_UHS2_INT_CMD_TIMEOUT |
				  SDHCI_UHS2_INT_DEADLOCK_TIMEOUT);

	/* UHS2 timing. Note, UHS2 timing is disabled when powering off */
	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ios->power_mode != MMC_POWER_OFF &&
	    (ios->timing == MMC_TIMING_UHS2_SPEED_A ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_A_HD ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_B ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_B_HD))
		ctrl_2 |= SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE;
	else
		ctrl_2 &= ~(SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
	host->timing = ios->timing;

	if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, true);

	/* Prefer the host driver's power hook when one is provided */
	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);

	sdhci_set_clock(host, host->clock);
}
300 
301 static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
302 {
303 	struct sdhci_host *host = mmc_priv(mmc);
304 
305 	pr_debug("%s: clock %uHz powermode %u Vdd %u timing %u\n",
306 		 mmc_hostname(mmc), ios->clock, ios->power_mode, ios->vdd, ios->timing);
307 
308 	if (!mmc_card_uhs2(mmc)) {
309 		sdhci_set_ios(mmc, ios);
310 		return 0;
311 	}
312 
313 	if (ios->power_mode == MMC_POWER_UNDEFINED)
314 		return 0;
315 
316 	if (host->flags & SDHCI_DEVICE_DEAD) {
317 		if (ios->power_mode == MMC_POWER_OFF) {
318 			mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
319 			mmc_regulator_set_vqmmc2(mmc, ios);
320 		}
321 		return -1;
322 	}
323 
324 	sdhci_set_ios_common(mmc, ios);
325 
326 	__sdhci_uhs2_set_ios(mmc, ios);
327 
328 	return 0;
329 }
330 
/*
 * Wait for the UHS-II interface to be detected and the lanes to
 * synchronize.  UHS-II error interrupts are enabled between the two
 * polls.  Returns 0 on success, -EIO on either timeout.
 */
static int sdhci_uhs2_interface_detect(struct sdhci_host *host)
{
	u32 val;

	/* Poll up to 100 ms for interface detection */
	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IF_DETECT),
			      100, UHS2_INTERFACE_DETECT_TIMEOUT_100MS, true,
			      host, SDHCI_PRESENT_STATE)) {
		pr_warn("%s: not detect UHS2 interface in 100ms.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);
		return -EIO;
	}

	/* Enable UHS2 error interrupts */
	sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);

	/* Poll up to 150 ms for lane synchronization */
	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_LANE_SYNC),
			      100, UHS2_LANE_SYNC_TIMEOUT_150MS, true, host, SDHCI_PRESENT_STATE)) {
		pr_warn("%s: UHS2 Lane sync fail in 150ms.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);
		return -EIO;
	}

	DBG("%s: UHS2 Lane synchronized in UHS2 mode, PHY is initialized.\n",
	    mmc_hostname(host->mmc));
	return 0;
}
357 
358 static int sdhci_uhs2_init(struct sdhci_host *host)
359 {
360 	u16 caps_ptr = 0;
361 	u32 caps_gen = 0;
362 	u32 caps_phy = 0;
363 	u32 caps_tran[2] = {0, 0};
364 	struct mmc_host *mmc = host->mmc;
365 
366 	caps_ptr = sdhci_readw(host, SDHCI_UHS2_CAPS_PTR);
367 	if (caps_ptr < 0x100 || caps_ptr > 0x1FF) {
368 		pr_err("%s: SDHCI_UHS2_CAPS_PTR(%d) is wrong.\n",
369 		       mmc_hostname(mmc), caps_ptr);
370 		return -ENODEV;
371 	}
372 	caps_gen = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_OFFSET);
373 	caps_phy = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_PHY_OFFSET);
374 	caps_tran[0] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_OFFSET);
375 	caps_tran[1] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_1_OFFSET);
376 
377 	/* General Caps */
378 	mmc->uhs2_caps.dap = caps_gen & SDHCI_UHS2_CAPS_DAP_MASK;
379 	mmc->uhs2_caps.gap = FIELD_GET(SDHCI_UHS2_CAPS_GAP_MASK, caps_gen);
380 	mmc->uhs2_caps.n_lanes = FIELD_GET(SDHCI_UHS2_CAPS_LANE_MASK, caps_gen);
381 	mmc->uhs2_caps.addr64 =	(caps_gen & SDHCI_UHS2_CAPS_ADDR_64) ? 1 : 0;
382 	mmc->uhs2_caps.card_type = FIELD_GET(SDHCI_UHS2_CAPS_DEV_TYPE_MASK, caps_gen);
383 
384 	/* PHY Caps */
385 	mmc->uhs2_caps.phy_rev = caps_phy & SDHCI_UHS2_CAPS_PHY_REV_MASK;
386 	mmc->uhs2_caps.speed_range = FIELD_GET(SDHCI_UHS2_CAPS_PHY_RANGE_MASK, caps_phy);
387 	mmc->uhs2_caps.n_lss_sync = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_SYN_MASK, caps_phy);
388 	mmc->uhs2_caps.n_lss_dir = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_DIR_MASK, caps_phy);
389 	if (mmc->uhs2_caps.n_lss_sync == 0)
390 		mmc->uhs2_caps.n_lss_sync = 16 << 2;
391 	else
392 		mmc->uhs2_caps.n_lss_sync <<= 2;
393 	if (mmc->uhs2_caps.n_lss_dir == 0)
394 		mmc->uhs2_caps.n_lss_dir = 16 << 3;
395 	else
396 		mmc->uhs2_caps.n_lss_dir <<= 3;
397 
398 	/* LINK/TRAN Caps */
399 	mmc->uhs2_caps.link_rev = caps_tran[0] & SDHCI_UHS2_CAPS_TRAN_LINK_REV_MASK;
400 	mmc->uhs2_caps.n_fcu = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_N_FCU_MASK, caps_tran[0]);
401 	if (mmc->uhs2_caps.n_fcu == 0)
402 		mmc->uhs2_caps.n_fcu = 256;
403 	mmc->uhs2_caps.host_type = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_HOST_TYPE_MASK, caps_tran[0]);
404 	mmc->uhs2_caps.maxblk_len = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_BLK_LEN_MASK, caps_tran[0]);
405 	mmc->uhs2_caps.n_data_gap = caps_tran[1] & SDHCI_UHS2_CAPS_TRAN_1_N_DATA_GAP_MASK;
406 
407 	return 0;
408 }
409 
/*
 * Full UHS-II bring-up: optional driver pre-detect hook, interface/lane
 * detection, capability read-out, then a soft reset with interrupts
 * re-armed.  Returns 0 on success, -EIO on failure.
 */
static int sdhci_uhs2_do_detect_init(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	DBG("Begin do uhs2 detect init.\n");

	/* Give the host driver a chance to prepare before detection */
	if (host->ops->uhs2_pre_detect_init)
		host->ops->uhs2_pre_detect_init(host);

	if (sdhci_uhs2_interface_detect(host)) {
		pr_warn("%s: cannot detect UHS2 interface.\n", mmc_hostname(host->mmc));
		return -EIO;
	}

	if (sdhci_uhs2_init(host)) {
		pr_warn("%s: UHS2 init fail.\n", mmc_hostname(host->mmc));
		return -EIO;
	}

	/* Init complete, do soft reset and enable UHS2 error irqs. */
	sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
	sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
	/*
	 * N.B SDHCI_INT_ENABLE and SDHCI_SIGNAL_ENABLE was cleared
	 * by SDHCI_UHS2_SW_RESET_SD
	 */
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	return 0;
}
441 
442 static int sdhci_uhs2_disable_clk(struct mmc_host *mmc)
443 {
444 	struct sdhci_host *host = mmc_priv(mmc);
445 	u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
446 
447 	clk &= ~SDHCI_CLOCK_CARD_EN;
448 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
449 
450 	return 0;
451 }
452 
453 static int sdhci_uhs2_enable_clk(struct mmc_host *mmc)
454 {
455 	struct sdhci_host *host = mmc_priv(mmc);
456 	u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
457 	int timeout_us = 20000; /* 20ms */
458 	u32 val;
459 
460 	clk |= SDHCI_CLOCK_CARD_EN;
461 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
462 
463 	if (read_poll_timeout(sdhci_readw, val, (val & SDHCI_CLOCK_INT_STABLE),
464 			      10, timeout_us, true, host, SDHCI_CLOCK_CONTROL)) {
465 		pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc));
466 		sdhci_dumpregs(host);
467 		return -EIO;
468 	}
469 	return 0;
470 }
471 
472 static void sdhci_uhs2_set_config(struct sdhci_host *host)
473 {
474 	u32 value;
475 	u16 sdhci_uhs2_set_ptr = sdhci_readw(host, SDHCI_UHS2_SETTINGS_PTR);
476 	u16 sdhci_uhs2_gen_set_reg	= sdhci_uhs2_set_ptr;
477 	u16 sdhci_uhs2_phy_set_reg	= sdhci_uhs2_set_ptr + 4;
478 	u16 sdhci_uhs2_tran_set_reg	= sdhci_uhs2_set_ptr + 8;
479 	u16 sdhci_uhs2_tran_set_1_reg	= sdhci_uhs2_set_ptr + 12;
480 
481 	/* Set Gen Settings */
482 	value = FIELD_PREP(SDHCI_UHS2_GEN_SETTINGS_N_LANES_MASK, host->mmc->uhs2_caps.n_lanes_set);
483 	sdhci_writel(host, value, sdhci_uhs2_gen_set_reg);
484 
485 	/* Set PHY Settings */
486 	value = FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_DIR_MASK, host->mmc->uhs2_caps.n_lss_dir_set) |
487 		FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_SYN_MASK, host->mmc->uhs2_caps.n_lss_sync_set);
488 	if (host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
489 	    host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD)
490 		value |= SDHCI_UHS2_PHY_SET_SPEED_B;
491 	sdhci_writel(host, value, sdhci_uhs2_phy_set_reg);
492 
493 	/* Set LINK-TRAN Settings */
494 	value = FIELD_PREP(SDHCI_UHS2_TRAN_RETRY_CNT_MASK, host->mmc->uhs2_caps.max_retry_set) |
495 		FIELD_PREP(SDHCI_UHS2_TRAN_N_FCU_MASK, host->mmc->uhs2_caps.n_fcu_set);
496 	sdhci_writel(host, value, sdhci_uhs2_tran_set_reg);
497 	sdhci_writel(host, host->mmc->uhs2_caps.n_data_gap_set, sdhci_uhs2_tran_set_1_reg);
498 }
499 
500 static int sdhci_uhs2_check_dormant(struct sdhci_host *host)
501 {
502 	u32 val;
503 
504 	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IN_DORMANT_STATE),
505 			      100, UHS2_CHECK_DORMANT_TIMEOUT_100MS, true, host,
506 			      SDHCI_PRESENT_STATE)) {
507 		pr_warn("%s: UHS2 IN_DORMANT fail in 100ms.\n", mmc_hostname(host->mmc));
508 		sdhci_dumpregs(host);
509 		return -EIO;
510 	}
511 	return 0;
512 }
513 
514 static int sdhci_uhs2_control(struct mmc_host *mmc, enum sd_uhs2_operation op)
515 {
516 	struct sdhci_host *host = mmc_priv(mmc);
517 	struct mmc_ios *ios = &mmc->ios;
518 	int err = 0;
519 
520 	DBG("Begin uhs2 control, act %d.\n", op);
521 
522 	switch (op) {
523 	case UHS2_PHY_INIT:
524 		err = sdhci_uhs2_do_detect_init(mmc);
525 		break;
526 	case UHS2_SET_CONFIG:
527 		sdhci_uhs2_set_config(host);
528 		break;
529 	case UHS2_ENABLE_INT:
530 		sdhci_uhs2_clear_set_irqs(host, 0, SDHCI_INT_CARD_INT);
531 		break;
532 	case UHS2_DISABLE_INT:
533 		sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_CARD_INT, 0);
534 		break;
535 	case UHS2_CHECK_DORMANT:
536 		err = sdhci_uhs2_check_dormant(host);
537 		break;
538 	case UHS2_DISABLE_CLK:
539 		err = sdhci_uhs2_disable_clk(mmc);
540 		break;
541 	case UHS2_ENABLE_CLK:
542 		err = sdhci_uhs2_enable_clk(mmc);
543 		break;
544 	case UHS2_SET_IOS:
545 		err = sdhci_uhs2_set_ios(mmc, ios);
546 		break;
547 	default:
548 		pr_err("%s: input sd uhs2 operation %d is wrong!\n",
549 		       mmc_hostname(host->mmc), op);
550 		err = -EIO;
551 		break;
552 	}
553 
554 	return err;
555 }
556 
557 /*****************************************************************************\
558  *                                                                           *
559  * Core functions                                                            *
560  *                                                                           *
561 \*****************************************************************************/
562 
/*
 * Prepare the data phase of @cmd: common data init, DMA setup, then the
 * UHS-II specific block size/count registers.
 */
static void sdhci_uhs2_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	sdhci_prepare_dma(host, data);

	/*
	 * NOTE(review): BLOCK_COUNT is dumped as a 32-bit read in
	 * sdhci_uhs2_dump_regs() but written 16-bit here - confirm
	 * data->blocks can never exceed 0xffff on this path.
	 */
	sdhci_writew(host, data->blksz, SDHCI_UHS2_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_UHS2_BLOCK_COUNT);
}
574 
/* Complete the current data transfer and finish its request */
static void sdhci_uhs2_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data = host->data;

	__sdhci_finish_data_common(host, true);

	__sdhci_finish_mrq(host, data->mrq);
}
583 
584 static void sdhci_uhs2_set_transfer_mode(struct sdhci_host *host, struct mmc_command *cmd)
585 {
586 	u16 mode;
587 	struct mmc_data *data = cmd->data;
588 
589 	if (!data) {
590 		/* clear Auto CMD settings for no data CMDs */
591 		if (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT) {
592 			mode =  0;
593 		} else {
594 			mode = sdhci_readw(host, SDHCI_UHS2_TRANS_MODE);
595 			if (cmd->opcode == MMC_STOP_TRANSMISSION || cmd->opcode == MMC_ERASE)
596 				mode |= SDHCI_UHS2_TRNS_WAIT_EBSY;
597 			else
598 				/* send status mode */
599 				if (cmd->opcode == MMC_SEND_STATUS)
600 					mode = 0;
601 		}
602 
603 		DBG("UHS2 no data trans mode is 0x%x.\n", mode);
604 
605 		sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);
606 		return;
607 	}
608 
609 	WARN_ON(!host->data);
610 
611 	mode = SDHCI_UHS2_TRNS_BLK_CNT_EN | SDHCI_UHS2_TRNS_WAIT_EBSY;
612 	if (data->flags & MMC_DATA_WRITE)
613 		mode |= SDHCI_UHS2_TRNS_DATA_TRNS_WRT;
614 
615 	if (data->blocks == 1 &&
616 	    data->blksz != 512 &&
617 	    cmd->opcode != MMC_READ_SINGLE_BLOCK &&
618 	    cmd->opcode != MMC_WRITE_BLOCK) {
619 		mode &= ~SDHCI_UHS2_TRNS_BLK_CNT_EN;
620 		mode |= SDHCI_UHS2_TRNS_BLK_BYTE_MODE;
621 	}
622 
623 	if (host->flags & SDHCI_REQ_USE_DMA)
624 		mode |= SDHCI_UHS2_TRNS_DMA;
625 
626 	if (cmd->uhs2_cmd->tmode_half_duplex)
627 		mode |= SDHCI_UHS2_TRNS_2L_HD;
628 
629 	sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);
630 
631 	DBG("UHS2 trans mode is 0x%x.\n", mode);
632 }
633 
/*
 * Write the UHS-II command packet into the packet registers and kick off
 * the command by writing SDHCI_UHS2_CMD last.
 */
static void __sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, j;
	int cmd_reg;

	/* First dword: header in the low 16 bits, argument in the high 16 */
	i = 0;
	sdhci_writel(host,
		     ((u32)cmd->uhs2_cmd->arg << 16) |
				(u32)cmd->uhs2_cmd->header,
		     SDHCI_UHS2_CMD_PACKET + i);
	i += 4;

	/*
	 * Per spec, payload (config) should be MSB before sending out.
	 * But we don't need convert here because had set payload as
	 * MSB when preparing config read/write commands.
	 */
	for (j = 0; j < cmd->uhs2_cmd->payload_len / sizeof(u32); j++) {
		sdhci_writel(host, *(__force u32 *)(cmd->uhs2_cmd->payload + j),
			     SDHCI_UHS2_CMD_PACKET + i);
		i += 4;
	}

	/* Zero the remainder of the packet area */
	for ( ; i < SDHCI_UHS2_CMD_PACK_MAX_LEN; i += 4)
		sdhci_writel(host, 0, SDHCI_UHS2_CMD_PACKET + i);

	DBG("UHS2 CMD packet_len = %d.\n", cmd->uhs2_cmd->packet_len);
	for (i = 0; i < cmd->uhs2_cmd->packet_len; i++)
		DBG("UHS2 CMD_PACKET[%d] = 0x%x.\n", i,
		    sdhci_readb(host, SDHCI_UHS2_CMD_PACKET + i));

	/* Build the command register: packet length plus per-command flags */
	cmd_reg = FIELD_PREP(SDHCI_UHS2_CMD_PACK_LEN_MASK, cmd->uhs2_cmd->packet_len);
	if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)
		cmd_reg |= SDHCI_UHS2_CMD_DATA;
	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd_reg |= SDHCI_UHS2_CMD_CMD12;

	/* UHS2 Native ABORT */
	if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
	    (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT))
		cmd_reg |= SDHCI_UHS2_CMD_TRNS_ABORT;

	/* UHS2 Native DORMANT */
	if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
	    (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_GO_DORMANT_STATE))
		cmd_reg |= SDHCI_UHS2_CMD_DORMANT;

	DBG("0x%x is set to UHS2 CMD register.\n", cmd_reg);

	/* This write starts the transfer */
	sdhci_writew(host, cmd_reg, SDHCI_UHS2_CMD);
}
685 
/*
 * Try to issue @cmd.  Returns false when the controller's CMD line is
 * still inhibited (caller may retry later); true once the command has
 * been dispatched.  Called with host->lock held.
 */
static bool sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* CMD12 implies waiting for the busy state to end */
	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;

	/* Controller still busy with a previous command: let the caller retry */
	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		__sdhci_uhs2_set_timeout(host);
	}

	if (cmd->data)
		sdhci_uhs2_prepare_data(host, cmd);

	sdhci_uhs2_set_transfer_mode(host, cmd);

	/* Software watchdog: data timeout, busy timeout or a 10 s default */
	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	__sdhci_uhs2_send_command(host, cmd);

	return true;
}
730 
/*
 * Retry sending @cmd for up to ~10 ms while the controller's inhibit
 * bit is set, dropping host->lock around each sleep.  Returns false
 * (with cmd->error set) when the inhibit never clears or the card went
 * away; true once the command has been sent or the deferred command it
 * represented has been completed elsewhere.
 */
static bool sdhci_uhs2_send_command_retry(struct sdhci_host *host,
					  struct mmc_command *cmd,
					  unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_uhs2_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		/* Drop the lock while sleeping and re-checking card presence */
		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}
771 
772 static void __sdhci_uhs2_finish_command(struct sdhci_host *host)
773 {
774 	struct mmc_command *cmd = host->cmd;
775 	u8 resp;
776 	u8 error_code;
777 	bool breada0 = 0;
778 	int i;
779 
780 	if (host->mmc->uhs2_sd_tran) {
781 		resp = sdhci_readb(host, SDHCI_UHS2_RESPONSE + 2);
782 		if (resp & UHS2_RES_NACK_MASK) {
783 			error_code = (resp >> UHS2_RES_ECODE_POS) & UHS2_RES_ECODE_MASK;
784 			pr_err("%s: NACK response, ECODE=0x%x.\n",
785 			       mmc_hostname(host->mmc), error_code);
786 		}
787 		breada0 = 1;
788 	}
789 
790 	if (cmd->uhs2_cmd->uhs2_resp_len) {
791 		int len = min_t(int, cmd->uhs2_cmd->uhs2_resp_len, UHS2_MAX_RESP_LEN);
792 
793 		/* Get whole response of some native CCMD, like
794 		 * DEVICE_INIT, ENUMERATE.
795 		 */
796 		for (i = 0; i < len; i++)
797 			cmd->uhs2_cmd->uhs2_resp[i] = sdhci_readb(host, SDHCI_UHS2_RESPONSE + i);
798 	} else {
799 		/* Get SD CMD response and Payload for some read
800 		 * CCMD, like INQUIRY_CFG.
801 		 */
802 		/* Per spec (p136), payload field is divided into
803 		 * a unit of DWORD and transmission order within
804 		 * a DWORD is big endian.
805 		 */
806 		if (!breada0)
807 			sdhci_readl(host, SDHCI_UHS2_RESPONSE);
808 		for (i = 4; i < 20; i += 4) {
809 			cmd->resp[i / 4 - 1] =
810 				(sdhci_readb(host,
811 					     SDHCI_UHS2_RESPONSE + i) << 24) |
812 				(sdhci_readb(host,
813 					     SDHCI_UHS2_RESPONSE + i + 1)
814 					<< 16) |
815 				(sdhci_readb(host,
816 					     SDHCI_UHS2_RESPONSE + i + 2)
817 					<< 8) |
818 				sdhci_readb(host, SDHCI_UHS2_RESPONSE + i + 3);
819 		}
820 	}
821 }
822 
/*
 * Complete the current command: read its response, then either wait for
 * the busy phase, finish early-completed data, or finish the request.
 */
static void sdhci_uhs2_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	__sdhci_uhs2_finish_command(host);

	host->cmd = NULL;

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Processed actual command. */
	if (host->data && host->data_early)
		sdhci_uhs2_finish_data(host);

	if (!cmd->data)
		__sdhci_finish_mrq(host, cmd->mrq);
}
861 
/*
 * .request entry point for UHS-II.  Legacy cards are forwarded to
 * sdhci_request(); UHS-II requests never carry sbc/stop commands, so
 * those are cleared before sending.
 */
static void sdhci_uhs2_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	bool present;

	if (!(mmc_card_uhs2(mmc))) {
		sdhci_request(mmc, mrq);
		return;
	}

	/* UHS-II transfers do not use sbc/stop commands */
	mrq->stop = NULL;
	mrq->sbc = NULL;
	if (mrq->data)
		mrq->data->stop = NULL;

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (sdhci_present_error(host, mrq->cmd, present))
		goto out_finish;

	cmd = mrq->cmd;

	if (!sdhci_uhs2_send_command_retry(host, cmd, flags))
		goto out_finish;

	spin_unlock_irqrestore(&host->lock, flags);

	return;

out_finish:
	sdhci_finish_mrq(host, mrq);
	spin_unlock_irqrestore(&host->lock, flags);
}
900 
901 /*****************************************************************************\
902  *                                                                           *
903  * Request done                                                              *
904  *                                                                           *
905 \*****************************************************************************/
906 
907 static bool sdhci_uhs2_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
908 {
909 	return sdhci_needs_reset(host, mrq) ||
910 	       (!(host->flags & SDHCI_DEVICE_DEAD) && mrq->data && mrq->data->error);
911 }
912 
913 static bool sdhci_uhs2_request_done(struct sdhci_host *host)
914 {
915 	unsigned long flags;
916 	struct mmc_request *mrq;
917 	int i;
918 
919 	spin_lock_irqsave(&host->lock, flags);
920 
921 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
922 		mrq = host->mrqs_done[i];
923 		if (mrq)
924 			break;
925 	}
926 
927 	if (!mrq) {
928 		spin_unlock_irqrestore(&host->lock, flags);
929 		return true;
930 	}
931 
932 	/*
933 	 * Always unmap the data buffers if they were mapped by
934 	 * sdhci_prepare_data() whenever we finish with a request.
935 	 * This avoids leaking DMA mappings on error.
936 	 */
937 	if (host->flags & SDHCI_REQ_USE_DMA)
938 		sdhci_request_done_dma(host, mrq);
939 
940 	/*
941 	 * The controller needs a reset of internal state machines
942 	 * upon error conditions.
943 	 */
944 	if (sdhci_uhs2_needs_reset(host, mrq)) {
945 		/*
946 		 * Do not finish until command and data lines are available for
947 		 * reset. Note there can only be one other mrq, so it cannot
948 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
949 		 * would both be null.
950 		 */
951 		if (host->cmd || host->data_cmd) {
952 			spin_unlock_irqrestore(&host->lock, flags);
953 			return true;
954 		}
955 
956 		if (mrq->cmd->error || mrq->data->error)
957 			sdhci_uhs2_reset_cmd_data(host);
958 		else
959 			sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
960 		host->pending_reset = false;
961 	}
962 
963 	host->mrqs_done[i] = NULL;
964 
965 	spin_unlock_irqrestore(&host->lock, flags);
966 
967 	if (host->ops->request_done)
968 		host->ops->request_done(host, mrq);
969 	else
970 		mmc_request_done(host->mmc, mrq);
971 
972 	return false;
973 }
974 
975 static void sdhci_uhs2_complete_work(struct work_struct *work)
976 {
977 	struct sdhci_host *host = container_of(work, struct sdhci_host,
978 					       complete_work);
979 
980 	if (!mmc_card_uhs2(host->mmc)) {
981 		sdhci_complete_work(work);
982 		return;
983 	}
984 
985 	while (!sdhci_uhs2_request_done(host))
986 		;
987 }
988 
989 /*****************************************************************************\
990  *                                                                           *
991  * Interrupt handling                                                        *
992  *                                                                           *
993 \*****************************************************************************/
994 
/*
 * Handle UHS-II error interrupts.
 *
 * @uhs2mask is the already-read SDHCI_UHS2_INT_STATUS value.  Records an
 * error code on the in-flight command and/or data transfer and then
 * finishes the affected request.
 */
static void __sdhci_uhs2_irq(struct sdhci_host *host, u32 uhs2mask)
{
	struct mmc_command *cmd = host->cmd;

	DBG("*** %s got UHS2 error interrupt: 0x%08x\n",
	    mmc_hostname(host->mmc), uhs2mask);

	if (uhs2mask & SDHCI_UHS2_INT_CMD_ERR_MASK) {
		if (!host->cmd) {
			/* Spurious command error: nothing in flight. */
			pr_err("%s: Got cmd interrupt 0x%08x but no cmd.\n",
			       mmc_hostname(host->mmc),
			       (unsigned int)uhs2mask);
			sdhci_dumpregs(host);
			return;
		}
		/* Timeout is reported in preference to a generic -EILSEQ. */
		host->cmd->error = -EILSEQ;
		if (uhs2mask & SDHCI_UHS2_INT_CMD_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
	}

	if (uhs2mask & SDHCI_UHS2_INT_DATA_ERR_MASK) {
		if (!host->data) {
			/* Spurious data error: no transfer in flight. */
			pr_err("%s: Got data interrupt 0x%08x but no data.\n",
			       mmc_hostname(host->mmc),
			       (unsigned int)uhs2mask);
			sdhci_dumpregs(host);
			return;
		}

		if (uhs2mask & SDHCI_UHS2_INT_DEADLOCK_TIMEOUT) {
			pr_err("%s: Got deadlock timeout interrupt 0x%08x\n",
			       mmc_hostname(host->mmc),
			       (unsigned int)uhs2mask);
			host->data->error = -ETIMEDOUT;
		} else if (uhs2mask & SDHCI_UHS2_INT_ADMA_ERROR) {
			pr_err("%s: ADMA error = 0x %x\n",
			       mmc_hostname(host->mmc),
			       sdhci_readb(host, SDHCI_ADMA_ERROR));
			host->data->error = -EIO;
		} else {
			host->data->error = -EILSEQ;
		}
	}

	/*
	 * Data errors finish via the data path; otherwise end the whole mrq.
	 * NOTE(review): if an error bit outside the cmd/data masks reaches
	 * here with no command in flight, cmd is NULL and cmd->mrq would
	 * oops — confirm SDHCI_UHS2_INT_ERROR_MASK cannot contain such bits.
	 */
	if (host->data && host->data->error)
		sdhci_uhs2_finish_data(host);
	else
		sdhci_finish_mrq(host, cmd->mrq);

}
1045 
/*
 * UHS-II portion of the SDHCI interrupt handler.
 *
 * Consumes the UHS-II error and command bits of @intmask and returns the
 * remaining bits for the generic sdhci_irq() code to process.  For
 * non-UHS-II cards @intmask is returned untouched.
 */
u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask)
{
	/* @mask tracks the bits handled here so they can be acked below. */
	u32 mask = intmask, uhs2mask;

	if (!mmc_card_uhs2(host->mmc))
		goto out;

	if (intmask & SDHCI_INT_ERROR) {
		uhs2mask = sdhci_readl(host, SDHCI_UHS2_INT_STATUS);
		if (!(uhs2mask & SDHCI_UHS2_INT_ERROR_MASK))
			goto cmd_irq;

		/* Clear error interrupts */
		sdhci_writel(host, uhs2mask & SDHCI_UHS2_INT_ERROR_MASK,
			     SDHCI_UHS2_INT_STATUS);

		/* Handle error interrupts */
		__sdhci_uhs2_irq(host, uhs2mask);

		/* Caller, sdhci_irq(), doesn't have to care about UHS-2 errors */
		intmask &= ~SDHCI_INT_ERROR;
		/* Keep only the handled error bit in @mask for the ack below. */
		mask &= SDHCI_INT_ERROR;
	}

cmd_irq:
	if (intmask & SDHCI_INT_CMD_MASK) {
		/* Clear command interrupt */
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);

		/* Handle command interrupt */
		if (intmask & SDHCI_INT_RESPONSE)
			sdhci_uhs2_finish_command(host);

		/* Caller, sdhci_irq(), doesn't have to care about UHS-2 commands */
		intmask &= ~SDHCI_INT_CMD_MASK;
		mask &= SDHCI_INT_CMD_MASK;
	}

	/* Clear already-handled interrupts. */
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

out:
	/* Whatever remains is for the generic handler. */
	return intmask;
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_irq);
1091 
/*
 * Threaded IRQ handler.
 *
 * Falls back to the generic sdhci_thread_irq() for non-UHS-II cards.
 * Otherwise it drains completed requests, resubmits a command that was
 * deferred from hard-irq context, and reports card insert/remove events.
 */
static irqreturn_t sdhci_uhs2_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	struct mmc_command *cmd;
	unsigned long flags;
	u32 isr;

	if (!mmc_card_uhs2(host->mmc))
		return sdhci_thread_irq(irq, dev_id);

	/* Complete all finished requests before taking host->lock below. */
	while (!sdhci_uhs2_request_done(host))
		;

	spin_lock_irqsave(&host->lock, flags);

	/* Snapshot and clear the bits deferred by the hard-irq handler. */
	isr = host->thread_isr;
	host->thread_isr = 0;

	/* Retry a command that could not be issued from hard-irq context. */
	cmd = host->deferred_cmd;
	if (cmd && !sdhci_uhs2_send_command_retry(host, cmd, flags))
		sdhci_finish_mrq(host, cmd->mrq);

	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		/* Debounce card detection by 200 ms. */
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	return IRQ_HANDLED;
}
1125 
1126 /*****************************************************************************\
1127  *                                                                           *
1128  * Driver init/exit                                                          *
1129  *                                                                           *
1130 \*****************************************************************************/
1131 
1132 static int sdhci_uhs2_host_ops_init(struct sdhci_host *host)
1133 {
1134 	host->mmc_host_ops.uhs2_control = sdhci_uhs2_control;
1135 	host->mmc_host_ops.request = sdhci_uhs2_request;
1136 
1137 	return 0;
1138 }
1139 
/* Module init stub: all functionality is used via exported symbols. */
static int __init sdhci_uhs2_mod_init(void)
{
	return 0;
}
module_init(sdhci_uhs2_mod_init);
1145 
/* Module exit stub: nothing to tear down. */
static void __exit sdhci_uhs2_mod_exit(void)
{
}
module_exit(sdhci_uhs2_mod_exit);
1150 
1151 /*****************************************************************************\
1152  *
1153  * Device allocation/registration                                            *
1154  *                                                                           *
1155 \*****************************************************************************/
1156 
/*
 * Apply UHS-II related capabilities read in v4 mode.
 *
 * Advertises MMC_CAP2_SD_UHS2 when @caps1 reports UHS-II support and
 * withdraws it again when the VDD2 (1.8 V) capability bit is absent.
 */
static void __sdhci_uhs2_add_host_v4(struct sdhci_host *host, u32 caps1)
{
	struct mmc_host *mmc;
	u32 max_current_caps2;

	mmc = host->mmc;

	/* Support UHS2 */
	if (caps1 & SDHCI_SUPPORT_UHS2)
		mmc->caps2 |= MMC_CAP2_SD_UHS2;

	max_current_caps2 = sdhci_readl(host, SDHCI_MAX_CURRENT_1);

	/* Derive a VDD2 current limit from the regulator when the
	 * controller does not report one itself. */
	if ((caps1 & SDHCI_CAN_VDD2_180) &&
	    !max_current_caps2 &&
	    !IS_ERR(mmc->supply.vqmmc2)) {
		/* UHS2 - VDD2 */
		int curr = regulator_get_current_limit(mmc->supply.vqmmc2);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			/*
			 * NOTE(review): max_current_caps2 is assigned here but
			 * never read again in this function — confirm whether
			 * the value should feed into OCR/current reporting or
			 * whether the computation is dead code.
			 */
			max_current_caps2 = curr;
		}
	}

	if (!(caps1 & SDHCI_CAN_VDD2_180))
		mmc->caps2 &= ~MMC_CAP2_SD_UHS2;
}
1188 
1189 static void __sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
1190 {
1191 	if (!mmc_card_uhs2(host->mmc))
1192 		return;
1193 
1194 	if (!dead)
1195 		sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_FULL);
1196 }
1197 
1198 int sdhci_uhs2_add_host(struct sdhci_host *host)
1199 {
1200 	struct mmc_host *mmc = host->mmc;
1201 	int ret;
1202 
1203 	ret = sdhci_setup_host(host);
1204 	if (ret)
1205 		return ret;
1206 
1207 	if (host->version >= SDHCI_SPEC_400)
1208 		__sdhci_uhs2_add_host_v4(host, host->caps1);
1209 
1210 	if ((mmc->caps2 & MMC_CAP2_SD_UHS2) && !host->v4_mode)
1211 		/* host doesn't want to enable UHS2 support */
1212 		mmc->caps2 &= ~MMC_CAP2_SD_UHS2;
1213 
1214 	/* overwrite ops */
1215 	if (mmc->caps2 & MMC_CAP2_SD_UHS2)
1216 		sdhci_uhs2_host_ops_init(host);
1217 
1218 	host->complete_work_fn = sdhci_uhs2_complete_work;
1219 	host->thread_irq_fn    = sdhci_uhs2_thread_irq;
1220 
1221 	/* LED support not implemented for UHS2 */
1222 	host->quirks |= SDHCI_QUIRK_NO_LED;
1223 
1224 	ret = __sdhci_add_host(host);
1225 	if (ret)
1226 		goto cleanup;
1227 
1228 	return 0;
1229 
1230 cleanup:
1231 	if (host->version >= SDHCI_SPEC_400)
1232 		__sdhci_uhs2_remove_host(host, 0);
1233 
1234 	sdhci_cleanup_host(host);
1235 
1236 	return ret;
1237 }
1238 EXPORT_SYMBOL_GPL(sdhci_uhs2_add_host);
1239 
/* Unregister the host: UHS-II teardown first, then the generic removal. */
void sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
{
	__sdhci_uhs2_remove_host(host, dead);
	sdhci_remove_host(host, dead);
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_remove_host);
1247 
1248 MODULE_AUTHOR("Intel, Genesys Logic, Linaro");
1249 MODULE_DESCRIPTION("MMC UHS-II Support");
1250 MODULE_LICENSE("GPL");
1251