xref: /linux/drivers/mmc/host/sdhci-uhs2.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  linux/drivers/mmc/host/sdhci_uhs2.c - Secure Digital Host Controller
4  *  Interface driver
5  *
6  *  Copyright (C) 2014 Intel Corp, All Rights Reserved.
7  *  Copyright (C) 2020 Genesys Logic, Inc.
8  *  Authors: Ben Chuang <ben.chuang@genesyslogic.com.tw>
9  *  Copyright (C) 2020 Linaro Limited
10  *  Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/module.h>
15 #include <linux/iopoll.h>
16 #include <linux/bitfield.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/mmc/mmc.h>
19 #include <linux/mmc/host.h>
20 
21 #include "sdhci.h"
22 #include "sdhci-uhs2.h"
23 
24 #define DRIVER_NAME "sdhci_uhs2"
25 #define DBG(f, x...) \
26 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
27 #define SDHCI_UHS2_DUMP(f, x...) \
28 	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
29 
30 #define UHS2_RESET_TIMEOUT_100MS		100000
31 #define UHS2_CHECK_DORMANT_TIMEOUT_100MS	100000
32 #define UHS2_INTERFACE_DETECT_TIMEOUT_100MS	100000
33 #define UHS2_LANE_SYNC_TIMEOUT_150MS		150000
34 
35 #define UHS2_ARG_IOADR_MASK 0xfff
36 
/**
 * sdhci_uhs2_dump_regs - dump the UHS-II specific register set
 * @host: SDHCI host
 *
 * Print the UHS-II block size/count, command and transfer mode, device
 * interrupt status/select, reset/timer control and the error interrupt
 * status/enable/signal registers.  No-op unless the attached card is
 * currently operating in UHS-II mode.
 */
void sdhci_uhs2_dump_regs(struct sdhci_host *host)
{
	if (!(mmc_card_uhs2(host->mmc)))
		return;

	SDHCI_UHS2_DUMP("==================== UHS2 ==================\n");
	SDHCI_UHS2_DUMP("Blk Size:  0x%08x | Blk Cnt:  0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_BLOCK_SIZE),
			sdhci_readl(host, SDHCI_UHS2_BLOCK_COUNT));
	SDHCI_UHS2_DUMP("Cmd:       0x%08x | Trn mode: 0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_CMD),
			sdhci_readw(host, SDHCI_UHS2_TRANS_MODE));
	SDHCI_UHS2_DUMP("Int Stat:  0x%08x | Dev Sel : 0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_DEV_INT_STATUS),
			sdhci_readb(host, SDHCI_UHS2_DEV_SELECT));
	SDHCI_UHS2_DUMP("Dev Int Code:  0x%08x\n",
			sdhci_readb(host, SDHCI_UHS2_DEV_INT_CODE));
	SDHCI_UHS2_DUMP("Reset:     0x%08x | Timer:    0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_SW_RESET),
			sdhci_readw(host, SDHCI_UHS2_TIMER_CTRL));
	SDHCI_UHS2_DUMP("ErrInt:    0x%08x | ErrIntEn: 0x%08x\n",
			sdhci_readl(host, SDHCI_UHS2_INT_STATUS),
			sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE));
	SDHCI_UHS2_DUMP("ErrSigEn:  0x%08x\n",
			sdhci_readl(host, SDHCI_UHS2_INT_SIGNAL_ENABLE));
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_dump_regs);
64 
65 /*****************************************************************************\
66  *                                                                           *
67  * Low level functions                                                       *
68  *                                                                           *
69 \*****************************************************************************/
70 
71 static inline u16 uhs2_dev_cmd(struct mmc_command *cmd)
72 {
73 	return be16_to_cpu((__force __be16)cmd->uhs2_cmd->arg) & UHS2_ARG_IOADR_MASK;
74 }
75 
/*
 * Set the OCR on an optional regulator.  A missing or invalid @supply
 * is not an error and is treated as success.
 */
static inline int mmc_opt_regulator_set_ocr(struct mmc_host *mmc,
					    struct regulator *supply,
					    unsigned short vdd_bit)
{
	if (IS_ERR_OR_NULL(supply))
		return 0;

	return mmc_regulator_set_ocr(mmc, supply, vdd_bit);
}
82 
83 /**
84  * sdhci_uhs2_reset - invoke SW reset
85  * @host: SDHCI host
86  * @mask: Control mask
87  *
88  * Invoke SW reset, depending on a bit in @mask and wait for completion.
89  */
void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask)
{
	u32 val;

	sdhci_writew(host, mask, SDHCI_UHS2_SW_RESET);

	/* A full reset also resets clocking, so force clock re-programming. */
	if (mask & SDHCI_UHS2_SW_RESET_FULL)
		host->clock = 0;

	/* hw clears the bit when it's done */
	if (read_poll_timeout_atomic(sdhci_readw, val, !(val & mask), 10,
				     UHS2_RESET_TIMEOUT_100MS, true, host, SDHCI_UHS2_SW_RESET)) {
		pr_debug("%s: %s: Reset 0x%x never completed. %s: clean reset bit.\n", __func__,
			 mmc_hostname(host->mmc), (int)mask, mmc_hostname(host->mmc));
		/* Hardware never self-cleared; clear the reset bit manually. */
		sdhci_writeb(host, 0, SDHCI_UHS2_SW_RESET);
		return;
	}
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_reset);
109 
/*
 * Reset the legacy CMD/DATA circuits and, when the link is running in
 * UHS-II SD-TRAN mode, also issue a UHS-II SD reset and restore the
 * interrupt enables that the reset clears.
 */
static void sdhci_uhs2_reset_cmd_data(struct sdhci_host *host)
{
	sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	if (host->mmc->uhs2_sd_tran) {
		sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);

		/* SDHCI_UHS2_SW_RESET_SD cleared these; re-program them. */
		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
	}
}
122 
/**
 * sdhci_uhs2_set_power - program VDD1/VDD2 power for a UHS-II card
 * @host: SDHCI host
 * @mode: MMC_POWER_* power mode
 * @vdd: VDD1 voltage bit from the OCR mask
 *
 * Drives both the SDHCI power control register and the external
 * regulators.  VDD2 (the UHS-II interface supply) is fixed at 1.8 V.
 * VDD1 is raised before VDD2, with a 5 ms settle delay after each step.
 */
void sdhci_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		pwr = sdhci_get_vdd_value(vdd);
		if (!pwr)
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
		/* UHS-II VDD2 is 1.8 V only */
		pwr |= SDHCI_VDD2_POWER_180;
	}

	/* Nothing to do when the requested power state is unchanged. */
	if (host->pwr == pwr)
		return;
	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		mmc_regulator_set_vqmmc2(mmc, &mmc->ios);
	} else {
		mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/* support 1.8v only for now */
		mmc_regulator_set_vqmmc2(mmc, &mmc->ios);

		/* Clear the power reg before setting a new value */
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/* vdd first */
		pwr |= SDHCI_POWER_ON;
		sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
		mdelay(5);

		/* then the VDD2 supply */
		pwr |= SDHCI_VDD2_POWER_ON;
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
		mdelay(5);
	}
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_set_power);
164 
165 static u8 sdhci_calc_timeout_uhs2(struct sdhci_host *host, u8 *cmd_res, u8 *dead_lock)
166 {
167 	/* timeout in us */
168 	unsigned int dead_lock_timeout = 1 * 1000 * 1000;
169 	unsigned int cmd_res_timeout = 5 * 1000;
170 	unsigned int current_timeout;
171 	u8 count;
172 
173 	/*
174 	 * Figure out needed cycles.
175 	 * We do this in steps in order to fit inside a 32 bit int.
176 	 * The first step is the minimum timeout, which will have a
177 	 * minimum resolution of 6 bits:
178 	 * (1) 2^13*1000 > 2^22,
179 	 * (2) host->timeout_clk < 2^16
180 	 *     =>
181 	 *     (1) / (2) > 2^6
182 	 */
183 	count = 0;
184 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
185 	while (current_timeout < cmd_res_timeout) {
186 		count++;
187 		current_timeout <<= 1;
188 		if (count >= 0xF)
189 			break;
190 	}
191 
192 	if (count >= 0xF) {
193 		DBG("%s: Too large timeout 0x%x requested for CMD_RES!\n",
194 		    mmc_hostname(host->mmc), count);
195 		count = 0xE;
196 	}
197 	*cmd_res = count;
198 
199 	count = 0;
200 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
201 	while (current_timeout < dead_lock_timeout) {
202 		count++;
203 		current_timeout <<= 1;
204 		if (count >= 0xF)
205 			break;
206 	}
207 
208 	if (count >= 0xF) {
209 		DBG("%s: Too large timeout 0x%x requested for DEADLOCK!\n",
210 		    mmc_hostname(host->mmc), count);
211 		count = 0xE;
212 	}
213 	*dead_lock = count;
214 
215 	return count;
216 }
217 
218 static void __sdhci_uhs2_set_timeout(struct sdhci_host *host)
219 {
220 	u8 cmd_res, dead_lock;
221 
222 	sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);
223 	cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);
224 	sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
225 }
226 
227 void sdhci_uhs2_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
228 {
229 	__sdhci_set_timeout(host, cmd);
230 
231 	if (mmc_card_uhs2(host->mmc))
232 		__sdhci_uhs2_set_timeout(host);
233 }
234 EXPORT_SYMBOL_GPL(sdhci_uhs2_set_timeout);
235 
236 /**
237  * sdhci_uhs2_clear_set_irqs - set Error Interrupt Status Enable register
238  * @host:	SDHCI host
239  * @clear:	bit-wise clear mask
240  * @set:	bit-wise set mask
241  *
242  * Set/unset bits in UHS-II Error Interrupt Status Enable register
243  */
244 void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
245 {
246 	u32 ier;
247 
248 	ier = sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE);
249 	ier &= ~clear;
250 	ier |= set;
251 	sdhci_writel(host, ier, SDHCI_UHS2_INT_STATUS_ENABLE);
252 	sdhci_writel(host, ier, SDHCI_UHS2_INT_SIGNAL_ENABLE);
253 }
254 EXPORT_SYMBOL_GPL(sdhci_uhs2_clear_set_irqs);
255 
/*
 * Apply @ios to a UHS-II link: program timeouts, the UHS-II timing bits
 * in HOST_CONTROL2, preset values, power and clock.
 */
static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 cmd_res, dead_lock;
	u16 ctrl_2;

	/* UHS2 Timeout Control */
	sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);

	/* change to use calculate value */
	cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);

	/* Mask the timeout irqs while the timer register is re-programmed. */
	sdhci_uhs2_clear_set_irqs(host,
				  SDHCI_UHS2_INT_CMD_TIMEOUT |
				  SDHCI_UHS2_INT_DEADLOCK_TIMEOUT,
				  0);
	sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
	sdhci_uhs2_clear_set_irqs(host, 0,
				  SDHCI_UHS2_INT_CMD_TIMEOUT |
				  SDHCI_UHS2_INT_DEADLOCK_TIMEOUT);

	/* UHS2 timing. Note, UHS2 timing is disabled when powering off */
	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ios->power_mode != MMC_POWER_OFF &&
	    (ios->timing == MMC_TIMING_UHS2_SPEED_A ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_A_HD ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_B ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_B_HD))
		ctrl_2 |= SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE;
	else
		ctrl_2 &= ~(SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
	host->timing = ios->timing;

	if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, true);

	/* Prefer the host-specific power hook when one is provided. */
	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);

	host->ops->set_clock(host, ios->clock);
	host->clock = ios->clock;
}
301 
302 static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
303 {
304 	struct sdhci_host *host = mmc_priv(mmc);
305 
306 	pr_debug("%s: clock %uHz powermode %u Vdd %u timing %u\n",
307 		 mmc_hostname(mmc), ios->clock, ios->power_mode, ios->vdd, ios->timing);
308 
309 	if (!mmc_card_uhs2(mmc)) {
310 		sdhci_set_ios(mmc, ios);
311 		return 0;
312 	}
313 
314 	if (ios->power_mode == MMC_POWER_UNDEFINED)
315 		return 0;
316 
317 	if (host->flags & SDHCI_DEVICE_DEAD) {
318 		if (ios->power_mode == MMC_POWER_OFF) {
319 			mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
320 			mmc_regulator_set_vqmmc2(mmc, ios);
321 		}
322 		return -1;
323 	}
324 
325 	sdhci_set_ios_common(mmc, ios);
326 
327 	__sdhci_uhs2_set_ios(mmc, ios);
328 
329 	return 0;
330 }
331 
/*
 * Wait for the PHY to report UHS-II interface detection (100 ms max)
 * and lane synchronization (150 ms max), enabling the UHS-II error
 * interrupts in between.  Returns 0 on success, -EIO on timeout.
 */
static int sdhci_uhs2_interface_detect(struct sdhci_host *host)
{
	u32 val;

	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IF_DETECT),
			      100, UHS2_INTERFACE_DETECT_TIMEOUT_100MS, true,
			      host, SDHCI_PRESENT_STATE)) {
		pr_debug("%s: not detect UHS2 interface in 100ms.\n", mmc_hostname(host->mmc));
		sdhci_dbg_dumpregs(host, "UHS2 interface detect timeout in 100ms");
		return -EIO;
	}

	/* Enable UHS2 error interrupts */
	sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);

	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_LANE_SYNC),
			      100, UHS2_LANE_SYNC_TIMEOUT_150MS, true, host, SDHCI_PRESENT_STATE)) {
		pr_debug("%s: UHS2 Lane sync fail in 150ms.\n", mmc_hostname(host->mmc));
		sdhci_dbg_dumpregs(host, "UHS2 Lane sync fail in 150ms");
		return -EIO;
	}

	DBG("%s: UHS2 Lane synchronized in UHS2 mode, PHY is initialized.\n",
	    mmc_hostname(host->mmc));
	return 0;
}
358 
/*
 * Read the controller's UHS-II capability registers (located via the
 * capabilities pointer) and populate mmc->uhs2_caps.  Returns -ENODEV
 * when the capabilities pointer is outside its valid 0x100-0x1FF range.
 */
static int sdhci_uhs2_init(struct sdhci_host *host)
{
	u16 caps_ptr = 0;
	u32 caps_gen = 0;
	u32 caps_phy = 0;
	u32 caps_tran[2] = {0, 0};
	struct mmc_host *mmc = host->mmc;

	caps_ptr = sdhci_readw(host, SDHCI_UHS2_CAPS_PTR);
	if (caps_ptr < 0x100 || caps_ptr > 0x1FF) {
		pr_err("%s: SDHCI_UHS2_CAPS_PTR(%d) is wrong.\n",
		       mmc_hostname(mmc), caps_ptr);
		return -ENODEV;
	}
	caps_gen = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_OFFSET);
	caps_phy = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_PHY_OFFSET);
	caps_tran[0] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_OFFSET);
	caps_tran[1] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_1_OFFSET);

	/* General Caps */
	mmc->uhs2_caps.dap = caps_gen & SDHCI_UHS2_CAPS_DAP_MASK;
	mmc->uhs2_caps.gap = FIELD_GET(SDHCI_UHS2_CAPS_GAP_MASK, caps_gen);
	mmc->uhs2_caps.n_lanes = FIELD_GET(SDHCI_UHS2_CAPS_LANE_MASK, caps_gen);
	mmc->uhs2_caps.addr64 =	(caps_gen & SDHCI_UHS2_CAPS_ADDR_64) ? 1 : 0;
	mmc->uhs2_caps.card_type = FIELD_GET(SDHCI_UHS2_CAPS_DEV_TYPE_MASK, caps_gen);

	/* PHY Caps */
	mmc->uhs2_caps.phy_rev = caps_phy & SDHCI_UHS2_CAPS_PHY_REV_MASK;
	mmc->uhs2_caps.speed_range = FIELD_GET(SDHCI_UHS2_CAPS_PHY_RANGE_MASK, caps_phy);
	mmc->uhs2_caps.n_lss_sync = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_SYN_MASK, caps_phy);
	mmc->uhs2_caps.n_lss_dir = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_DIR_MASK, caps_phy);
	/* A raw field value of 0 encodes the maximum (16); values are scaled. */
	if (mmc->uhs2_caps.n_lss_sync == 0)
		mmc->uhs2_caps.n_lss_sync = 16 << 2;
	else
		mmc->uhs2_caps.n_lss_sync <<= 2;
	if (mmc->uhs2_caps.n_lss_dir == 0)
		mmc->uhs2_caps.n_lss_dir = 16 << 3;
	else
		mmc->uhs2_caps.n_lss_dir <<= 3;

	/* LINK/TRAN Caps */
	mmc->uhs2_caps.link_rev = caps_tran[0] & SDHCI_UHS2_CAPS_TRAN_LINK_REV_MASK;
	mmc->uhs2_caps.n_fcu = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_N_FCU_MASK, caps_tran[0]);
	/* 0 encodes 256 FCUs */
	if (mmc->uhs2_caps.n_fcu == 0)
		mmc->uhs2_caps.n_fcu = 256;
	mmc->uhs2_caps.host_type = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_HOST_TYPE_MASK, caps_tran[0]);
	mmc->uhs2_caps.maxblk_len = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_BLK_LEN_MASK, caps_tran[0]);
	mmc->uhs2_caps.n_data_gap = caps_tran[1] & SDHCI_UHS2_CAPS_TRAN_1_N_DATA_GAP_MASK;

	return 0;
}
410 
/*
 * Detect and initialize the UHS-II PHY/link: optional host pre-detect
 * hook, interface/lane-sync detection, capability readout, then a soft
 * SD reset with interrupt enables restored.  Returns 0 or -EIO.
 */
static int sdhci_uhs2_do_detect_init(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	DBG("Begin do uhs2 detect init.\n");

	if (host->ops->uhs2_pre_detect_init)
		host->ops->uhs2_pre_detect_init(host);

	if (sdhci_uhs2_interface_detect(host)) {
		pr_debug("%s: cannot detect UHS2 interface.\n", mmc_hostname(host->mmc));
		return -EIO;
	}

	if (sdhci_uhs2_init(host)) {
		pr_debug("%s: UHS2 init fail.\n", mmc_hostname(host->mmc));
		return -EIO;
	}

	/* Init complete, do soft reset and enable UHS2 error irqs. */
	sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
	sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
	/*
	 * N.B SDHCI_INT_ENABLE and SDHCI_SIGNAL_ENABLE was cleared
	 * by SDHCI_UHS2_SW_RESET_SD
	 */
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	return 0;
}
442 
443 static int sdhci_uhs2_disable_clk(struct mmc_host *mmc)
444 {
445 	struct sdhci_host *host = mmc_priv(mmc);
446 	u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
447 
448 	clk &= ~SDHCI_CLOCK_CARD_EN;
449 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
450 
451 	return 0;
452 }
453 
454 static int sdhci_uhs2_enable_clk(struct mmc_host *mmc)
455 {
456 	struct sdhci_host *host = mmc_priv(mmc);
457 	u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
458 	int timeout_us = 20000; /* 20ms */
459 	u32 val;
460 
461 	clk |= SDHCI_CLOCK_CARD_EN;
462 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
463 
464 	if (read_poll_timeout(sdhci_readw, val, (val & SDHCI_CLOCK_INT_STABLE),
465 			      10, timeout_us, true, host, SDHCI_CLOCK_CONTROL)) {
466 		pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc));
467 		sdhci_dumpregs(host);
468 		return -EIO;
469 	}
470 	return 0;
471 }
472 
/*
 * Write the negotiated UHS-II settings (lanes, PHY LSS counts and speed
 * range, LINK/TRAN retry/FCU/data-gap values) into the settings register
 * block located via the settings pointer register.
 */
static void sdhci_uhs2_set_config(struct sdhci_host *host)
{
	u32 value;
	u16 sdhci_uhs2_set_ptr = sdhci_readw(host, SDHCI_UHS2_SETTINGS_PTR);
	/* Register block layout: gen @+0, phy @+4, tran @+8, tran_1 @+12. */
	u16 sdhci_uhs2_gen_set_reg	= sdhci_uhs2_set_ptr;
	u16 sdhci_uhs2_phy_set_reg	= sdhci_uhs2_set_ptr + 4;
	u16 sdhci_uhs2_tran_set_reg	= sdhci_uhs2_set_ptr + 8;
	u16 sdhci_uhs2_tran_set_1_reg	= sdhci_uhs2_set_ptr + 12;

	/* Set Gen Settings */
	value = FIELD_PREP(SDHCI_UHS2_GEN_SETTINGS_N_LANES_MASK, host->mmc->uhs2_caps.n_lanes_set);
	sdhci_writel(host, value, sdhci_uhs2_gen_set_reg);

	/* Set PHY Settings */
	value = FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_DIR_MASK, host->mmc->uhs2_caps.n_lss_dir_set) |
		FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_SYN_MASK, host->mmc->uhs2_caps.n_lss_sync_set);
	if (host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
	    host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD)
		value |= SDHCI_UHS2_PHY_SET_SPEED_B;
	sdhci_writel(host, value, sdhci_uhs2_phy_set_reg);

	/* Set LINK-TRAN Settings */
	value = FIELD_PREP(SDHCI_UHS2_TRAN_RETRY_CNT_MASK, host->mmc->uhs2_caps.max_retry_set) |
		FIELD_PREP(SDHCI_UHS2_TRAN_N_FCU_MASK, host->mmc->uhs2_caps.n_fcu_set);
	sdhci_writel(host, value, sdhci_uhs2_tran_set_reg);
	sdhci_writel(host, host->mmc->uhs2_caps.n_data_gap_set, sdhci_uhs2_tran_set_1_reg);
}
500 
501 static int sdhci_uhs2_check_dormant(struct sdhci_host *host)
502 {
503 	u32 val;
504 
505 	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IN_DORMANT_STATE),
506 			      100, UHS2_CHECK_DORMANT_TIMEOUT_100MS, true, host,
507 			      SDHCI_PRESENT_STATE)) {
508 		pr_debug("%s: UHS2 IN_DORMANT fail in 100ms.\n", mmc_hostname(host->mmc));
509 		sdhci_dbg_dumpregs(host, "UHS2 IN_DORMANT fail in 100ms");
510 		return -EIO;
511 	}
512 	return 0;
513 }
514 
/*
 * Dispatch a UHS-II operation requested by the MMC core to the matching
 * helper.  Returns 0 or a negative errno; an unknown @op yields -EIO.
 */
static int sdhci_uhs2_control(struct mmc_host *mmc, enum sd_uhs2_operation op)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_ios *ios = &mmc->ios;
	int err = 0;

	DBG("Begin uhs2 control, act %d.\n", op);

	switch (op) {
	case UHS2_PHY_INIT:
		err = sdhci_uhs2_do_detect_init(mmc);
		break;
	case UHS2_SET_CONFIG:
		sdhci_uhs2_set_config(host);
		break;
	case UHS2_ENABLE_INT:
		sdhci_uhs2_clear_set_irqs(host, 0, SDHCI_INT_CARD_INT);
		break;
	case UHS2_DISABLE_INT:
		sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_CARD_INT, 0);
		break;
	case UHS2_CHECK_DORMANT:
		err = sdhci_uhs2_check_dormant(host);
		break;
	case UHS2_DISABLE_CLK:
		err = sdhci_uhs2_disable_clk(mmc);
		break;
	case UHS2_ENABLE_CLK:
		err = sdhci_uhs2_enable_clk(mmc);
		break;
	case UHS2_SET_IOS:
		err = sdhci_uhs2_set_ios(mmc, ios);
		break;
	default:
		pr_err("%s: input sd uhs2 operation %d is wrong!\n",
		       mmc_hostname(host->mmc), op);
		err = -EIO;
		break;
	}

	return err;
}
557 
558 /*****************************************************************************\
559  *                                                                           *
560  * Core functions                                                            *
561  *                                                                           *
562 \*****************************************************************************/
563 
564 static void sdhci_uhs2_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
565 {
566 	struct mmc_data *data = cmd->data;
567 
568 	sdhci_initialize_data(host, data);
569 
570 	sdhci_prepare_dma(host, data);
571 
572 	sdhci_writew(host, data->blksz, SDHCI_UHS2_BLOCK_SIZE);
573 	sdhci_writew(host, data->blocks, SDHCI_UHS2_BLOCK_COUNT);
574 }
575 
/*
 * Complete the in-flight data transfer: tear down via the common finish
 * helper, then finish the request the data belonged to.
 */
static void sdhci_uhs2_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data = host->data;

	__sdhci_finish_data_common(host, true);

	__sdhci_finish_mrq(host, data->mrq);
}
584 
585 static void sdhci_uhs2_set_transfer_mode(struct sdhci_host *host, struct mmc_command *cmd)
586 {
587 	u16 mode;
588 	struct mmc_data *data = cmd->data;
589 
590 	if (!data) {
591 		/* clear Auto CMD settings for no data CMDs */
592 		if (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT) {
593 			mode =  0;
594 		} else {
595 			mode = sdhci_readw(host, SDHCI_UHS2_TRANS_MODE);
596 			if (cmd->opcode == MMC_STOP_TRANSMISSION || cmd->opcode == MMC_ERASE)
597 				mode |= SDHCI_UHS2_TRNS_WAIT_EBSY;
598 			else
599 				/* send status mode */
600 				if (cmd->opcode == MMC_SEND_STATUS)
601 					mode = 0;
602 		}
603 
604 		DBG("UHS2 no data trans mode is 0x%x.\n", mode);
605 
606 		sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);
607 		return;
608 	}
609 
610 	WARN_ON(!host->data);
611 
612 	mode = SDHCI_UHS2_TRNS_BLK_CNT_EN | SDHCI_UHS2_TRNS_WAIT_EBSY;
613 	if (data->flags & MMC_DATA_WRITE)
614 		mode |= SDHCI_UHS2_TRNS_DATA_TRNS_WRT;
615 
616 	if (data->blocks == 1 &&
617 	    data->blksz != 512 &&
618 	    cmd->opcode != MMC_READ_SINGLE_BLOCK &&
619 	    cmd->opcode != MMC_WRITE_BLOCK) {
620 		mode &= ~SDHCI_UHS2_TRNS_BLK_CNT_EN;
621 		mode |= SDHCI_UHS2_TRNS_BLK_BYTE_MODE;
622 	}
623 
624 	if (host->flags & SDHCI_REQ_USE_DMA)
625 		mode |= SDHCI_UHS2_TRNS_DMA;
626 
627 	if (cmd->uhs2_cmd->tmode_half_duplex)
628 		mode |= SDHCI_UHS2_TRNS_2L_HD;
629 
630 	sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);
631 
632 	DBG("UHS2 trans mode is 0x%x.\n", mode);
633 }
634 
/*
 * Write the UHS-II command packet (header+argument, then payload words,
 * zero-padding the remainder) into the packet registers, then build and
 * write the UHS-II command register to start execution.
 */
static void __sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, j;
	int cmd_reg;

	/* First word: argument in the upper half, header in the lower. */
	i = 0;
	sdhci_writel(host,
		     ((u32)cmd->uhs2_cmd->arg << 16) |
				(u32)cmd->uhs2_cmd->header,
		     SDHCI_UHS2_CMD_PACKET + i);
	i += 4;

	/*
	 * Per spec, payload (config) should be MSB before sending out.
	 * But we don't need convert here because had set payload as
	 * MSB when preparing config read/write commands.
	 */
	for (j = 0; j < cmd->uhs2_cmd->payload_len / sizeof(u32); j++) {
		sdhci_writel(host, *(__force u32 *)(cmd->uhs2_cmd->payload + j),
			     SDHCI_UHS2_CMD_PACKET + i);
		i += 4;
	}

	/* Zero the unused tail of the packet register area. */
	for ( ; i < SDHCI_UHS2_CMD_PACK_MAX_LEN; i += 4)
		sdhci_writel(host, 0, SDHCI_UHS2_CMD_PACKET + i);

	DBG("UHS2 CMD packet_len = %d.\n", cmd->uhs2_cmd->packet_len);
	for (i = 0; i < cmd->uhs2_cmd->packet_len; i++)
		DBG("UHS2 CMD_PACKET[%d] = 0x%x.\n", i,
		    sdhci_readb(host, SDHCI_UHS2_CMD_PACKET + i));

	cmd_reg = FIELD_PREP(SDHCI_UHS2_CMD_PACK_LEN_MASK, cmd->uhs2_cmd->packet_len);
	if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)
		cmd_reg |= SDHCI_UHS2_CMD_DATA;
	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd_reg |= SDHCI_UHS2_CMD_CMD12;

	/* UHS2 Native ABORT */
	if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
	    (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT))
		cmd_reg |= SDHCI_UHS2_CMD_TRNS_ABORT;

	/* UHS2 Native DORMANT */
	if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
	    (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_GO_DORMANT_STATE))
		cmd_reg |= SDHCI_UHS2_CMD_DORMANT;

	DBG("0x%x is set to UHS2 CMD register.\n", cmd_reg);

	/* Writing the command register kicks off the transaction. */
	sdhci_writew(host, cmd_reg, SDHCI_UHS2_CMD);
}
686 
/*
 * Try to issue @cmd.  Returns false when the command-inhibit bit is
 * still set (caller retries); true once the command has been sent and
 * the request timer armed.  Called with host->lock held.
 */
static bool sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;

	/* Bus still busy with the previous command - let the caller retry. */
	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		__sdhci_uhs2_set_timeout(host);
	}

	if (cmd->data)
		sdhci_uhs2_prepare_data(host, cmd);

	sdhci_uhs2_set_transfer_mode(host, cmd);

	/* Arm the software timeout: data timeout, busy timeout or 10 s. */
	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	__sdhci_uhs2_send_command(host, cmd);

	return true;
}
731 
/*
 * Issue @cmd, retrying for up to ~10 ms while the controller holds the
 * inhibit bit.  The spinlock is dropped between attempts so the sleep
 * and the card-detect query run without it.  Returns false with
 * cmd->error set on failure.
 */
static bool sdhci_uhs2_send_command_retry(struct sdhci_host *host,
					  struct mmc_command *cmd,
					  unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_uhs2_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}
772 
773 static void __sdhci_uhs2_finish_command(struct sdhci_host *host)
774 {
775 	struct mmc_command *cmd = host->cmd;
776 	u8 resp;
777 	u8 error_code;
778 	bool breada0 = 0;
779 	int i;
780 
781 	if (host->mmc->uhs2_sd_tran) {
782 		resp = sdhci_readb(host, SDHCI_UHS2_RESPONSE + 2);
783 		if (resp & UHS2_RES_NACK_MASK) {
784 			error_code = (resp >> UHS2_RES_ECODE_POS) & UHS2_RES_ECODE_MASK;
785 			pr_err("%s: NACK response, ECODE=0x%x.\n",
786 			       mmc_hostname(host->mmc), error_code);
787 		}
788 		breada0 = 1;
789 	}
790 
791 	if (cmd->uhs2_cmd->uhs2_resp_len) {
792 		int len = min_t(int, cmd->uhs2_cmd->uhs2_resp_len, UHS2_MAX_RESP_LEN);
793 
794 		/* Get whole response of some native CCMD, like
795 		 * DEVICE_INIT, ENUMERATE.
796 		 */
797 		for (i = 0; i < len; i++)
798 			cmd->uhs2_cmd->uhs2_resp[i] = sdhci_readb(host, SDHCI_UHS2_RESPONSE + i);
799 	} else {
800 		/* Get SD CMD response and Payload for some read
801 		 * CCMD, like INQUIRY_CFG.
802 		 */
803 		/* Per spec (p136), payload field is divided into
804 		 * a unit of DWORD and transmission order within
805 		 * a DWORD is big endian.
806 		 */
807 		if (!breada0)
808 			sdhci_readl(host, SDHCI_UHS2_RESPONSE);
809 		for (i = 4; i < 20; i += 4) {
810 			cmd->resp[i / 4 - 1] =
811 				(sdhci_readb(host,
812 					     SDHCI_UHS2_RESPONSE + i) << 24) |
813 				(sdhci_readb(host,
814 					     SDHCI_UHS2_RESPONSE + i + 1)
815 					<< 16) |
816 				(sdhci_readb(host,
817 					     SDHCI_UHS2_RESPONSE + i + 2)
818 					<< 8) |
819 				sdhci_readb(host, SDHCI_UHS2_RESPONSE + i + 3);
820 		}
821 	}
822 }
823 
/*
 * Finish the current command: read back the response, then either wait
 * for the busy interrupt, finish early-completed data, or finish the
 * whole request for data-less commands.  Called with host->lock held.
 */
static void sdhci_uhs2_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	__sdhci_uhs2_finish_command(host);

	host->cmd = NULL;

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Processed actual command. */
	if (host->data && host->data_early)
		sdhci_uhs2_finish_data(host);

	if (!cmd->data)
		__sdhci_finish_mrq(host, cmd->mrq);
}
862 
/*
 * .request entry point for UHS-II hosts.  Falls back to the legacy
 * sdhci_request() for non-UHS-II cards; otherwise strips sbc/stop
 * (unused in UHS-II), checks card presence and issues the command.
 */
static void sdhci_uhs2_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	bool present;

	if (!(mmc_card_uhs2(mmc))) {
		sdhci_request(mmc, mrq);
		return;
	}

	/* UHS-II does not use sbc/stop commands. */
	mrq->stop = NULL;
	mrq->sbc = NULL;
	if (mrq->data)
		mrq->data->stop = NULL;

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (sdhci_present_error(host, mrq->cmd, present))
		goto out_finish;

	cmd = mrq->cmd;

	if (!sdhci_uhs2_send_command_retry(host, cmd, flags))
		goto out_finish;

	spin_unlock_irqrestore(&host->lock, flags);

	return;

out_finish:
	sdhci_finish_mrq(host, mrq);
	spin_unlock_irqrestore(&host->lock, flags);
}
901 
902 /*****************************************************************************\
903  *                                                                           *
904  * Request done                                                              *
905  *                                                                           *
906 \*****************************************************************************/
907 
908 static bool sdhci_uhs2_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
909 {
910 	return sdhci_needs_reset(host, mrq) ||
911 	       (!(host->flags & SDHCI_DEVICE_DEAD) && mrq->data && mrq->data->error);
912 }
913 
914 static bool sdhci_uhs2_request_done(struct sdhci_host *host)
915 {
916 	unsigned long flags;
917 	struct mmc_request *mrq;
918 	int i;
919 
920 	spin_lock_irqsave(&host->lock, flags);
921 
922 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
923 		mrq = host->mrqs_done[i];
924 		if (mrq)
925 			break;
926 	}
927 
928 	if (!mrq) {
929 		spin_unlock_irqrestore(&host->lock, flags);
930 		return true;
931 	}
932 
933 	/*
934 	 * Always unmap the data buffers if they were mapped by
935 	 * sdhci_prepare_data() whenever we finish with a request.
936 	 * This avoids leaking DMA mappings on error.
937 	 */
938 	if (host->flags & SDHCI_REQ_USE_DMA)
939 		sdhci_request_done_dma(host, mrq);
940 
941 	/*
942 	 * The controller needs a reset of internal state machines
943 	 * upon error conditions.
944 	 */
945 	if (sdhci_uhs2_needs_reset(host, mrq)) {
946 		/*
947 		 * Do not finish until command and data lines are available for
948 		 * reset. Note there can only be one other mrq, so it cannot
949 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
950 		 * would both be null.
951 		 */
952 		if (host->cmd || host->data_cmd) {
953 			spin_unlock_irqrestore(&host->lock, flags);
954 			return true;
955 		}
956 
957 		if (mrq->cmd->error || mrq->data->error)
958 			sdhci_uhs2_reset_cmd_data(host);
959 		else
960 			sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
961 		host->pending_reset = false;
962 	}
963 
964 	host->mrqs_done[i] = NULL;
965 
966 	spin_unlock_irqrestore(&host->lock, flags);
967 
968 	if (host->ops->request_done)
969 		host->ops->request_done(host, mrq);
970 	else
971 		mmc_request_done(host->mmc, mrq);
972 
973 	return false;
974 }
975 
976 static void sdhci_uhs2_complete_work(struct work_struct *work)
977 {
978 	struct sdhci_host *host = container_of(work, struct sdhci_host,
979 					       complete_work);
980 
981 	if (!mmc_card_uhs2(host->mmc)) {
982 		sdhci_complete_work(work);
983 		return;
984 	}
985 
986 	while (!sdhci_uhs2_request_done(host))
987 		;
988 }
989 
990 /*****************************************************************************\
991  *                                                                           *
992  * Interrupt handling                                                        *
993  *                                                                           *
994 \*****************************************************************************/
995 
996 static void __sdhci_uhs2_irq(struct sdhci_host *host, u32 uhs2mask)
997 {
998 	struct mmc_command *cmd = host->cmd;
999 
1000 	DBG("*** %s got UHS2 error interrupt: 0x%08x\n",
1001 	    mmc_hostname(host->mmc), uhs2mask);
1002 
1003 	if (uhs2mask & SDHCI_UHS2_INT_CMD_ERR_MASK) {
1004 		if (!host->cmd) {
1005 			pr_err("%s: Got cmd interrupt 0x%08x but no cmd.\n",
1006 			       mmc_hostname(host->mmc),
1007 			       (unsigned int)uhs2mask);
1008 			sdhci_dumpregs(host);
1009 			return;
1010 		}
1011 		host->cmd->error = -EILSEQ;
1012 		if (uhs2mask & SDHCI_UHS2_INT_CMD_TIMEOUT)
1013 			host->cmd->error = -ETIMEDOUT;
1014 	}
1015 
1016 	if (uhs2mask & SDHCI_UHS2_INT_DATA_ERR_MASK) {
1017 		if (!host->data) {
1018 			pr_err("%s: Got data interrupt 0x%08x but no data.\n",
1019 			       mmc_hostname(host->mmc),
1020 			       (unsigned int)uhs2mask);
1021 			sdhci_dumpregs(host);
1022 			return;
1023 		}
1024 
1025 		if (uhs2mask & SDHCI_UHS2_INT_DEADLOCK_TIMEOUT) {
1026 			pr_err("%s: Got deadlock timeout interrupt 0x%08x\n",
1027 			       mmc_hostname(host->mmc),
1028 			       (unsigned int)uhs2mask);
1029 			host->data->error = -ETIMEDOUT;
1030 		} else if (uhs2mask & SDHCI_UHS2_INT_ADMA_ERROR) {
1031 			pr_err("%s: ADMA error = 0x %x\n",
1032 			       mmc_hostname(host->mmc),
1033 			       sdhci_readb(host, SDHCI_ADMA_ERROR));
1034 			host->data->error = -EIO;
1035 		} else {
1036 			host->data->error = -EILSEQ;
1037 		}
1038 	}
1039 
1040 	if (host->data && host->data->error)
1041 		sdhci_uhs2_finish_data(host);
1042 	else
1043 		sdhci_finish_mrq(host, cmd->mrq);
1044 
1045 }
1046 
/*
 * sdhci_uhs2_irq - UHS-II portion of the SDHCI interrupt handler
 * @host:    SDHCI host
 * @intmask: pending SDHCI_INT_STATUS bits as read by the caller
 *
 * Called from the generic sdhci_irq(). Consumes the UHS-II specific
 * error and command interrupts and returns the remaining @intmask
 * bits for the caller to process. For non-UHS-II cards @intmask is
 * returned untouched.
 */
u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask)
{
	/* @mask accumulates the bits this function handled itself. */
	u32 mask = intmask, uhs2mask;

	if (!mmc_card_uhs2(host->mmc))
		goto out;

	if (intmask & SDHCI_INT_ERROR) {
		uhs2mask = sdhci_readl(host, SDHCI_UHS2_INT_STATUS);
		/* No UHS-II error pending: treat as a normal cmd IRQ. */
		if (!(uhs2mask & SDHCI_UHS2_INT_ERROR_MASK))
			goto cmd_irq;

		/* Clear error interrupts */
		sdhci_writel(host, uhs2mask & SDHCI_UHS2_INT_ERROR_MASK,
			     SDHCI_UHS2_INT_STATUS);

		/* Handle error interrupts */
		__sdhci_uhs2_irq(host, uhs2mask);

		/* Caller, sdhci_irq(), doesn't have to care about UHS-2 errors */
		intmask &= ~SDHCI_INT_ERROR;
		/* Narrow @mask to the error bit we just consumed. */
		mask &= SDHCI_INT_ERROR;
	}

cmd_irq:
	if (intmask & SDHCI_INT_CMD_MASK) {
		/* Clear command interrupt */
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);

		/* Handle command interrupt */
		if (intmask & SDHCI_INT_RESPONSE)
			sdhci_uhs2_finish_command(host);

		/* Caller, sdhci_irq(), doesn't have to care about UHS-2 commands */
		intmask &= ~SDHCI_INT_CMD_MASK;
		mask &= SDHCI_INT_CMD_MASK;
	}

	/* Clear already-handled interrupts. */
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

out:
	return intmask;
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_irq);
1092 
/*
 * Threaded IRQ handler. Non-UHS-II cards defer to the standard
 * sdhci_thread_irq(). Otherwise: complete all finished requests,
 * re-issue any command the hard IRQ handler had to defer, and
 * propagate card insert/remove events to the MMC core.
 */
static irqreturn_t sdhci_uhs2_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	struct mmc_command *cmd;
	unsigned long flags;
	u32 isr;

	if (!mmc_card_uhs2(host->mmc))
		return sdhci_thread_irq(irq, dev_id);

	/* Drain requests completed by the hard IRQ handler first. */
	while (!sdhci_uhs2_request_done(host))
		;

	spin_lock_irqsave(&host->lock, flags);

	/* Consume the event bits posted by the hard IRQ handler. */
	isr = host->thread_isr;
	host->thread_isr = 0;

	/* Retry a command that could not be sent from hard IRQ context. */
	cmd = host->deferred_cmd;
	if (cmd && !sdhci_uhs2_send_command_retry(host, cmd, flags))
		sdhci_finish_mrq(host, cmd->mrq);

	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		/* Debounce card detection by 200 ms. */
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	return IRQ_HANDLED;
}
1126 
1127 /*****************************************************************************\
1128  *                                                                           *
1129  * Driver init/exit                                                          *
1130  *                                                                           *
1131 \*****************************************************************************/
1132 
1133 static int sdhci_uhs2_host_ops_init(struct sdhci_host *host)
1134 {
1135 	host->mmc_host_ops.uhs2_control = sdhci_uhs2_control;
1136 	host->mmc_host_ops.request = sdhci_uhs2_request;
1137 
1138 	return 0;
1139 }
1140 
/*
 * Module init/exit hooks. This library module keeps no state of its
 * own — host drivers call sdhci_uhs2_add_host()/sdhci_uhs2_remove_host()
 * directly — so both hooks are intentionally empty.
 */
static int __init sdhci_uhs2_mod_init(void)
{
	return 0;
}
module_init(sdhci_uhs2_mod_init);

static void __exit sdhci_uhs2_mod_exit(void)
{
}
module_exit(sdhci_uhs2_mod_exit);
1151 
1152 /*****************************************************************************\
1153  *
1154  * Device allocation/registration                                            *
1155  *                                                                           *
1156 \*****************************************************************************/
1157 
/*
 * __sdhci_uhs2_add_host_v4 - apply UHS-II capability bits (SDHCI v4.00+)
 * @host:  SDHCI host
 * @caps1: cached SDHCI_CAPABILITIES_1 register value
 *
 * Advertises MMC_CAP2_SD_UHS2 when the controller reports UHS-II
 * support, then withdraws it again if VDD2 (1.8 V) is unavailable.
 */
static void __sdhci_uhs2_add_host_v4(struct sdhci_host *host, u32 caps1)
{
	struct mmc_host *mmc;
	u32 max_current_caps2;

	mmc = host->mmc;

	/* Support UHS2 */
	if (caps1 & SDHCI_SUPPORT_UHS2)
		mmc->caps2 |= MMC_CAP2_SD_UHS2;

	max_current_caps2 = sdhci_readl(host, SDHCI_MAX_CURRENT_1);

	/* Register reports no VDD2 limit: derive one from the regulator. */
	if ((caps1 & SDHCI_CAN_VDD2_180) &&
	    !max_current_caps2 &&
	    !IS_ERR(mmc->supply.vqmmc2)) {
		/* UHS2 - VDD2 */
		int curr = regulator_get_current_limit(mmc->supply.vqmmc2);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps2 = curr;
		}
	}

	/*
	 * NOTE(review): max_current_caps2 computed above is never read
	 * again (dead store) — confirm whether a write to a max-current
	 * limit field was intended here.
	 */
	if (!(caps1 & SDHCI_CAN_VDD2_180))
		mmc->caps2 &= ~MMC_CAP2_SD_UHS2;
}
1189 
1190 static void __sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
1191 {
1192 	if (!mmc_card_uhs2(host->mmc))
1193 		return;
1194 
1195 	if (!dead)
1196 		sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_FULL);
1197 }
1198 
1199 int sdhci_uhs2_add_host(struct sdhci_host *host)
1200 {
1201 	struct mmc_host *mmc = host->mmc;
1202 	int ret;
1203 
1204 	ret = sdhci_setup_host(host);
1205 	if (ret)
1206 		return ret;
1207 
1208 	if (host->version >= SDHCI_SPEC_400)
1209 		__sdhci_uhs2_add_host_v4(host, host->caps1);
1210 
1211 	if ((mmc->caps2 & MMC_CAP2_SD_UHS2) && !host->v4_mode)
1212 		/* host doesn't want to enable UHS2 support */
1213 		mmc->caps2 &= ~MMC_CAP2_SD_UHS2;
1214 
1215 	/* overwrite ops */
1216 	if (mmc->caps2 & MMC_CAP2_SD_UHS2)
1217 		sdhci_uhs2_host_ops_init(host);
1218 
1219 	host->complete_work_fn = sdhci_uhs2_complete_work;
1220 	host->thread_irq_fn    = sdhci_uhs2_thread_irq;
1221 
1222 	/* LED support not implemented for UHS2 */
1223 	host->quirks |= SDHCI_QUIRK_NO_LED;
1224 
1225 	ret = __sdhci_add_host(host);
1226 	if (ret)
1227 		goto cleanup;
1228 
1229 	return 0;
1230 
1231 cleanup:
1232 	if (host->version >= SDHCI_SPEC_400)
1233 		__sdhci_uhs2_remove_host(host, 0);
1234 
1235 	sdhci_cleanup_host(host);
1236 
1237 	return ret;
1238 }
1239 EXPORT_SYMBOL_GPL(sdhci_uhs2_add_host);
1240 
/*
 * sdhci_uhs2_remove_host - unregister a UHS-II capable SDHCI host
 * @host: SDHCI host
 * @dead: non-zero if the controller is no longer responding
 *
 * Performs the UHS-II specific reset (when applicable) and then the
 * standard SDHCI host removal.
 */
void sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
{
	__sdhci_uhs2_remove_host(host, dead);

	sdhci_remove_host(host, dead);
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_remove_host);
1248 
1249 MODULE_AUTHOR("Intel, Genesys Logic, Linaro");
1250 MODULE_DESCRIPTION("MMC UHS-II Support");
1251 MODULE_LICENSE("GPL");
1252