xref: /linux/drivers/mmc/core/mmc.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/drivers/mmc/core/mmc.c
4  *
5  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
6  *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  */
9 
10 #include <linux/err.h>
11 #include <linux/of.h>
12 #include <linux/slab.h>
13 #include <linux/stat.h>
14 #include <linux/string.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/random.h>
17 #include <linux/sysfs.h>
18 
19 #include <linux/mmc/host.h>
20 #include <linux/mmc/card.h>
21 #include <linux/mmc/mmc.h>
22 
23 #include "core.h"
24 #include "card.h"
25 #include "host.h"
26 #include "bus.h"
27 #include "mmc_ops.h"
28 #include "quirks.h"
29 #include "sd_ops.h"
30 #include "pwrseq.h"
31 
32 #define DEFAULT_CMD6_TIMEOUT_MS	500
33 #define MIN_CACHE_EN_TIMEOUT_MS 1600
34 #define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
35 
/*
 * Context in which the card is being powered down.
 * NOTE(review): names mirror the suspend/shutdown/undervoltage/unbind
 * power-off paths — confirm against the users of this enum in this file.
 */
enum mmc_poweroff_type {
	MMC_POWEROFF_SUSPEND,
	MMC_POWEROFF_SHUTDOWN,
	MMC_POWEROFF_UNDERVOLTAGE,
	MMC_POWEROFF_UNBIND,
};
42 
/*
 * Lookup tables used by mmc_decode_csd() to expand the CSD TRAN_SPEED
 * field: rate = tran_exp[exponent] * tran_mant[mantissa].
 */
static const unsigned int tran_exp[] = {
	10000,		100000,		1000000,	10000000,
	0,		0,		0,		0
};

/* TRAN_SPEED mantissa values, scaled by 10 (index 0 is reserved). */
static const unsigned char tran_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

/*
 * Lookup tables used by mmc_decode_csd() to expand the CSD TAAC field:
 * taac_ns is derived from taac_exp[exponent] * taac_mant[mantissa] / 10,
 * rounded up.
 */
static const unsigned int taac_exp[] = {
	1,	10,	100,	1000,	10000,	100000,	1000000, 10000000,
};

/* TAAC mantissa values, scaled by 10 (index 0 is reserved). */
static const unsigned int taac_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};
61 
62 /*
63  * Given the decoded CSD structure, decode the raw CID to our CID structure.
64  */
/*
 * Decode card->raw_cid into card->cid.  The field layout depends on the
 * MMCA version previously decoded into card->csd.mmca_vsn, so
 * mmc_decode_csd() must have run first.  Returns 0 on success or
 * -EINVAL for an unknown MMCA version.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * Add the raw card ID (cid) data to the entropy pool. It doesn't
	 * matter that not all of it is unique, it's just bonus entropy.
	 */
	add_device_randomness(&card->raw_cid, sizeof(card->raw_cid));

	/*
	 * The selection of the format here is based upon published
	 * specs from SanDisk and from what people have reported.
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		/*
		 * Old layout: 24-bit manufacturer id, 7-char product name,
		 * separate 4-bit hw/fw revisions and a 24-bit serial.
		 */
		card->cid.manfid	= unstuff_bits(resp, 104, 24);
		card->cid.prod_name[0]	= unstuff_bits(resp, 96, 8);
		card->cid.prod_name[1]	= unstuff_bits(resp, 88, 8);
		card->cid.prod_name[2]	= unstuff_bits(resp, 80, 8);
		card->cid.prod_name[3]	= unstuff_bits(resp, 72, 8);
		card->cid.prod_name[4]	= unstuff_bits(resp, 64, 8);
		card->cid.prod_name[5]	= unstuff_bits(resp, 56, 8);
		card->cid.prod_name[6]	= unstuff_bits(resp, 48, 8);
		card->cid.hwrev		= unstuff_bits(resp, 44, 4);
		card->cid.fwrev		= unstuff_bits(resp, 40, 4);
		card->cid.serial	= unstuff_bits(resp, 16, 24);
		card->cid.month		= unstuff_bits(resp, 12, 4);
		/* manufacture year is encoded as an offset from 1997 */
		card->cid.year		= unstuff_bits(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		/*
		 * Newer layout: 8-bit manufacturer id plus 16-bit OEM id,
		 * 6-char product name, a product revision byte and a
		 * 32-bit serial.
		 */
		card->cid.manfid	= unstuff_bits(resp, 120, 8);
		card->cid.oemid		= unstuff_bits(resp, 104, 16);
		card->cid.prod_name[0]	= unstuff_bits(resp, 96, 8);
		card->cid.prod_name[1]	= unstuff_bits(resp, 88, 8);
		card->cid.prod_name[2]	= unstuff_bits(resp, 80, 8);
		card->cid.prod_name[3]	= unstuff_bits(resp, 72, 8);
		card->cid.prod_name[4]	= unstuff_bits(resp, 64, 8);
		card->cid.prod_name[5]	= unstuff_bits(resp, 56, 8);
		card->cid.prv		= unstuff_bits(resp, 48, 8);
		card->cid.serial	= unstuff_bits(resp, 16, 32);
		card->cid.month		= unstuff_bits(resp, 12, 4);
		/* offset from 1997; may be adjusted later per EXT_CSD rev */
		card->cid.year		= unstuff_bits(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	/* some product names include trailing whitespace */
	strim(card->cid.prod_name);

	return 0;
}
125 
126 static void mmc_set_erase_size(struct mmc_card *card)
127 {
128 	if (card->ext_csd.erase_group_def & 1)
129 		card->erase_size = card->ext_csd.hc_erase_size;
130 	else
131 		card->erase_size = card->csd.erase_size;
132 
133 	mmc_init_erase(card);
134 }
135 
136 
137 static void mmc_set_wp_grp_size(struct mmc_card *card)
138 {
139 	if (card->ext_csd.erase_group_def & 1)
140 		card->wp_grp_size = card->ext_csd.hc_erase_size *
141 			card->ext_csd.raw_hc_erase_gap_size;
142 	else
143 		card->wp_grp_size = card->csd.erase_size *
144 			(card->csd.wp_grp_size + 1);
145 }
146 
147 /*
148  * Given a 128-bit response, decode to our card CSD structure.
149  */
/*
 * Decode card->raw_csd into card->csd.  Returns 0 on success or
 * -EINVAL for an unrecognised CSD structure version.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
	 */
	csd->structure = unstuff_bits(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn	 = unstuff_bits(resp, 122, 4);

	/* TAAC: mantissa/exponent pair; the +9 rounds the ns value up */
	m = unstuff_bits(resp, 115, 4);
	e = unstuff_bits(resp, 112, 3);
	csd->taac_ns	 = (taac_exp[e] * taac_mant[m] + 9) / 10;
	csd->taac_clks	 = unstuff_bits(resp, 104, 8) * 100;

	/* TRAN_SPEED: maximum data transfer rate, mantissa * exponent */
	m = unstuff_bits(resp, 99, 4);
	e = unstuff_bits(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = unstuff_bits(resp, 84, 12);

	/* capacity = (C_SIZE + 1) << (C_SIZE_MULT + 2), in read blocks */
	e = unstuff_bits(resp, 47, 3);
	m = unstuff_bits(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	csd->read_blkbits = unstuff_bits(resp, 80, 4);
	csd->read_partial = unstuff_bits(resp, 79, 1);
	csd->write_misalign = unstuff_bits(resp, 78, 1);
	csd->read_misalign = unstuff_bits(resp, 77, 1);
	csd->dsr_imp = unstuff_bits(resp, 76, 1);
	csd->r2w_factor = unstuff_bits(resp, 26, 3);
	csd->write_blkbits = unstuff_bits(resp, 22, 4);
	csd->write_partial = unstuff_bits(resp, 21, 1);

	/*
	 * Erase geometry, normalised to 512-byte sectors — only computed
	 * when the write block size is at least 512 bytes.
	 */
	if (csd->write_blkbits >= 9) {
		a = unstuff_bits(resp, 42, 5);
		b = unstuff_bits(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
		csd->wp_grp_size = unstuff_bits(resp, 32, 5);
	}

	return 0;
}
202 
/*
 * Intersect the card's advertised device types (EXT_CSD CARD_TYPE) with
 * the host's capabilities and record the usable modes in
 * card->mmc_avail_type, along with the matching maximum clock rates.
 *
 * The checks run from slowest to fastest mode, so each later match
 * overwrites hs_max_dtr / hs200_max_dtr with the higher rate.
 */
static void mmc_select_card_type(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u8 card_type = card->ext_csd.raw_card_type;
	u32 caps = host->caps, caps2 = host->caps2;
	unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
	unsigned int avail_type = 0;

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_26) {
		hs_max_dtr = MMC_HIGH_26_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_26;
	}

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_52) {
		hs_max_dtr = MMC_HIGH_52_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_52;
	}

	if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
	}

	if (caps & MMC_CAP_1_2V_DDR &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
	}

	if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
	}

	if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
	}

	/* HS400 modes record the HS200 maximum rate as well */
	if (caps2 & MMC_CAP2_HS400_1_8V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
	}

	if (caps2 & MMC_CAP2_HS400_1_2V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
	}

	/*
	 * HS400 enhanced strobe requires host support, card strobe
	 * support and an already-usable HS400 mode.
	 */
	if ((caps2 & MMC_CAP2_HS400_ES) &&
	    card->ext_csd.strobe_support &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400))
		avail_type |= EXT_CSD_CARD_TYPE_HS400ES;

	card->ext_csd.hs_max_dtr = hs_max_dtr;
	card->ext_csd.hs200_max_dtr = hs200_max_dtr;
	card->mmc_avail_type = avail_type;
}
268 
/*
 * Parse the enhanced user data area information from EXT_CSD bytes
 * 136-142 into card->ext_csd.enhanced_area_offset (bytes) and
 * enhanced_area_size (KiB).  Both default to -EINVAL when the feature
 * is absent, not enabled, or partition setting is incomplete.
 */
static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
{
	u8 hc_erase_grp_sz, hc_wp_grp_sz;

	/*
	 * Disable these attributes by default
	 */
	card->ext_csd.enhanced_area_offset = -EINVAL;
	card->ext_csd.enhanced_area_size = -EINVAL;

	/*
	 * Enhanced area feature support -- check whether the eMMC
	 * card has the Enhanced area enabled.  If so, export enhanced
	 * area offset and size to user by adding sysfs interface.
	 */
	if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
	    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
		if (card->ext_csd.partition_setting_completed) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(((unsigned long long)ext_csd[139]) << 24) +
				(((unsigned long long)ext_csd[138]) << 16) +
				(((unsigned long long)ext_csd[137]) << 8) +
				(((unsigned long long)ext_csd[136]));
			/* sector-addressed cards report the offset in sectors */
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			pr_warn("%s: defines enhanced area without partition setting complete\n",
				mmc_hostname(card->host));
		}
	}
}
317 
318 static void mmc_part_add(struct mmc_card *card, u64 size,
319 			 unsigned int part_cfg, char *name, int idx, bool ro,
320 			 int area_type)
321 {
322 	card->part[card->nr_parts].size = size;
323 	card->part[card->nr_parts].part_cfg = part_cfg;
324 	sprintf(card->part[card->nr_parts].name, name, idx);
325 	card->part[card->nr_parts].force_ro = ro;
326 	card->part[card->nr_parts].area_type = area_type;
327 	card->nr_parts++;
328 }
329 
330 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
331 {
332 	int idx;
333 	u8 hc_erase_grp_sz, hc_wp_grp_sz;
334 	u64 part_size;
335 
336 	/*
337 	 * General purpose partition feature support --
338 	 * If ext_csd has the size of general purpose partitions,
339 	 * set size, part_cfg, partition name in mmc_part.
340 	 */
341 	if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
342 	    EXT_CSD_PART_SUPPORT_PART_EN) {
343 		hc_erase_grp_sz =
344 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
345 		hc_wp_grp_sz =
346 			ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
347 
348 		for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
349 			if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
350 			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
351 			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
352 				continue;
353 			if (card->ext_csd.partition_setting_completed == 0) {
354 				pr_warn("%s: has partition size defined without partition complete\n",
355 					mmc_hostname(card->host));
356 				break;
357 			}
358 			part_size =
359 				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
360 				<< 16) +
361 				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
362 				<< 8) +
363 				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
364 			part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
365 			mmc_part_add(card, part_size << 19,
366 				EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
367 				"gp%d", idx, false,
368 				MMC_BLK_DATA_AREA_GP);
369 		}
370 	}
371 }
372 
373 /* Minimum partition switch timeout in milliseconds */
374 #define MMC_MIN_PART_SWITCH_TIME	300
375 
376 /*
377  * Decode extended CSD.
378  */
/*
 * Decode the 512-byte EXT_CSD register into card->ext_csd.  Fields are
 * parsed in groups gated by the EXT_CSD revision (rev >= 2/3/4/5/6/7/8/9).
 * The raw_* copies are kept so mmc_compare_ext_csds() can later verify a
 * re-read at a wider bus width.  Returns 0 or -EINVAL for an
 * unrecognised EXT_CSD structure version.
 */
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	u64 part_size;
	struct device_node *np;
	bool broken_hpi = false;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	/* a DT "mmc-card" child node can mark this card's HPI as broken */
	np = mmc_of_find_child_device(card->host, 0);
	if (np && of_device_is_compatible(np, "mmc-card"))
		broken_hpi = of_property_read_bool(np, "broken-hpi");
	of_node_put(np);

	/*
	 * The EXT_CSD format is meant to be forward compatible. As long
	 * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
	 * are authorized, see JEDEC JESD84-B50 section B.8.
	 */
	card->ext_csd.rev = ext_csd[EXT_CSD_REV];

	/* fixup device after ext_csd revision field is updated */
	mmc_fixup_device(card, mmc_ext_csd_fixups);

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		/* SEC_CNT is a little-endian 32-bit sector count */
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	card->ext_csd.raw_boot_mult =
		ext_csd[EXT_CSD_BOOT_MULT];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		/* timeouts stored in ms: multiplier is in 300ms units */
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		/* HC erase group size stored in 512-byte sectors (<< 10) */
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_host_can_access_boot(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
	if (card->ext_csd.rev >= 4) {
		if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
		    EXT_CSD_PART_SETTING_COMPLETED)
			card->ext_csd.partition_setting_completed = 1;
		else
			card->ext_csd.partition_setting_completed = 0;

		/* both helpers read partition_setting_completed set above */
		mmc_manage_enhanced_area(card, ext_csd);

		mmc_manage_gp_partitions(card, ext_csd);

		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;

		/* Save power class values */
		card->ext_csd.raw_pwr_cl_52_195 =
			ext_csd[EXT_CSD_PWR_CL_52_195];
		card->ext_csd.raw_pwr_cl_26_195 =
			ext_csd[EXT_CSD_PWR_CL_26_195];
		card->ext_csd.raw_pwr_cl_52_360 =
			ext_csd[EXT_CSD_PWR_CL_52_360];
		card->ext_csd.raw_pwr_cl_26_360 =
			ext_csd[EXT_CSD_PWR_CL_26_360];
		card->ext_csd.raw_pwr_cl_200_195 =
			ext_csd[EXT_CSD_PWR_CL_200_195];
		card->ext_csd.raw_pwr_cl_200_360 =
			ext_csd[EXT_CSD_PWR_CL_200_360];
		card->ext_csd.raw_pwr_cl_ddr_52_195 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
		card->ext_csd.raw_pwr_cl_ddr_52_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
		card->ext_csd.raw_pwr_cl_ddr_200_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
	}

	if (card->ext_csd.rev >= 5) {
		/* Adjust production date as per JEDEC JESD84-B451 */
		if (card->cid.year < 2010)
			card->cid.year += 16;

		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.man_bkops_en =
					(ext_csd[EXT_CSD_BKOPS_EN] &
						EXT_CSD_MANUAL_BKOPS_MASK);
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (card->ext_csd.man_bkops_en)
				pr_debug("%s: MAN_BKOPS_EN bit is set\n",
					mmc_hostname(card->host));
			card->ext_csd.auto_bkops_en =
					(ext_csd[EXT_CSD_BKOPS_EN] &
						EXT_CSD_AUTO_BKOPS_MASK);
			if (card->ext_csd.auto_bkops_en)
				pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (!mmc_card_broken_hpi(card) &&
		    !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
			card->ext_csd.hpi = 1;
			/* bit 1 selects CMD12 over CMD13 as the HPI command */
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_can_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		/* both timeouts are in 10ms units; stored in ms */
		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}
	} else {
		card->ext_csd.data_sector_size = 512;
	}

	/*
	 * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
	 * when accessing a specific field", so use it here if there is no
	 * PARTITION_SWITCH_TIME.
	 */
	if (!card->ext_csd.part_time)
		card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
	/* Some eMMC set the value too low so set a minimum */
	if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
		card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;

	/* eMMC v5 or later */
	if (card->ext_csd.rev >= 7) {
		memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
		       MMC_FIRMWARE_LEN);
		card->ext_csd.ffu_capable =
			(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
			!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);

		card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
		card->ext_csd.device_life_time_est_typ_a =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
		card->ext_csd.device_life_time_est_typ_b =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
	}

	/* eMMC v5.1 or later */
	if (card->ext_csd.rev >= 8) {
		card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
					     EXT_CSD_CMDQ_SUPPORTED;
		card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
					    EXT_CSD_CMDQ_DEPTH_MASK) + 1;
		/* Exclude inefficiently small queue depths */
		if (card->ext_csd.cmdq_depth <= 2) {
			card->ext_csd.cmdq_support = false;
			card->ext_csd.cmdq_depth = 0;
		}
		if (card->ext_csd.cmdq_support) {
			pr_debug("%s: Command Queue supported depth %u\n",
				 mmc_hostname(card->host),
				 card->ext_csd.cmdq_depth);
		}
		card->ext_csd.enhanced_rpmb_supported =
					(card->ext_csd.rel_param &
					 EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);

		if (card->ext_csd.rev >= 9) {
			/* Adjust production date as per JEDEC JESD84-B51B September 2025 */
			if (card->cid.year < 2023)
				card->cid.year += 16;
		} else {
			/* Handle vendors with broken MDT reporting */
			if (mmc_card_broken_mdt(card) && card->cid.year >= 2010 &&
			    card->cid.year <= 2012)
				card->cid.year += 16;
		}
	}

out:
	return err;
}
690 
691 static int mmc_read_ext_csd(struct mmc_card *card)
692 {
693 	u8 *ext_csd;
694 	int err;
695 
696 	if (!mmc_card_can_ext_csd(card))
697 		return 0;
698 
699 	err = mmc_get_ext_csd(card, &ext_csd);
700 	if (err) {
701 		/* If the host or the card can't do the switch,
702 		 * fail more gracefully. */
703 		if ((err != -EINVAL)
704 		 && (err != -ENOSYS)
705 		 && (err != -EFAULT))
706 			return err;
707 
708 		/*
709 		 * High capacity cards should have this "magic" size
710 		 * stored in their CSD.
711 		 */
712 		if (card->csd.capacity == (4096 * 512)) {
713 			pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
714 				mmc_hostname(card->host));
715 		} else {
716 			pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
717 				mmc_hostname(card->host));
718 			err = 0;
719 		}
720 
721 		return err;
722 	}
723 
724 	err = mmc_decode_ext_csd(card, ext_csd);
725 	kfree(ext_csd);
726 	return err;
727 }
728 
/*
 * Re-read EXT_CSD and compare its read-only fields against the raw_*
 * values captured by mmc_decode_ext_csd().  Returns 0 when they match
 * (or when running at 1-bit bus width, where no check is needed) and
 * -EINVAL on any mismatch.
 * NOTE(review): presumably used to validate a bus-width switch by
 * checking data integrity of the wider transfer — confirm with callers.
 */
static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);
	if (err)
		return err;

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
		(card->ext_csd.raw_pwr_cl_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
		(card->ext_csd.raw_pwr_cl_26_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
		(card->ext_csd.raw_pwr_cl_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
		(card->ext_csd.raw_pwr_cl_26_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
		(card->ext_csd.raw_pwr_cl_200_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
		(card->ext_csd.raw_pwr_cl_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));

	/* err is 1 when any field differed; normalise to an error code */
	if (err)
		err = -EINVAL;

	kfree(bw_ext_csd);
	return err;
}
801 
/* Read-only sysfs attributes exposing CID/CSD/EXT_CSD derived fields. */
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(wp_grp_size, "%u\n", card->wp_grp_size << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
	card->ext_csd.device_life_time_est_typ_a,
	card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
	card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
832 
833 static ssize_t mmc_fwrev_show(struct device *dev,
834 			      struct device_attribute *attr,
835 			      char *buf)
836 {
837 	struct mmc_card *card = mmc_dev_to_card(dev);
838 
839 	if (card->ext_csd.rev < 7)
840 		return sysfs_emit(buf, "0x%x\n", card->cid.fwrev);
841 	else
842 		return sysfs_emit(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
843 				  card->ext_csd.fwrev);
844 }
845 
846 static DEVICE_ATTR(fwrev, 0444, mmc_fwrev_show, NULL);
847 
848 static ssize_t mmc_dsr_show(struct device *dev,
849 			    struct device_attribute *attr,
850 			    char *buf)
851 {
852 	struct mmc_card *card = mmc_dev_to_card(dev);
853 	struct mmc_host *host = card->host;
854 
855 	if (card->csd.dsr_imp && host->dsr_req)
856 		return sysfs_emit(buf, "0x%x\n", host->dsr);
857 	else
858 		/* return default DSR value */
859 		return sysfs_emit(buf, "0x%x\n", 0x404);
860 }
861 
862 static DEVICE_ATTR(dsr, 0444, mmc_dsr_show, NULL);
863 
/* Standard attribute set registered for every MMC card device. */
static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_wp_grp_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_ffu_capable.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_prv.attr,
	&dev_attr_rev.attr,
	&dev_attr_pre_eol_info.attr,
	&dev_attr_life_time.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_enhanced_rpmb_supported.attr,
	&dev_attr_rel_sectors.attr,
	&dev_attr_ocr.attr,
	&dev_attr_rca.attr,
	&dev_attr_dsr.attr,
	&dev_attr_cmdq_en.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mmc_std);

/* Device type for MMC cards; attaches the sysfs attribute groups above. */
static const struct device_type mmc_type = {
	.groups = mmc_std_groups,
};
898 
899 /*
900  * Select the PowerClass for the current bus width
901  * If power class is defined for 4/8 bit bus in the
902  * extended CSD register, select it by executing the
903  * mmc_switch command.
904  */
static int __mmc_select_powerclass(struct mmc_card *card,
				   unsigned int bus_width)
{
	struct mmc_host *host = card->host;
	struct mmc_ext_csd *ext_csd = &card->ext_csd;
	unsigned int pwrclass_val = 0;
	int err = 0;

	/*
	 * Pick the raw power class byte matching the current supply
	 * voltage range and bus clock.  Each byte packs a 4-bit and an
	 * 8-bit-bus class (masked out below).
	 */
	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_195;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			/* DDR widths use the dedicated DDR power class */
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_195 :
				ext_csd->raw_pwr_cl_ddr_52_195;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_360;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_360 :
				ext_csd->raw_pwr_cl_ddr_52_360;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_ddr_200_360 :
				ext_csd->raw_pwr_cl_200_360;
		break;
	default:
		pr_warn("%s: Voltage range not supported for power class\n",
			mmc_hostname(host));
		return -EINVAL;
	}

	/* extract the nibble for the active bus width (8-bit vs 4-bit) */
	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}
967 
968 static int mmc_select_powerclass(struct mmc_card *card)
969 {
970 	struct mmc_host *host = card->host;
971 	u32 bus_width, ext_csd_bits;
972 	int err, ddr;
973 
974 	/* Power class selection is supported for versions >= 4.0 */
975 	if (!mmc_card_can_ext_csd(card))
976 		return 0;
977 
978 	bus_width = host->ios.bus_width;
979 	/* Power class values are defined only for 4/8 bit bus */
980 	if (bus_width == MMC_BUS_WIDTH_1)
981 		return 0;
982 
983 	ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
984 	if (ddr)
985 		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
986 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
987 	else
988 		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
989 			EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4;
990 
991 	err = __mmc_select_powerclass(card, ext_csd_bits);
992 	if (err)
993 		pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
994 			mmc_hostname(host), 1 << bus_width, ddr);
995 
996 	return err;
997 }
998 
999 /*
1000  * Set the bus speed for the selected speed mode.
1001  */
1002 static void mmc_set_bus_speed(struct mmc_card *card)
1003 {
1004 	unsigned int max_dtr = (unsigned int)-1;
1005 
1006 	if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
1007 	     max_dtr > card->ext_csd.hs200_max_dtr)
1008 		max_dtr = card->ext_csd.hs200_max_dtr;
1009 	else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
1010 		max_dtr = card->ext_csd.hs_max_dtr;
1011 	else if (max_dtr > card->csd.max_dtr)
1012 		max_dtr = card->csd.max_dtr;
1013 
1014 	mmc_set_clock(card->host, max_dtr);
1015 }
1016 
1017 /*
1018  * Select the bus width amoung 4-bit and 8-bit(SDR).
1019  * If the bus width is changed successfully, return the selected width value.
1020  * Zero is returned instead of error value if the wide width is not supported.
1021  */
1022 static int mmc_select_bus_width(struct mmc_card *card)
1023 {
1024 	static unsigned ext_csd_bits[] = {
1025 		EXT_CSD_BUS_WIDTH_8,
1026 		EXT_CSD_BUS_WIDTH_4,
1027 		EXT_CSD_BUS_WIDTH_1,
1028 	};
1029 	static unsigned bus_widths[] = {
1030 		MMC_BUS_WIDTH_8,
1031 		MMC_BUS_WIDTH_4,
1032 		MMC_BUS_WIDTH_1,
1033 	};
1034 	struct mmc_host *host = card->host;
1035 	unsigned idx, bus_width = 0;
1036 	int err = 0;
1037 
1038 	if (!mmc_card_can_ext_csd(card) ||
1039 	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
1040 		return 0;
1041 
1042 	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
1043 
1044 	/*
1045 	 * Unlike SD, MMC cards dont have a configuration register to notify
1046 	 * supported bus width. So bus test command should be run to identify
1047 	 * the supported bus width or compare the ext csd values of current
1048 	 * bus width and ext csd values of 1 bit mode read earlier.
1049 	 */
1050 	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
1051 		/*
1052 		 * Host is capable of 8bit transfer, then switch
1053 		 * the device to work in 8bit transfer mode. If the
1054 		 * mmc switch command returns error then switch to
1055 		 * 4bit transfer mode. On success set the corresponding
1056 		 * bus width on the host.
1057 		 */
1058 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1059 				 EXT_CSD_BUS_WIDTH,
1060 				 ext_csd_bits[idx],
1061 				 card->ext_csd.generic_cmd6_time);
1062 		if (err)
1063 			continue;
1064 
1065 		bus_width = bus_widths[idx];
1066 		mmc_set_bus_width(host, bus_width);
1067 
1068 		/*
1069 		 * If controller can't handle bus width test,
1070 		 * compare ext_csd previously read in 1 bit mode
1071 		 * against ext_csd at new bus width
1072 		 */
1073 		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
1074 			err = mmc_compare_ext_csds(card, bus_width);
1075 		else
1076 			err = mmc_bus_test(card, bus_width);
1077 
1078 		if (!err) {
1079 			err = bus_width;
1080 			break;
1081 		} else {
1082 			pr_warn("%s: switch to bus width %d failed\n",
1083 				mmc_hostname(host), 1 << bus_width);
1084 		}
1085 	}
1086 
1087 	return err;
1088 }
1089 
1090 /*
1091  * Switch to the high-speed mode
1092  */
1093 static int mmc_select_hs(struct mmc_card *card)
1094 {
1095 	int err;
1096 
1097 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1098 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1099 			   card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
1100 			   true, true, MMC_CMD_RETRIES);
1101 	if (err)
1102 		pr_warn("%s: switch to high-speed failed, err:%d\n",
1103 			mmc_hostname(card->host), err);
1104 
1105 	return err;
1106 }
1107 
1108 /*
1109  * Activate wide bus and DDR if supported.
1110  */
1111 static int mmc_select_hs_ddr(struct mmc_card *card)
1112 {
1113 	struct mmc_host *host = card->host;
1114 	u32 bus_width, ext_csd_bits;
1115 	int err = 0;
1116 
1117 	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
1118 		return 0;
1119 
1120 	bus_width = host->ios.bus_width;
1121 	if (bus_width == MMC_BUS_WIDTH_1)
1122 		return 0;
1123 
1124 	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1125 		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
1126 
1127 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1128 			   EXT_CSD_BUS_WIDTH,
1129 			   ext_csd_bits,
1130 			   card->ext_csd.generic_cmd6_time,
1131 			   MMC_TIMING_MMC_DDR52,
1132 			   true, true, MMC_CMD_RETRIES);
1133 	if (err) {
1134 		pr_err("%s: switch to bus width %d ddr failed\n",
1135 			mmc_hostname(host), 1 << bus_width);
1136 		return err;
1137 	}
1138 
1139 	/*
1140 	 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
1141 	 * signaling.
1142 	 *
1143 	 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
1144 	 *
1145 	 * 1.8V vccq at 3.3V core voltage (vcc) is not required
1146 	 * in the JEDEC spec for DDR.
1147 	 *
1148 	 * Even (e)MMC card can support 3.3v to 1.2v vccq, but not all
1149 	 * host controller can support this, like some of the SDHCI
1150 	 * controller which connect to an eMMC device. Some of these
1151 	 * host controller still needs to use 1.8v vccq for supporting
1152 	 * DDR mode.
1153 	 *
1154 	 * So the sequence will be:
1155 	 * if (host and device can both support 1.2v IO)
1156 	 *	use 1.2v IO;
1157 	 * else if (host and device can both support 1.8v IO)
1158 	 *	use 1.8v IO;
1159 	 * so if host and device can only support 3.3v IO, this is the
1160 	 * last choice.
1161 	 *
1162 	 * WARNING: eMMC rules are NOT the same as SD DDR
1163 	 */
1164 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
1165 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1166 		if (!err)
1167 			return 0;
1168 	}
1169 
1170 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
1171 	    host->caps & MMC_CAP_1_8V_DDR)
1172 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1173 
1174 	/* make sure vccq is 3.3v after switching disaster */
1175 	if (err)
1176 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1177 
1178 	return err;
1179 }
1180 
/*
 * Upgrade from HS200 to HS400: drop to HS timing first, then change the bus
 * width to 8-bit DDR, and finally select HS400 timing. The order matters;
 * the bus width cannot be changed while in HS200. Requires an 8-bit bus.
 * Returns 0 on success or a negative errno.
 */
static int mmc_select_hs400(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err = 0;
	u8 val;

	/*
	 * HS400 mode requires 8-bit bus width
	 */
	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	      host->ios.bus_width == MMC_BUS_WIDTH_8))
		return 0;

	/* Switch card to HS mode */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true, MMC_CMD_RETRIES);
	if (err) {
		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Prepare host to downgrade to HS timing */
	if (host->ops->hs400_downgrade)
		host->ops->hs400_downgrade(host);

	/* Set host controller to HS timing */
	mmc_set_timing(host, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	/* Verify the card accepted the switch (CRC errors count as failure). */
	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	if (host->ops->hs400_prepare_ddr)
		host->ops->hs400_prepare_ddr(host);

	/* Switch card to DDR */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true, MMC_CMD_RETRIES);
	if (err) {
		pr_err("%s: switch to hs400 failed, err:%d\n",
			 mmc_hostname(host), err);
		return err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
	mmc_set_bus_speed(card);

	/* Re-tuning must be held off while the host tunes for HS400. */
	if (host->ops->execute_hs400_tuning) {
		mmc_retune_disable(host);
		err = host->ops->execute_hs400_tuning(host, card);
		mmc_retune_enable(host);
		if (err)
			goto out_err;
	}

	if (host->ops->hs400_complete)
		host->ops->hs400_complete(host);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
1275 
/* Complete the HS200 -> HS400 upgrade; thin wrapper around mmc_select_hs400(). */
int mmc_hs200_to_hs400(struct mmc_card *card)
{
	return mmc_select_hs400(card);
}
1280 
/*
 * Downgrade from HS400 to HS200, stepping through HS DDR and HS in the
 * reverse order of mmc_select_hs400(). The card is left in HS200 timing
 * with the host prepared for a subsequent HS400 tuning pass.
 */
int mmc_hs400_to_hs200(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err;
	u8 val;

	/* Reduce frequency to HS */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	/* Switch HS400 to HS DDR */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   false, true, MMC_CMD_RETRIES);
	if (err)
		goto out_err;

	if (host->ops->hs400_downgrade)
		host->ops->hs400_downgrade(host);

	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	/* Switch HS DDR to HS */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			   EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
			   0, false, true, MMC_CMD_RETRIES);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	/* Switch HS to HS200 */
	val = EXT_CSD_TIMING_HS200 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   false, true, MMC_CMD_RETRIES);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS200);

	/*
	 * For HS200, CRC errors are not a reliable way to know the switch
	 * failed. If there really is a problem, we would expect tuning will
	 * fail and the result ends up the same.
	 */
	err = mmc_switch_status(card, false);
	if (err)
		goto out_err;

	mmc_set_bus_speed(card);

	/* Prepare tuning for HS400 mode. */
	if (host->ops->prepare_hs400_tuning)
		host->ops->prepare_hs400_tuning(host, &host->ios);

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
1355 
1356 static void mmc_select_driver_type(struct mmc_card *card)
1357 {
1358 	int card_drv_type, drive_strength, drv_type = 0;
1359 	int fixed_drv_type = card->host->fixed_drv_type;
1360 
1361 	card_drv_type = card->ext_csd.raw_driver_strength |
1362 			mmc_driver_type_mask(0);
1363 
1364 	if (fixed_drv_type >= 0)
1365 		drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
1366 				 ? fixed_drv_type : 0;
1367 	else
1368 		drive_strength = mmc_select_drive_strength(card,
1369 							   card->ext_csd.hs200_max_dtr,
1370 							   card_drv_type, &drv_type);
1371 
1372 	card->drive_strength = drive_strength;
1373 
1374 	if (drv_type)
1375 		mmc_set_driver_type(card->host, drv_type);
1376 }
1377 
/*
 * Activate HS400 Enhanced Strobe: negotiate vccq (1.2V then 1.8V), force an
 * 8-bit bus, step through HS, switch to 8-bit DDR with the strobe bit, then
 * select HS400 timing and enable enhanced strobe on the host. Unlike plain
 * HS400 this path does not go through HS200 tuning.
 */
static int mmc_select_hs400es(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = -EINVAL;
	u8 val;

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If fails try again during next card power cycle */
	if (err)
		goto out_err;

	/* HS400ES requires the full 8-bit bus; anything less is an error. */
	err = mmc_select_bus_width(card);
	if (err != MMC_BUS_WIDTH_8) {
		pr_err("%s: switch to 8bit bus width failed, err:%d\n",
			mmc_hostname(host), err);
		err = err < 0 ? err : -ENOTSUPP;
		goto out_err;
	}

	/* Switch card to HS mode */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true, MMC_CMD_RETRIES);
	if (err) {
		pr_err("%s: switch to hs for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	/*
	 * Bump to HS timing and frequency. Some cards don't handle
	 * SEND_STATUS reliably at the initial frequency.
	 */
	mmc_set_timing(host, MMC_TIMING_MMC_HS);
	mmc_set_bus_speed(card);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	/* Switch card to DDR with strobe bit */
	val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 val,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	mmc_select_driver_type(card);

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true, MMC_CMD_RETRIES);
	if (err) {
		pr_err("%s: switch to hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);

	/* Controller enable enhanced strobe function */
	host->ios.enhanced_strobe = true;
	if (host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
1470 
1471 /*
1472  * For device supporting HS200 mode, the following sequence
1473  * should be done before executing the tuning process.
1474  * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported)
1475  * 2. switch to HS200 mode
1476  * 3. set the clock to > 52Mhz and <=200MHz
1477  */
1478 static int mmc_select_hs200(struct mmc_card *card)
1479 {
1480 	struct mmc_host *host = card->host;
1481 	unsigned int old_timing, old_signal_voltage, old_clock;
1482 	int err = -EINVAL;
1483 	u8 val;
1484 
1485 	old_signal_voltage = host->ios.signal_voltage;
1486 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1487 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1488 
1489 	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1490 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1491 
1492 	/* If fails try again during next card power cycle */
1493 	if (err)
1494 		return err;
1495 
1496 	mmc_select_driver_type(card);
1497 
1498 	/*
1499 	 * Set the bus width(4 or 8) with host's support and
1500 	 * switch to HS200 mode if bus width is set successfully.
1501 	 */
1502 	err = mmc_select_bus_width(card);
1503 	if (err > 0) {
1504 		val = EXT_CSD_TIMING_HS200 |
1505 		      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1506 		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1507 				   EXT_CSD_HS_TIMING, val,
1508 				   card->ext_csd.generic_cmd6_time, 0,
1509 				   false, true, MMC_CMD_RETRIES);
1510 		if (err)
1511 			goto err;
1512 
1513 		/*
1514 		 * Bump to HS timing and frequency. Some cards don't handle
1515 		 * SEND_STATUS reliably at the initial frequency.
1516 		 * NB: We can't move to full (HS200) speeds until after we've
1517 		 * successfully switched over.
1518 		 */
1519 		old_timing = host->ios.timing;
1520 		old_clock = host->ios.clock;
1521 		mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1522 		mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
1523 
1524 		/*
1525 		 * For HS200, CRC errors are not a reliable way to know the
1526 		 * switch failed. If there really is a problem, we would expect
1527 		 * tuning will fail and the result ends up the same.
1528 		 */
1529 		err = mmc_switch_status(card, false);
1530 
1531 		/*
1532 		 * mmc_select_timing() assumes timing has not changed if
1533 		 * it is a switch error.
1534 		 */
1535 		if (err == -EBADMSG) {
1536 			mmc_set_clock(host, old_clock);
1537 			mmc_set_timing(host, old_timing);
1538 		}
1539 	}
1540 err:
1541 	if (err) {
1542 		/* fall back to the old signal voltage, if fails report error */
1543 		if (mmc_set_signal_voltage(host, old_signal_voltage))
1544 			err = -EIO;
1545 
1546 		pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1547 		       __func__, err);
1548 	}
1549 	return err;
1550 }
1551 
1552 /*
1553  * Activate High Speed, HS200 or HS400ES mode if supported.
1554  */
1555 static int mmc_select_timing(struct mmc_card *card)
1556 {
1557 	int err = 0;
1558 
1559 	if (!mmc_card_can_ext_csd(card))
1560 		goto bus_speed;
1561 
1562 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
1563 		err = mmc_select_hs400es(card);
1564 		goto out;
1565 	}
1566 
1567 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) {
1568 		err = mmc_select_hs200(card);
1569 		if (err == -EBADMSG)
1570 			card->mmc_avail_type &= ~EXT_CSD_CARD_TYPE_HS200;
1571 		else
1572 			goto out;
1573 	}
1574 
1575 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1576 		err = mmc_select_hs(card);
1577 
1578 out:
1579 	if (err && err != -EBADMSG)
1580 		return err;
1581 
1582 bus_speed:
1583 	/*
1584 	 * Set the bus speed to the selected bus timing.
1585 	 * If timing is not selected, backward compatible is the default.
1586 	 */
1587 	mmc_set_bus_speed(card);
1588 	return 0;
1589 }
1590 
1591 /*
1592  * Execute tuning sequence to seek the proper bus operating
1593  * conditions for HS200 and HS400, which sends CMD21 to the device.
1594  */
1595 static int mmc_hs200_tuning(struct mmc_card *card)
1596 {
1597 	struct mmc_host *host = card->host;
1598 
1599 	/*
1600 	 * Timing should be adjusted to the HS400 target
1601 	 * operation frequency for tuning process
1602 	 */
1603 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1604 	    host->ios.bus_width == MMC_BUS_WIDTH_8)
1605 		if (host->ops->prepare_hs400_tuning)
1606 			host->ops->prepare_hs400_tuning(host, &host->ios);
1607 
1608 	return mmc_execute_tuning(card);
1609 }
1610 
1611 /*
1612  * Handle the detection and initialisation of a card.
1613  *
1614  * In the case of a resume, "oldcard" will contain the card
1615  * we're trying to reinitialise.
1616  */
1617 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1618 	struct mmc_card *oldcard)
1619 {
1620 	struct mmc_card *card;
1621 	int err;
1622 	u32 cid[4];
1623 	u32 rocr;
1624 
1625 	WARN_ON(!host->claimed);
1626 
1627 	/* Set correct bus mode for MMC before attempting init */
1628 	if (!mmc_host_is_spi(host))
1629 		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1630 
1631 	/*
1632 	 * Since we're changing the OCR value, we seem to
1633 	 * need to tell some cards to go back to the idle
1634 	 * state.  We wait 1ms to give cards time to
1635 	 * respond.
1636 	 * mmc_go_idle is needed for eMMC that are asleep
1637 	 */
1638 	mmc_go_idle(host);
1639 
1640 	/* The extra bit indicates that we support high capacity */
1641 	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1642 	if (err)
1643 		goto err;
1644 
1645 	/*
1646 	 * For SPI, enable CRC as appropriate.
1647 	 */
1648 	if (mmc_host_is_spi(host)) {
1649 		err = mmc_spi_set_crc(host, use_spi_crc);
1650 		if (err)
1651 			goto err;
1652 	}
1653 
1654 	/*
1655 	 * Fetch CID from card.
1656 	 */
1657 	err = mmc_send_cid(host, cid);
1658 	if (err)
1659 		goto err;
1660 
1661 	if (oldcard) {
1662 		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1663 			pr_debug("%s: Perhaps the card was replaced\n",
1664 				mmc_hostname(host));
1665 			err = -ENOENT;
1666 			goto err;
1667 		}
1668 
1669 		card = oldcard;
1670 	} else {
1671 		/*
1672 		 * Allocate card structure.
1673 		 */
1674 		card = mmc_alloc_card(host, &mmc_type);
1675 		if (IS_ERR(card)) {
1676 			err = PTR_ERR(card);
1677 			goto err;
1678 		}
1679 
1680 		card->ocr = ocr;
1681 		card->type = MMC_TYPE_MMC;
1682 		card->rca = 1;
1683 		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1684 	}
1685 
1686 	/*
1687 	 * Call the optional HC's init_card function to handle quirks.
1688 	 */
1689 	if (host->ops->init_card)
1690 		host->ops->init_card(host, card);
1691 
1692 	/*
1693 	 * For native busses:  set card RCA and quit open drain mode.
1694 	 */
1695 	if (!mmc_host_is_spi(host)) {
1696 		err = mmc_set_relative_addr(card);
1697 		if (err)
1698 			goto free_card;
1699 
1700 		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1701 	}
1702 
1703 	if (!oldcard) {
1704 		/*
1705 		 * Fetch CSD from card.
1706 		 */
1707 		err = mmc_send_csd(card, card->raw_csd);
1708 		if (err)
1709 			goto free_card;
1710 
1711 		err = mmc_decode_csd(card);
1712 		if (err)
1713 			goto free_card;
1714 		err = mmc_decode_cid(card);
1715 		if (err)
1716 			goto free_card;
1717 	}
1718 
1719 	/*
1720 	 * handling only for cards supporting DSR and hosts requesting
1721 	 * DSR configuration
1722 	 */
1723 	if (card->csd.dsr_imp && host->dsr_req)
1724 		mmc_set_dsr(host);
1725 
1726 	/*
1727 	 * Select card, as all following commands rely on that.
1728 	 */
1729 	if (!mmc_host_is_spi(host)) {
1730 		err = mmc_select_card(card);
1731 		if (err)
1732 			goto free_card;
1733 	}
1734 
1735 	if (!oldcard) {
1736 		/* Read extended CSD. */
1737 		err = mmc_read_ext_csd(card);
1738 		if (err)
1739 			goto free_card;
1740 
1741 		/*
1742 		 * If doing byte addressing, check if required to do sector
1743 		 * addressing.  Handle the case of <2GB cards needing sector
1744 		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
1745 		 * ocr register has bit 30 set for sector addressing.
1746 		 */
1747 		if (rocr & BIT(30))
1748 			mmc_card_set_blockaddr(card);
1749 
1750 		/* Erase size depends on CSD and Extended CSD */
1751 		mmc_set_erase_size(card);
1752 	}
1753 
1754 	/*
1755 	 * Reselect the card type since host caps could have been changed when
1756 	 * debugging even if the card is not new.
1757 	 */
1758 	mmc_select_card_type(card);
1759 
1760 	/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
1761 	if (card->ext_csd.rev >= 3) {
1762 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1763 				 EXT_CSD_ERASE_GROUP_DEF, 1,
1764 				 card->ext_csd.generic_cmd6_time);
1765 
1766 		if (err && err != -EBADMSG)
1767 			goto free_card;
1768 
1769 		if (err) {
1770 			/*
1771 			 * Just disable enhanced area off & sz
1772 			 * will try to enable ERASE_GROUP_DEF
1773 			 * during next time reinit
1774 			 */
1775 			card->ext_csd.enhanced_area_offset = -EINVAL;
1776 			card->ext_csd.enhanced_area_size = -EINVAL;
1777 		} else {
1778 			card->ext_csd.erase_group_def = 1;
1779 			/*
1780 			 * enable ERASE_GRP_DEF successfully.
1781 			 * This will affect the erase size, so
1782 			 * here need to reset erase size
1783 			 */
1784 			mmc_set_erase_size(card);
1785 		}
1786 	}
1787 	mmc_set_wp_grp_size(card);
1788 	/*
1789 	 * Ensure eMMC user default partition is enabled
1790 	 */
1791 	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1792 		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1793 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1794 				 card->ext_csd.part_config,
1795 				 card->ext_csd.part_time);
1796 		if (err && err != -EBADMSG)
1797 			goto free_card;
1798 	}
1799 
1800 	/*
1801 	 * Enable power_off_notification byte in the ext_csd register
1802 	 */
1803 	if (card->ext_csd.rev >= 6) {
1804 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1805 				 EXT_CSD_POWER_OFF_NOTIFICATION,
1806 				 EXT_CSD_POWER_ON,
1807 				 card->ext_csd.generic_cmd6_time);
1808 		if (err && err != -EBADMSG)
1809 			goto free_card;
1810 
1811 		/*
1812 		 * The err can be -EBADMSG or 0,
1813 		 * so check for success and update the flag
1814 		 */
1815 		if (!err)
1816 			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1817 	}
1818 
1819 	/* set erase_arg */
1820 	if (mmc_card_can_discard(card))
1821 		card->erase_arg = MMC_DISCARD_ARG;
1822 	else if (mmc_card_can_trim(card))
1823 		card->erase_arg = MMC_TRIM_ARG;
1824 	else
1825 		card->erase_arg = MMC_ERASE_ARG;
1826 
1827 	/*
1828 	 * Select timing interface
1829 	 */
1830 	err = mmc_select_timing(card);
1831 	if (err)
1832 		goto free_card;
1833 
1834 	if (mmc_card_hs200(card)) {
1835 		host->doing_init_tune = 1;
1836 
1837 		err = mmc_hs200_tuning(card);
1838 		if (!err)
1839 			err = mmc_select_hs400(card);
1840 
1841 		host->doing_init_tune = 0;
1842 
1843 		if (err)
1844 			goto free_card;
1845 	} else if (mmc_card_hs400es(card)) {
1846 		if (host->ops->execute_hs400_tuning) {
1847 			err = host->ops->execute_hs400_tuning(host, card);
1848 			if (err)
1849 				goto free_card;
1850 		}
1851 	} else {
1852 		/* Select the desired bus width optionally */
1853 		err = mmc_select_bus_width(card);
1854 		if (err > 0 && mmc_card_hs(card)) {
1855 			err = mmc_select_hs_ddr(card);
1856 			if (err)
1857 				goto free_card;
1858 		}
1859 	}
1860 
1861 	/*
1862 	 * Choose the power class with selected bus interface
1863 	 */
1864 	mmc_select_powerclass(card);
1865 
1866 	/*
1867 	 * Enable HPI feature (if supported)
1868 	 */
1869 	if (card->ext_csd.hpi) {
1870 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1871 				EXT_CSD_HPI_MGMT, 1,
1872 				card->ext_csd.generic_cmd6_time);
1873 		if (err && err != -EBADMSG)
1874 			goto free_card;
1875 		if (err) {
1876 			pr_warn("%s: Enabling HPI failed\n",
1877 				mmc_hostname(card->host));
1878 			card->ext_csd.hpi_en = 0;
1879 		} else {
1880 			card->ext_csd.hpi_en = 1;
1881 		}
1882 	}
1883 
1884 	/*
1885 	 * If cache size is higher than 0, this indicates the existence of cache
1886 	 * and it can be turned on. Note that some eMMCs from Micron has been
1887 	 * reported to need ~800 ms timeout, while enabling the cache after
1888 	 * sudden power failure tests. Let's extend the timeout to a minimum of
1889 	 * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
1890 	 */
1891 	if (card->ext_csd.cache_size > 0) {
1892 		unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
1893 
1894 		timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
1895 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1896 				EXT_CSD_CACHE_CTRL, 1, timeout_ms);
1897 		if (err && err != -EBADMSG)
1898 			goto free_card;
1899 
1900 		/*
1901 		 * Only if no error, cache is turned on successfully.
1902 		 */
1903 		if (err) {
1904 			pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1905 				mmc_hostname(card->host), err);
1906 			card->ext_csd.cache_ctrl = 0;
1907 		} else {
1908 			card->ext_csd.cache_ctrl = 1;
1909 		}
1910 	}
1911 
1912 	/*
1913 	 * Enable Command Queue if supported. Note that Packed Commands cannot
1914 	 * be used with Command Queue.
1915 	 */
1916 	card->ext_csd.cmdq_en = false;
1917 	if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
1918 		err = mmc_cmdq_enable(card);
1919 		if (err && err != -EBADMSG)
1920 			goto free_card;
1921 		if (err) {
1922 			pr_warn("%s: Enabling CMDQ failed\n",
1923 				mmc_hostname(card->host));
1924 			card->ext_csd.cmdq_support = false;
1925 			card->ext_csd.cmdq_depth = 0;
1926 		}
1927 	}
1928 	/*
1929 	 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
1930 	 * disabled for a time, so a flag is needed to indicate to re-enable the
1931 	 * Command Queue.
1932 	 */
1933 	card->reenable_cmdq = card->ext_csd.cmdq_en;
1934 
1935 	if (host->cqe_ops && !host->cqe_enabled) {
1936 		err = host->cqe_ops->cqe_enable(host, card);
1937 		if (!err) {
1938 			host->cqe_enabled = true;
1939 
1940 			if (card->ext_csd.cmdq_en) {
1941 				pr_info("%s: Command Queue Engine enabled\n",
1942 					mmc_hostname(host));
1943 			} else {
1944 				host->hsq_enabled = true;
1945 				pr_info("%s: Host Software Queue enabled\n",
1946 					mmc_hostname(host));
1947 			}
1948 		}
1949 	}
1950 
1951 	if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
1952 	    host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1953 		pr_err("%s: Host failed to negotiate down from 3.3V\n",
1954 			mmc_hostname(host));
1955 		err = -EINVAL;
1956 		goto free_card;
1957 	}
1958 
1959 	if (!oldcard)
1960 		host->card = card;
1961 
1962 	return 0;
1963 
1964 free_card:
1965 	if (!oldcard)
1966 		mmc_remove_card(card);
1967 err:
1968 	return err;
1969 }
1970 
1971 static bool mmc_card_can_sleep(struct mmc_card *card)
1972 {
1973 	return card->ext_csd.rev >= 3;
1974 }
1975 
1976 static int mmc_sleep_busy_cb(void *cb_data, bool *busy)
1977 {
1978 	struct mmc_host *host = cb_data;
1979 
1980 	*busy = host->ops->card_busy(host);
1981 	return 0;
1982 }
1983 
/*
 * Put the card into Sleep state with CMD5 (SLEEP_AWAKE) after deselecting
 * it, then wait out the card's sleep/awake timeout using the best method
 * the host supports: R1B busy handling, HW busy polling, or a plain delay.
 */
static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_command cmd = {};
	struct mmc_card *card = host->card;
	/*
	 * Convert EXT_CSD S_A_TIMEOUT to milliseconds. The 10000 divisor
	 * implies the raw value is in 100ns units — see the JEDEC eMMC spec
	 * for S_A_TIMEOUT.
	 */
	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
	bool use_r1b_resp;
	int err;

	/* Re-tuning can't be done once the card is deselected */
	mmc_retune_hold(host);

	/* CMD5 requires the card to be deselected (Standby state) first. */
	err = mmc_deselect_cards(host);
	if (err)
		goto out_release;

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	cmd.arg |= 1 << 15;	/* Bit 15 set selects Sleep (not Awake). */
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto out_release;

	/*
	 * If the host does not wait while the card signals busy, then we can
	 * try to poll, but only if the host supports HW polling, as the
	 * SEND_STATUS cmd is not allowed. If we can't poll, then we simply need
	 * to wait the sleep/awake timeout.
	 */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		goto out_release;

	if (!host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_release;
	}

	err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host);

out_release:
	mmc_retune_release(host);
	return err;
}
2028 
2029 static bool mmc_card_can_poweroff_notify(const struct mmc_card *card)
2030 {
2031 	return card &&
2032 		mmc_card_mmc(card) &&
2033 		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
2034 }
2035 
2036 static bool mmc_host_can_poweroff_notify(const struct mmc_host *host,
2037 					 enum mmc_poweroff_type pm_type)
2038 {
2039 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)
2040 		return true;
2041 
2042 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND &&
2043 	    pm_type == MMC_POWEROFF_SUSPEND)
2044 		return true;
2045 
2046 	return pm_type == MMC_POWEROFF_SHUTDOWN;
2047 }
2048 
/*
 * Tell the card that power is about to be removed by writing @notify_type
 * into the EXT_CSD POWER_OFF_NOTIFICATION byte. The switch timeout is the
 * generic CMD6 time, except for the long notification which uses the
 * card's dedicated power-off-long timeout.
 */
static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
	unsigned int timeout = card->ext_csd.generic_cmd6_time;
	int err;

	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
	if (notify_type == EXT_CSD_POWER_OFF_LONG)
		timeout = card->ext_csd.power_off_longtime;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_POWER_OFF_NOTIFICATION,
			notify_type, timeout, 0, false, false, MMC_CMD_RETRIES);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);

	/* Disable the power off notification after the switch operation. */
	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;

	return err;
}
2070 
2071 /*
2072  * Card detection - card is alive.
2073  */
2074 static int mmc_alive(struct mmc_host *host)
2075 {
2076 	return mmc_send_status(host->card, NULL);
2077 }
2078 
2079 /*
2080  * Card detection callback from host.
2081  */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	mmc_get_card(host->card, NULL);

	/*
	 * Just check if our card has been removed.
	 */
	err = _mmc_detect_card_removed(host);

	mmc_put_card(host->card, NULL);

	/* If the card is gone, tear it down and power off the bus. */
	if (err) {
		mmc_remove_card(host->card);
		host->card = NULL;

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}
2105 
2106 static bool _mmc_cache_enabled(struct mmc_host *host)
2107 {
2108 	return host->card->ext_csd.cache_size > 0 &&
2109 	       host->card->ext_csd.cache_ctrl & 1;
2110 }
2111 
2112 /*
2113  * Flush the internal cache of the eMMC to non-volatile storage.
2114  */
static int _mmc_flush_cache(struct mmc_host *host)
{
	int err = 0;

	/*
	 * For cards with a broken cache-flush, only issue the flush when
	 * something has actually been written since the last successful one.
	 */
	if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
		return 0;

	if (_mmc_cache_enabled(host)) {
		err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
		else
			/* Flush succeeded: no unflushed writes remain. */
			host->card->written_flag = false;
	}

	return err;
}
2134 
/*
 * Power off the card as gracefully as @pm_type allows: flush the internal
 * cache (skipped on undervoltage), then prefer the poweroff notification,
 * falling back to sleep (CMD5) or a plain deselect, and finally remove
 * power and mark the card suspended.
 */
static int _mmc_suspend(struct mmc_host *host, enum mmc_poweroff_type pm_type)
{
	unsigned int notify_type = EXT_CSD_POWER_OFF_SHORT;
	int err = 0;

	/* Only shutdown uses the long poweroff notification. */
	if (pm_type == MMC_POWEROFF_SHUTDOWN)
		notify_type = EXT_CSD_POWER_OFF_LONG;

	mmc_claim_host(host);

	/* Nothing to do if a previous suspend already powered the card off. */
	if (mmc_card_suspended(host->card))
		goto out;

	/*
	 * For the undervoltage case, we care more about device integrity.
	 * Avoid cache flush and notify the device to power off quickly.
	 */
	if (pm_type != MMC_POWEROFF_UNDERVOLTAGE) {
		err = _mmc_flush_cache(host);
		if (err)
			goto out;
	}

	if (mmc_card_can_poweroff_notify(host->card) &&
	    mmc_host_can_poweroff_notify(host, pm_type))
		err = mmc_poweroff_notify(host->card, notify_type);
	else if (mmc_card_can_sleep(host->card))
		err = mmc_sleep(host);
	else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);

	/* Only cut power once the card has been quiesced successfully. */
	if (!err) {
		mmc_power_off(host);
		mmc_card_set_suspended(host->card);
	}
out:
	mmc_release_host(host);
	return err;
}
2174 
2175 /*
2176  * Host is being removed. Free up the current card and do a graceful power-off.
2177  */
static void mmc_remove(struct mmc_host *host)
{
	/*
	 * Hold a reference across mmc_remove_card() so the card structure
	 * stays valid for the power-off sequence below.
	 */
	get_device(&host->card->dev);
	mmc_remove_card(host->card);

	_mmc_suspend(host, MMC_POWEROFF_UNBIND);

	put_device(&host->card->dev);
	host->card = NULL;
}
2188 
2189 /*
2190  * Suspend callback
2191  */
static int mmc_suspend(struct mmc_host *host)
{
	int err;

	err = _mmc_suspend(host, MMC_POWEROFF_SUSPEND);
	if (!err) {
		/* Card is powered off; block runtime PM until system resume. */
		pm_runtime_disable(&host->card->dev);
		pm_runtime_set_suspended(&host->card->dev);
	}

	return err;
}
2204 
2205 /*
2206  * This function tries to determine if the same card is still present
2207  * and, if so, restore all state to it.
2208  */
static int _mmc_resume(struct mmc_host *host)
{
	int err = 0;

	mmc_claim_host(host);

	/* Nothing to do unless a previous suspend powered the card off. */
	if (!mmc_card_suspended(host->card))
		goto out;

	/* Power up and fully re-initialize the (presumably same) card. */
	mmc_power_up(host, host->card->ocr);
	err = mmc_init_card(host, host->card->ocr, host->card);
	/* Clear the suspended state even on error, since power is back on. */
	mmc_card_clr_suspended(host->card);

out:
	mmc_release_host(host);
	return err;
}
2226 
2227 /*
2228  * Shutdown callback
2229  */
static int mmc_shutdown(struct mmc_host *host)
{
	int err = 0;

	/*
	 * In case of undervoltage, the card will be powered off (removed) by
	 * _mmc_handle_undervoltage()
	 */
	if (mmc_card_removed(host->card))
		return 0;

	/*
	 * If the card remains suspended at this point and it was done by using
	 * the sleep-cmd (CMD5), we may need to re-initialize it first, to allow
	 * us to send the preferred poweroff-notification cmd at shutdown.
	 */
	if (mmc_card_can_poweroff_notify(host->card) &&
	    !mmc_host_can_poweroff_notify(host, MMC_POWEROFF_SUSPEND))
		err = _mmc_resume(host);

	/* Power off using the long poweroff notification where supported. */
	if (!err)
		err = _mmc_suspend(host, MMC_POWEROFF_SHUTDOWN);

	return err;
}
2255 
2256 /*
2257  * Callback for resume.
2258  */
2259 static int mmc_resume(struct mmc_host *host)
2260 {
2261 	pm_runtime_enable(&host->card->dev);
2262 	return 0;
2263 }
2264 
2265 /*
2266  * Callback for runtime_suspend.
2267  */
static int mmc_runtime_suspend(struct mmc_host *host)
{
	int err;

	/* Only power down the card at runtime for aggressive power saving. */
	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
		return 0;

	err = _mmc_suspend(host, MMC_POWEROFF_SUSPEND);
	if (err)
		pr_err("%s: error %d doing aggressive suspend\n",
			mmc_hostname(host), err);

	return err;
}
2282 
2283 /*
2284  * Callback for runtime_resume.
2285  */
static int mmc_runtime_resume(struct mmc_host *host)
{
	int err;

	err = _mmc_resume(host);
	if (err && err != -ENOMEDIUM)
		pr_err("%s: error %d doing runtime resume\n",
			mmc_hostname(host), err);

	/* Errors are only logged; runtime resume itself reports success. */
	return 0;
}
2297 
2298 static bool mmc_card_can_reset(struct mmc_card *card)
2299 {
2300 	u8 rst_n_function;
2301 
2302 	rst_n_function = card->ext_csd.rst_n_function;
2303 	return ((rst_n_function & EXT_CSD_RST_N_EN_MASK) == EXT_CSD_RST_N_ENABLED);
2304 }
2305 
static int _mmc_hw_reset(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	/*
	 * In the case of recovery, we can't expect flushing the cache to work
	 * always, but we have a go and ignore errors.
	 */
	_mmc_flush_cache(host);

	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->card_hw_reset &&
	     mmc_card_can_reset(card)) {
		/* If the card accept RST_n signal, send it. */
		mmc_set_clock(host, host->f_init);
		host->ops->card_hw_reset(host);
		/* Set initial state and call mmc_set_ios */
		mmc_set_initial_state(host);
	} else {
		/* Do a brute force power cycle */
		mmc_power_cycle(host, card->ocr);
		mmc_pwrseq_reset(host);
	}
	/* Either way the card lost its state; re-initialize from scratch. */
	return mmc_init_card(host, card->ocr, card);
}
2330 
2331 /**
2332  * _mmc_handle_undervoltage - Handle an undervoltage event for MMC/eMMC devices
2333  * @host: MMC host structure
2334  *
2335  * This function is triggered when an undervoltage condition is detected.
2336  * It attempts to transition the device into a low-power or safe state to
2337  * prevent data corruption.
2338  *
2339  * Steps performed:
2340  * - Perform an emergency suspend using EXT_CSD_POWER_OFF_SHORT if possible.
2341  *    - If power-off notify is not supported, fallback mechanisms like sleep or
2342  *      deselecting the card are attempted.
2343  *    - Cache flushing is skipped to reduce execution time.
2344  * - Mark the card as removed to prevent further interactions after
2345  *    undervoltage.
2346  *
2347  * Note: This function does not handle host claiming or releasing. The caller
2348  *	 must ensure that the host is properly claimed before calling this
2349  *	 function and released afterward.
2350  *
2351  * Returns: 0 on success, or a negative error code if any step fails.
2352  */
static int _mmc_handle_undervoltage(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err;

	/*
	 * Perform an emergency suspend to power off the eMMC quickly.
	 * This ensures the device enters a safe state before power is lost.
	 * We first attempt EXT_CSD_POWER_OFF_SHORT, but if power-off notify
	 * is not supported, we fall back to sleep mode or deselecting the card.
	 * Cache flushing is skipped to minimize delay.
	 */
	err = _mmc_suspend(host, MMC_POWEROFF_UNDERVOLTAGE);
	if (err)
		pr_err("%s: undervoltage suspend failed: %pe\n",
		       mmc_hostname(host), ERR_PTR(err));

	/*
	 * Mark the card as removed to prevent further operations.
	 * This ensures the system does not attempt to access the device
	 * after an undervoltage event, avoiding potential corruption.
	 * Note: the card is marked removed even if the suspend failed; the
	 * suspend error is still propagated to the caller.
	 */
	mmc_card_set_removed(card);

	return err;
}
2379 
/* The (e)MMC bus operations, registered via mmc_attach_bus() below. */
static const struct mmc_bus_ops mmc_ops = {
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.runtime_suspend = mmc_runtime_suspend,
	.runtime_resume = mmc_runtime_resume,
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
	.hw_reset = _mmc_hw_reset,
	.cache_enabled = _mmc_cache_enabled,
	.flush_cache = _mmc_flush_cache,
	.handle_undervoltage = _mmc_handle_undervoltage,
};
2394 
2395 /*
2396  * Starting point for MMC card init.
2397  */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr, rocr;

	/* The caller must hold the host for the whole attach sequence. */
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/* Probe for an MMC card and read its OCR. */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus(host, &mmc_ops);
	/* Let the host restrict the OCR voltages it supports for MMC. */
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	rocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!rocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, rocr, NULL);
	if (err)
		goto err;

	/* Release the host while registering the card with the driver core. */
	mmc_release_host(host);
	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	mmc_claim_host(host);
	return 0;

remove_card:
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}
2463