1 /* 2 * linux/drivers/mmc/core/mmc.c 3 * 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. 6 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/err.h> 14 #include <linux/slab.h> 15 #include <linux/stat.h> 16 17 #include <linux/mmc/host.h> 18 #include <linux/mmc/card.h> 19 #include <linux/mmc/mmc.h> 20 21 #include "core.h" 22 #include "bus.h" 23 #include "mmc_ops.h" 24 #include "sd_ops.h" 25 26 static const unsigned int tran_exp[] = { 27 10000, 100000, 1000000, 10000000, 28 0, 0, 0, 0 29 }; 30 31 static const unsigned char tran_mant[] = { 32 0, 10, 12, 13, 15, 20, 25, 30, 33 35, 40, 45, 50, 55, 60, 70, 80, 34 }; 35 36 static const unsigned int tacc_exp[] = { 37 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 38 }; 39 40 static const unsigned int tacc_mant[] = { 41 0, 10, 12, 13, 15, 20, 25, 30, 42 35, 40, 45, 50, 55, 60, 70, 80, 43 }; 44 45 #define UNSTUFF_BITS(resp,start,size) \ 46 ({ \ 47 const int __size = size; \ 48 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ 49 const int __off = 3 - ((start) / 32); \ 50 const int __shft = (start) & 31; \ 51 u32 __res; \ 52 \ 53 __res = resp[__off] >> __shft; \ 54 if (__size + __shft > 32) \ 55 __res |= resp[__off-1] << ((32 - __shft) % 32); \ 56 __res & __mask; \ 57 }) 58 59 /* 60 * Given the decoded CSD structure, decode the raw CID to our CID structure. 61 */ 62 static int mmc_decode_cid(struct mmc_card *card) 63 { 64 u32 *resp = card->raw_cid; 65 66 /* 67 * The selection of the format here is based upon published 68 * specs from sandisk and from what people have reported. 69 */ 70 switch (card->csd.mmca_vsn) { 71 case 0: /* MMC v1.0 - v1.2 */ 72 case 1: /* MMC v1.4 */ 73 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); 74 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 75 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 76 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 77 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 78 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 79 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 80 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); 81 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); 82 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); 83 card->cid.serial = UNSTUFF_BITS(resp, 16, 24); 84 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 85 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 86 break; 87 88 case 2: /* MMC v2.0 - v2.2 */ 89 case 3: /* MMC v3.1 - v3.3 */ 90 case 4: /* MMC v4 */ 91 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); 92 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); 93 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 94 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 95 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 96 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 97 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 98 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 99 card->cid.serial = UNSTUFF_BITS(resp, 16, 32); 100 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 101 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 102 break; 103 104 default: 105 pr_err("%s: card has unknown MMCA version %d\n", 106 mmc_hostname(card->host), card->csd.mmca_vsn); 107 return -EINVAL; 108 } 109 110 return 0; 111 } 112 113 static void mmc_set_erase_size(struct mmc_card *card) 114 { 115 if (card->ext_csd.erase_group_def & 1) 116 card->erase_size = card->ext_csd.hc_erase_size; 117 else 118 card->erase_size = card->csd.erase_size; 119 120 mmc_init_erase(card); 121 } 122 123 /* 124 * Given a 128-bit response, decode to our card CSD structure. 125 */ 126 static int mmc_decode_csd(struct mmc_card *card) 127 { 128 struct mmc_csd *csd = &card->csd; 129 unsigned int e, m, a, b; 130 u32 *resp = card->raw_csd; 131 132 /* 133 * We only understand CSD structure v1.1 and v1.2. 134 * v1.2 has extra information in bits 15, 11 and 10. 135 * We also support eMMC v4.4 & v4.41. 136 */ 137 csd->structure = UNSTUFF_BITS(resp, 126, 2); 138 if (csd->structure == 0) { 139 pr_err("%s: unrecognised CSD structure version %d\n", 140 mmc_hostname(card->host), csd->structure); 141 return -EINVAL; 142 } 143 144 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); 145 m = UNSTUFF_BITS(resp, 115, 4); 146 e = UNSTUFF_BITS(resp, 112, 3); 147 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; 148 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; 149 150 m = UNSTUFF_BITS(resp, 99, 4); 151 e = UNSTUFF_BITS(resp, 96, 3); 152 csd->max_dtr = tran_exp[e] * tran_mant[m]; 153 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); 154 155 e = UNSTUFF_BITS(resp, 47, 3); 156 m = UNSTUFF_BITS(resp, 62, 12); 157 csd->capacity = (1 + m) << (e + 2); 158 159 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); 160 csd->read_partial = UNSTUFF_BITS(resp, 79, 1); 161 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); 162 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); 163 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); 164 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); 165 csd->write_partial = UNSTUFF_BITS(resp, 21, 1); 166 167 if (csd->write_blkbits >= 9) { 168 a = UNSTUFF_BITS(resp, 42, 5); 169 b = UNSTUFF_BITS(resp, 37, 5); 170 csd->erase_size = (a + 1) * (b + 1); 171 csd->erase_size <<= csd->write_blkbits - 9; 172 } 173 174 return 0; 175 } 176 177 /* 178 * Read extended CSD. 179 */ 180 static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) 181 { 182 int err; 183 u8 *ext_csd; 184 185 BUG_ON(!card); 186 BUG_ON(!new_ext_csd); 187 188 *new_ext_csd = NULL; 189 190 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 191 return 0; 192 193 /* 194 * As the ext_csd is so large and mostly unused, we don't store the 195 * raw block in mmc_card. 196 */ 197 ext_csd = kmalloc(512, GFP_KERNEL); 198 if (!ext_csd) { 199 pr_err("%s: could not allocate a buffer to " 200 "receive the ext_csd.\n", mmc_hostname(card->host)); 201 return -ENOMEM; 202 } 203 204 err = mmc_send_ext_csd(card, ext_csd); 205 if (err) { 206 kfree(ext_csd); 207 *new_ext_csd = NULL; 208 209 /* If the host or the card can't do the switch, 210 * fail more gracefully. */ 211 if ((err != -EINVAL) 212 && (err != -ENOSYS) 213 && (err != -EFAULT)) 214 return err; 215 216 /* 217 * High capacity cards should have this "magic" size 218 * stored in their CSD. 219 */ 220 if (card->csd.capacity == (4096 * 512)) { 221 pr_err("%s: unable to read EXT_CSD " 222 "on a possible high capacity card. " 223 "Card will be ignored.\n", 224 mmc_hostname(card->host)); 225 } else { 226 pr_warning("%s: unable to read " 227 "EXT_CSD, performance might " 228 "suffer.\n", 229 mmc_hostname(card->host)); 230 err = 0; 231 } 232 } else 233 *new_ext_csd = ext_csd; 234 235 return err; 236 } 237 238 static void mmc_select_card_type(struct mmc_card *card) 239 { 240 struct mmc_host *host = card->host; 241 u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK; 242 unsigned int caps = host->caps, caps2 = host->caps2; 243 unsigned int hs_max_dtr = 0; 244 245 if (card_type & EXT_CSD_CARD_TYPE_26) 246 hs_max_dtr = MMC_HIGH_26_MAX_DTR; 247 248 if (caps & MMC_CAP_MMC_HIGHSPEED && 249 card_type & EXT_CSD_CARD_TYPE_52) 250 hs_max_dtr = MMC_HIGH_52_MAX_DTR; 251 252 if ((caps & MMC_CAP_1_8V_DDR && 253 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) || 254 (caps & MMC_CAP_1_2V_DDR && 255 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)) 256 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; 257 258 if ((caps2 & MMC_CAP2_HS200_1_8V_SDR && 259 card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) || 260 (caps2 & MMC_CAP2_HS200_1_2V_SDR && 261 card_type & EXT_CSD_CARD_TYPE_SDR_1_2V)) 262 hs_max_dtr = MMC_HS200_MAX_DTR; 263 264 card->ext_csd.hs_max_dtr = hs_max_dtr; 265 card->ext_csd.card_type = card_type; 266 } 267 268 /* 269 * Decode extended CSD. 270 */ 271 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) 272 { 273 int err = 0, idx; 274 unsigned int part_size; 275 u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0; 276 277 BUG_ON(!card); 278 279 if (!ext_csd) 280 return 0; 281 282 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 283 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; 284 if (card->csd.structure == 3) { 285 if (card->ext_csd.raw_ext_csd_structure > 2) { 286 pr_err("%s: unrecognised EXT_CSD structure " 287 "version %d\n", mmc_hostname(card->host), 288 card->ext_csd.raw_ext_csd_structure); 289 err = -EINVAL; 290 goto out; 291 } 292 } 293 294 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 295 if (card->ext_csd.rev > 6) { 296 pr_err("%s: unrecognised EXT_CSD revision %d\n", 297 mmc_hostname(card->host), card->ext_csd.rev); 298 err = -EINVAL; 299 goto out; 300 } 301 302 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; 303 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; 304 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2]; 305 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3]; 306 if (card->ext_csd.rev >= 2) { 307 card->ext_csd.sectors = 308 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | 309 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | 310 ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | 311 ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 312 313 /* Cards with density > 2GiB are sector addressed */ 314 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) 315 mmc_card_set_blockaddr(card); 316 } 317 318 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 319 mmc_select_card_type(card); 320 321 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; 322 card->ext_csd.raw_erase_timeout_mult = 323 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 324 card->ext_csd.raw_hc_erase_grp_size = 325 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 326 if (card->ext_csd.rev >= 3) { 327 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 328 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; 329 330 /* EXT_CSD value is in units of 10ms, but we store in ms */ 331 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; 332 333 /* Sleep / awake timeout in 100ns units */ 334 if (sa_shift > 0 && sa_shift <= 0x17) 335 card->ext_csd.sa_timeout = 336 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; 337 card->ext_csd.erase_group_def = 338 ext_csd[EXT_CSD_ERASE_GROUP_DEF]; 339 card->ext_csd.hc_erase_timeout = 300 * 340 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 341 card->ext_csd.hc_erase_size = 342 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; 343 344 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C]; 345 346 /* 347 * There are two boot regions of equal size, defined in 348 * multiples of 128K. 349 */ 350 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) { 351 for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) { 352 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; 353 mmc_part_add(card, part_size, 354 EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx, 355 "boot%d", idx, true, 356 MMC_BLK_DATA_AREA_BOOT); 357 } 358 } 359 } 360 361 card->ext_csd.raw_hc_erase_gap_size = 362 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 363 card->ext_csd.raw_sec_trim_mult = 364 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 365 card->ext_csd.raw_sec_erase_mult = 366 ext_csd[EXT_CSD_SEC_ERASE_MULT]; 367 card->ext_csd.raw_sec_feature_support = 368 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; 369 card->ext_csd.raw_trim_mult = 370 ext_csd[EXT_CSD_TRIM_MULT]; 371 if (card->ext_csd.rev >= 4) { 372 /* 373 * Enhanced area feature support -- check whether the eMMC 374 * card has the Enhanced area enabled. If so, export enhanced 375 * area offset and size to user by adding sysfs interface. 376 */ 377 card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; 378 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && 379 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { 380 hc_erase_grp_sz = 381 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 382 hc_wp_grp_sz = 383 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 384 385 card->ext_csd.enhanced_area_en = 1; 386 /* 387 * calculate the enhanced data area offset, in bytes 388 */ 389 card->ext_csd.enhanced_area_offset = 390 (ext_csd[139] << 24) + (ext_csd[138] << 16) + 391 (ext_csd[137] << 8) + ext_csd[136]; 392 if (mmc_card_blockaddr(card)) 393 card->ext_csd.enhanced_area_offset <<= 9; 394 /* 395 * calculate the enhanced data area size, in kilobytes 396 */ 397 card->ext_csd.enhanced_area_size = 398 (ext_csd[142] << 16) + (ext_csd[141] << 8) + 399 ext_csd[140]; 400 card->ext_csd.enhanced_area_size *= 401 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz); 402 card->ext_csd.enhanced_area_size <<= 9; 403 } else { 404 /* 405 * If the enhanced area is not enabled, disable these 406 * device attributes. 407 */ 408 card->ext_csd.enhanced_area_offset = -EINVAL; 409 card->ext_csd.enhanced_area_size = -EINVAL; 410 } 411 412 /* 413 * General purpose partition feature support -- 414 * If ext_csd has the size of general purpose partitions, 415 * set size, part_cfg, partition name in mmc_part. 416 */ 417 if (ext_csd[EXT_CSD_PARTITION_SUPPORT] & 418 EXT_CSD_PART_SUPPORT_PART_EN) { 419 if (card->ext_csd.enhanced_area_en != 1) { 420 hc_erase_grp_sz = 421 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 422 hc_wp_grp_sz = 423 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 424 425 card->ext_csd.enhanced_area_en = 1; 426 } 427 428 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) { 429 if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] && 430 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] && 431 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]) 432 continue; 433 part_size = 434 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2] 435 << 16) + 436 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] 437 << 8) + 438 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; 439 part_size *= (size_t)(hc_erase_grp_sz * 440 hc_wp_grp_sz); 441 mmc_part_add(card, part_size << 19, 442 EXT_CSD_PART_CONFIG_ACC_GP0 + idx, 443 "gp%d", idx, false, 444 MMC_BLK_DATA_AREA_GP); 445 } 446 } 447 card->ext_csd.sec_trim_mult = 448 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 449 card->ext_csd.sec_erase_mult = 450 ext_csd[EXT_CSD_SEC_ERASE_MULT]; 451 card->ext_csd.sec_feature_support = 452 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; 453 card->ext_csd.trim_timeout = 300 * 454 ext_csd[EXT_CSD_TRIM_MULT]; 455 456 /* 457 * Note that the call to mmc_part_add above defaults to read 458 * only. If this default assumption is changed, the call must 459 * take into account the value of boot_locked below. 460 */ 461 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP]; 462 card->ext_csd.boot_ro_lockable = true; 463 } 464 465 if (card->ext_csd.rev >= 5) { 466 /* check whether the eMMC card supports HPI */ 467 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { 468 card->ext_csd.hpi = 1; 469 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) 470 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION; 471 else 472 card->ext_csd.hpi_cmd = MMC_SEND_STATUS; 473 /* 474 * Indicate the maximum timeout to close 475 * a command interrupted by HPI 476 */ 477 card->ext_csd.out_of_int_time = 478 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10; 479 } 480 481 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; 482 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION]; 483 } 484 485 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT]; 486 if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) 487 card->erased_byte = 0xFF; 488 else 489 card->erased_byte = 0x0; 490 491 /* eMMC v4.5 or later */ 492 if (card->ext_csd.rev >= 6) { 493 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; 494 495 card->ext_csd.generic_cmd6_time = 10 * 496 ext_csd[EXT_CSD_GENERIC_CMD6_TIME]; 497 card->ext_csd.power_off_longtime = 10 * 498 ext_csd[EXT_CSD_POWER_OFF_LONG_TIME]; 499 500 card->ext_csd.cache_size = 501 ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 | 502 ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 | 503 ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 | 504 ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24; 505 506 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1) 507 card->ext_csd.data_sector_size = 4096; 508 else 509 card->ext_csd.data_sector_size = 512; 510 511 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) && 512 (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) { 513 card->ext_csd.data_tag_unit_size = 514 ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) * 515 (card->ext_csd.data_sector_size); 516 } else { 517 card->ext_csd.data_tag_unit_size = 0; 518 } 519 } else { 520 card->ext_csd.data_sector_size = 512; 521 } 522 523 out: 524 return err; 525 } 526 527 static inline void mmc_free_ext_csd(u8 *ext_csd) 528 { 529 kfree(ext_csd); 530 } 531 532 533 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) 534 { 535 u8 *bw_ext_csd; 536 int err; 537 538 if (bus_width == MMC_BUS_WIDTH_1) 539 return 0; 540 541 err = mmc_get_ext_csd(card, &bw_ext_csd); 542 543 if (err || bw_ext_csd == NULL) { 544 err = -EINVAL; 545 goto out; 546 } 547 548 /* only compare read only fields */ 549 err = !((card->ext_csd.raw_partition_support == 550 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && 551 (card->ext_csd.raw_erased_mem_count == 552 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && 553 (card->ext_csd.rev == 554 bw_ext_csd[EXT_CSD_REV]) && 555 (card->ext_csd.raw_ext_csd_structure == 556 bw_ext_csd[EXT_CSD_STRUCTURE]) && 557 (card->ext_csd.raw_card_type == 558 bw_ext_csd[EXT_CSD_CARD_TYPE]) && 559 (card->ext_csd.raw_s_a_timeout == 560 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) && 561 (card->ext_csd.raw_hc_erase_gap_size == 562 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) && 563 (card->ext_csd.raw_erase_timeout_mult == 564 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) && 565 (card->ext_csd.raw_hc_erase_grp_size == 566 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && 567 (card->ext_csd.raw_sec_trim_mult == 568 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) && 569 (card->ext_csd.raw_sec_erase_mult == 570 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) && 571 (card->ext_csd.raw_sec_feature_support == 572 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) && 573 (card->ext_csd.raw_trim_mult == 574 bw_ext_csd[EXT_CSD_TRIM_MULT]) && 575 (card->ext_csd.raw_sectors[0] == 576 bw_ext_csd[EXT_CSD_SEC_CNT + 0]) && 577 (card->ext_csd.raw_sectors[1] == 578 bw_ext_csd[EXT_CSD_SEC_CNT + 1]) && 579 (card->ext_csd.raw_sectors[2] == 580 bw_ext_csd[EXT_CSD_SEC_CNT + 2]) && 581 (card->ext_csd.raw_sectors[3] == 582 bw_ext_csd[EXT_CSD_SEC_CNT + 3])); 583 if (err) 584 err = -EINVAL; 585 586 out: 587 mmc_free_ext_csd(bw_ext_csd); 588 return err; 589 } 590 591 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], 592 card->raw_cid[2], card->raw_cid[3]); 593 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 594 card->raw_csd[2], card->raw_csd[3]); 595 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); 596 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); 597 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); 598 MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev); 599 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); 600 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); 601 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); 602 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); 603 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); 604 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", 605 card->ext_csd.enhanced_area_offset); 606 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); 607 608 static struct attribute *mmc_std_attrs[] = { 609 &dev_attr_cid.attr, 610 &dev_attr_csd.attr, 611 &dev_attr_date.attr, 612 &dev_attr_erase_size.attr, 613 &dev_attr_preferred_erase_size.attr, 614 &dev_attr_fwrev.attr, 615 &dev_attr_hwrev.attr, 616 &dev_attr_manfid.attr, 617 &dev_attr_name.attr, 618 &dev_attr_oemid.attr, 619 &dev_attr_serial.attr, 620 &dev_attr_enhanced_area_offset.attr, 621 &dev_attr_enhanced_area_size.attr, 622 NULL, 623 }; 624 625 static struct attribute_group mmc_std_attr_group = { 626 .attrs = mmc_std_attrs, 627 }; 628 629 static const struct attribute_group *mmc_attr_groups[] = { 630 &mmc_std_attr_group, 631 NULL, 632 }; 633 634 static struct device_type mmc_type = { 635 .groups = mmc_attr_groups, 636 }; 637 638 /* 639 * Select the PowerClass for the current bus width 640 * If power class is defined for 4/8 bit bus in the 641 * extended CSD register, select it by executing the 642 * mmc_switch command. 643 */ 644 static int mmc_select_powerclass(struct mmc_card *card, 645 unsigned int bus_width, u8 *ext_csd) 646 { 647 int err = 0; 648 unsigned int pwrclass_val; 649 unsigned int index = 0; 650 struct mmc_host *host; 651 652 BUG_ON(!card); 653 654 host = card->host; 655 BUG_ON(!host); 656 657 if (ext_csd == NULL) 658 return 0; 659 660 /* Power class selection is supported for versions >= 4.0 */ 661 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 662 return 0; 663 664 /* Power class values are defined only for 4/8 bit bus */ 665 if (bus_width == EXT_CSD_BUS_WIDTH_1) 666 return 0; 667 668 switch (1 << host->ios.vdd) { 669 case MMC_VDD_165_195: 670 if (host->ios.clock <= 26000000) 671 index = EXT_CSD_PWR_CL_26_195; 672 else if (host->ios.clock <= 52000000) 673 index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 674 EXT_CSD_PWR_CL_52_195 : 675 EXT_CSD_PWR_CL_DDR_52_195; 676 else if (host->ios.clock <= 200000000) 677 index = EXT_CSD_PWR_CL_200_195; 678 break; 679 case MMC_VDD_27_28: 680 case MMC_VDD_28_29: 681 case MMC_VDD_29_30: 682 case MMC_VDD_30_31: 683 case MMC_VDD_31_32: 684 case MMC_VDD_32_33: 685 case MMC_VDD_33_34: 686 case MMC_VDD_34_35: 687 case MMC_VDD_35_36: 688 if (host->ios.clock <= 26000000) 689 index = EXT_CSD_PWR_CL_26_360; 690 else if (host->ios.clock <= 52000000) 691 index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 692 EXT_CSD_PWR_CL_52_360 : 693 EXT_CSD_PWR_CL_DDR_52_360; 694 else if (host->ios.clock <= 200000000) 695 index = EXT_CSD_PWR_CL_200_360; 696 break; 697 default: 698 pr_warning("%s: Voltage range not supported " 699 "for power class.\n", mmc_hostname(host)); 700 return -EINVAL; 701 } 702 703 pwrclass_val = ext_csd[index]; 704 705 if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8)) 706 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >> 707 EXT_CSD_PWR_CL_8BIT_SHIFT; 708 else 709 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >> 710 EXT_CSD_PWR_CL_4BIT_SHIFT; 711 712 /* If the power class is different from the default value */ 713 if (pwrclass_val > 0) { 714 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 715 EXT_CSD_POWER_CLASS, 716 pwrclass_val, 717 card->ext_csd.generic_cmd6_time); 718 } 719 720 return err; 721 } 722 723 /* 724 * Selects the desired buswidth and switch to the HS200 mode 725 * if bus width set without error 726 */ 727 static int mmc_select_hs200(struct mmc_card *card) 728 { 729 int idx, err = -EINVAL; 730 struct mmc_host *host; 731 static unsigned ext_csd_bits[] = { 732 EXT_CSD_BUS_WIDTH_4, 733 EXT_CSD_BUS_WIDTH_8, 734 }; 735 static unsigned bus_widths[] = { 736 MMC_BUS_WIDTH_4, 737 MMC_BUS_WIDTH_8, 738 }; 739 740 BUG_ON(!card); 741 742 host = card->host; 743 744 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && 745 host->caps2 & MMC_CAP2_HS200_1_2V_SDR) 746 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0); 747 748 if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V && 749 host->caps2 & MMC_CAP2_HS200_1_8V_SDR) 750 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0); 751 752 /* If fails try again during next card power cycle */ 753 if (err) 754 goto err; 755 756 idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0; 757 758 /* 759 * Unlike SD, MMC cards dont have a configuration register to notify 760 * supported bus width. So bus test command should be run to identify 761 * the supported bus width or compare the ext csd values of current 762 * bus width and ext csd values of 1 bit mode read earlier. 763 */ 764 for (; idx >= 0; idx--) { 765 766 /* 767 * Host is capable of 8bit transfer, then switch 768 * the device to work in 8bit transfer mode. If the 769 * mmc switch command returns error then switch to 770 * 4bit transfer mode. On success set the corresponding 771 * bus width on the host. 772 */ 773 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 774 EXT_CSD_BUS_WIDTH, 775 ext_csd_bits[idx], 776 card->ext_csd.generic_cmd6_time); 777 if (err) 778 continue; 779 780 mmc_set_bus_width(card->host, bus_widths[idx]); 781 782 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 783 err = mmc_compare_ext_csds(card, bus_widths[idx]); 784 else 785 err = mmc_bus_test(card, bus_widths[idx]); 786 if (!err) 787 break; 788 } 789 790 /* switch to HS200 mode if bus width set successfully */ 791 if (!err) 792 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 793 EXT_CSD_HS_TIMING, 2, 0); 794 err: 795 return err; 796 } 797 798 /* 799 * Handle the detection and initialisation of a card. 800 * 801 * In the case of a resume, "oldcard" will contain the card 802 * we're trying to reinitialise. 803 */ 804 static int mmc_init_card(struct mmc_host *host, u32 ocr, 805 struct mmc_card *oldcard) 806 { 807 struct mmc_card *card; 808 int err, ddr = 0; 809 u32 cid[4]; 810 unsigned int max_dtr; 811 u32 rocr; 812 u8 *ext_csd = NULL; 813 814 BUG_ON(!host); 815 WARN_ON(!host->claimed); 816 817 /* Set correct bus mode for MMC before attempting init */ 818 if (!mmc_host_is_spi(host)) 819 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); 820 821 /* 822 * Since we're changing the OCR value, we seem to 823 * need to tell some cards to go back to the idle 824 * state. We wait 1ms to give cards time to 825 * respond. 826 * mmc_go_idle is needed for eMMC that are asleep 827 */ 828 mmc_go_idle(host); 829 830 /* The extra bit indicates that we support high capacity */ 831 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr); 832 if (err) 833 goto err; 834 835 /* 836 * For SPI, enable CRC as appropriate. 837 */ 838 if (mmc_host_is_spi(host)) { 839 err = mmc_spi_set_crc(host, use_spi_crc); 840 if (err) 841 goto err; 842 } 843 844 /* 845 * Fetch CID from card. 846 */ 847 if (mmc_host_is_spi(host)) 848 err = mmc_send_cid(host, cid); 849 else 850 err = mmc_all_send_cid(host, cid); 851 if (err) 852 goto err; 853 854 if (oldcard) { 855 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) { 856 err = -ENOENT; 857 goto err; 858 } 859 860 card = oldcard; 861 } else { 862 /* 863 * Allocate card structure. 864 */ 865 card = mmc_alloc_card(host, &mmc_type); 866 if (IS_ERR(card)) { 867 err = PTR_ERR(card); 868 goto err; 869 } 870 871 card->type = MMC_TYPE_MMC; 872 card->rca = 1; 873 memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); 874 } 875 876 /* 877 * For native busses: set card RCA and quit open drain mode. 878 */ 879 if (!mmc_host_is_spi(host)) { 880 err = mmc_set_relative_addr(card); 881 if (err) 882 goto free_card; 883 884 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); 885 } 886 887 if (!oldcard) { 888 /* 889 * Fetch CSD from card. 890 */ 891 err = mmc_send_csd(card, card->raw_csd); 892 if (err) 893 goto free_card; 894 895 err = mmc_decode_csd(card); 896 if (err) 897 goto free_card; 898 err = mmc_decode_cid(card); 899 if (err) 900 goto free_card; 901 } 902 903 /* 904 * Select card, as all following commands rely on that. 905 */ 906 if (!mmc_host_is_spi(host)) { 907 err = mmc_select_card(card); 908 if (err) 909 goto free_card; 910 } 911 912 if (!oldcard) { 913 /* 914 * Fetch and process extended CSD. 915 */ 916 917 err = mmc_get_ext_csd(card, &ext_csd); 918 if (err) 919 goto free_card; 920 err = mmc_read_ext_csd(card, ext_csd); 921 if (err) 922 goto free_card; 923 924 /* If doing byte addressing, check if required to do sector 925 * addressing. Handle the case of <2GB cards needing sector 926 * addressing. See section 8.1 JEDEC Standard JED84-A441; 927 * ocr register has bit 30 set for sector addressing. 928 */ 929 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30))) 930 mmc_card_set_blockaddr(card); 931 932 /* Erase size depends on CSD and Extended CSD */ 933 mmc_set_erase_size(card); 934 } 935 936 /* 937 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF 938 * bit. This bit will be lost every time after a reset or power off. 939 */ 940 if (card->ext_csd.enhanced_area_en || 941 (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) { 942 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 943 EXT_CSD_ERASE_GROUP_DEF, 1, 944 card->ext_csd.generic_cmd6_time); 945 946 if (err && err != -EBADMSG) 947 goto free_card; 948 949 if (err) { 950 err = 0; 951 /* 952 * Just disable enhanced area off & sz 953 * will try to enable ERASE_GROUP_DEF 954 * during next time reinit 955 */ 956 card->ext_csd.enhanced_area_offset = -EINVAL; 957 card->ext_csd.enhanced_area_size = -EINVAL; 958 } else { 959 card->ext_csd.erase_group_def = 1; 960 /* 961 * enable ERASE_GRP_DEF successfully. 962 * This will affect the erase size, so 963 * here need to reset erase size 964 */ 965 mmc_set_erase_size(card); 966 } 967 } 968 969 /* 970 * Ensure eMMC user default partition is enabled 971 */ 972 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) { 973 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; 974 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, 975 card->ext_csd.part_config, 976 card->ext_csd.part_time); 977 if (err && err != -EBADMSG) 978 goto free_card; 979 } 980 981 /* 982 * If the host supports the power_off_notify capability then 983 * set the notification byte in the ext_csd register of device 984 */ 985 if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) && 986 (card->ext_csd.rev >= 6)) { 987 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 988 EXT_CSD_POWER_OFF_NOTIFICATION, 989 EXT_CSD_POWER_ON, 990 card->ext_csd.generic_cmd6_time); 991 if (err && err != -EBADMSG) 992 goto free_card; 993 994 /* 995 * The err can be -EBADMSG or 0, 996 * so check for success and update the flag 997 */ 998 if (!err) 999 card->poweroff_notify_state = MMC_POWERED_ON; 1000 } 1001 1002 /* 1003 * Activate high speed (if supported) 1004 */ 1005 if (card->ext_csd.hs_max_dtr != 0) { 1006 err = 0; 1007 if (card->ext_csd.hs_max_dtr > 52000000 && 1008 host->caps2 & MMC_CAP2_HS200) 1009 err = mmc_select_hs200(card); 1010 else if (host->caps & MMC_CAP_MMC_HIGHSPEED) 1011 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1012 EXT_CSD_HS_TIMING, 1, 1013 card->ext_csd.generic_cmd6_time); 1014 1015 if (err && err != -EBADMSG) 1016 goto free_card; 1017 1018 if (err) { 1019 pr_warning("%s: switch to highspeed failed\n", 1020 mmc_hostname(card->host)); 1021 err = 0; 1022 } else { 1023 if (card->ext_csd.hs_max_dtr > 52000000 && 1024 host->caps2 & MMC_CAP2_HS200) { 1025 mmc_card_set_hs200(card); 1026 mmc_set_timing(card->host, 1027 MMC_TIMING_MMC_HS200); 1028 } else { 1029 mmc_card_set_highspeed(card); 1030 mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 1031 } 1032 } 1033 } 1034 1035 /* 1036 * Compute bus speed. 1037 */ 1038 max_dtr = (unsigned int)-1; 1039 1040 if (mmc_card_highspeed(card) || mmc_card_hs200(card)) { 1041 if (max_dtr > card->ext_csd.hs_max_dtr) 1042 max_dtr = card->ext_csd.hs_max_dtr; 1043 } else if (max_dtr > card->csd.max_dtr) { 1044 max_dtr = card->csd.max_dtr; 1045 } 1046 1047 mmc_set_clock(host, max_dtr); 1048 1049 /* 1050 * Indicate DDR mode (if supported). 1051 */ 1052 if (mmc_card_highspeed(card)) { 1053 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) 1054 && ((host->caps & (MMC_CAP_1_8V_DDR | 1055 MMC_CAP_UHS_DDR50)) 1056 == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))) 1057 ddr = MMC_1_8V_DDR_MODE; 1058 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) 1059 && ((host->caps & (MMC_CAP_1_2V_DDR | 1060 MMC_CAP_UHS_DDR50)) 1061 == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))) 1062 ddr = MMC_1_2V_DDR_MODE; 1063 } 1064 1065 /* 1066 * Indicate HS200 SDR mode (if supported). 1067 */ 1068 if (mmc_card_hs200(card)) { 1069 u32 ext_csd_bits; 1070 u32 bus_width = card->host->ios.bus_width; 1071 1072 /* 1073 * For devices supporting HS200 mode, the bus width has 1074 * to be set before executing the tuning function. If 1075 * set before tuning, then device will respond with CRC 1076 * errors for responses on CMD line. So for HS200 the 1077 * sequence will be 1078 * 1. set bus width 4bit / 8 bit (1 bit not supported) 1079 * 2. switch to HS200 mode 1080 * 3. set the clock to > 52Mhz <=200MHz and 1081 * 4. execute tuning for HS200 1082 */ 1083 if ((host->caps2 & MMC_CAP2_HS200) && 1084 card->host->ops->execute_tuning) { 1085 mmc_host_clk_hold(card->host); 1086 err = card->host->ops->execute_tuning(card->host, 1087 MMC_SEND_TUNING_BLOCK_HS200); 1088 mmc_host_clk_release(card->host); 1089 } 1090 if (err) { 1091 pr_warning("%s: tuning execution failed\n", 1092 mmc_hostname(card->host)); 1093 goto err; 1094 } 1095 1096 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? 1097 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; 1098 err = mmc_select_powerclass(card, ext_csd_bits, ext_csd); 1099 if (err) 1100 pr_warning("%s: power class selection to bus width %d" 1101 " failed\n", mmc_hostname(card->host), 1102 1 << bus_width); 1103 } 1104 1105 /* 1106 * Activate wide bus and DDR (if supported). 1107 */ 1108 if (!mmc_card_hs200(card) && 1109 (card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 1110 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 1111 static unsigned ext_csd_bits[][2] = { 1112 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, 1113 { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 }, 1114 { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 }, 1115 }; 1116 static unsigned bus_widths[] = { 1117 MMC_BUS_WIDTH_8, 1118 MMC_BUS_WIDTH_4, 1119 MMC_BUS_WIDTH_1 1120 }; 1121 unsigned idx, bus_width = 0; 1122 1123 if (host->caps & MMC_CAP_8_BIT_DATA) 1124 idx = 0; 1125 else 1126 idx = 1; 1127 for (; idx < ARRAY_SIZE(bus_widths); idx++) { 1128 bus_width = bus_widths[idx]; 1129 if (bus_width == MMC_BUS_WIDTH_1) 1130 ddr = 0; /* no DDR for 1-bit width */ 1131 err = mmc_select_powerclass(card, ext_csd_bits[idx][0], 1132 ext_csd); 1133 if (err) 1134 pr_warning("%s: power class selection to " 1135 "bus width %d failed\n", 1136 mmc_hostname(card->host), 1137 1 << bus_width); 1138 1139 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1140 EXT_CSD_BUS_WIDTH, 1141 ext_csd_bits[idx][0], 1142 card->ext_csd.generic_cmd6_time); 1143 if (!err) { 1144 mmc_set_bus_width(card->host, bus_width); 1145 1146 /* 1147 * If controller can't handle bus width test, 1148 * compare ext_csd previously read in 1 bit mode 1149 * against ext_csd at new bus width 1150 */ 1151 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 1152 err = mmc_compare_ext_csds(card, 1153 bus_width); 1154 else 1155 err = mmc_bus_test(card, bus_width); 1156 if (!err) 1157 break; 1158 } 1159 } 1160 1161 if (!err && ddr) { 1162 err = mmc_select_powerclass(card, ext_csd_bits[idx][1], 1163 ext_csd); 1164 if (err) 1165 pr_warning("%s: power class selection to " 1166 "bus width %d ddr %d failed\n", 1167 mmc_hostname(card->host), 1168 1 << bus_width, ddr); 1169 1170 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1171 EXT_CSD_BUS_WIDTH, 1172 ext_csd_bits[idx][1], 1173 card->ext_csd.generic_cmd6_time); 1174 } 1175 if (err) { 1176 pr_warning("%s: switch to bus width %d ddr %d " 1177 "failed\n", mmc_hostname(card->host), 1178 1 << bus_width, ddr); 1179 goto free_card; 1180 } else if (ddr) { 1181 /* 1182 * eMMC cards can support 3.3V to 1.2V i/o (vccq) 1183 * signaling. 1184 * 1185 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. 1186 * 1187 * 1.8V vccq at 3.3V core voltage (vcc) is not required 1188 * in the JEDEC spec for DDR. 1189 * 1190 * Do not force change in vccq since we are obviously 1191 * working and no change to vccq is needed. 1192 * 1193 * WARNING: eMMC rules are NOT the same as SD DDR 1194 */ 1195 if (ddr == MMC_1_2V_DDR_MODE) { 1196 err = mmc_set_signal_voltage(host, 1197 MMC_SIGNAL_VOLTAGE_120, 0); 1198 if (err) 1199 goto err; 1200 } 1201 mmc_card_set_ddr_mode(card); 1202 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); 1203 mmc_set_bus_width(card->host, bus_width); 1204 } 1205 } 1206 1207 /* 1208 * Enable HPI feature (if supported) 1209 */ 1210 if (card->ext_csd.hpi) { 1211 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1212 EXT_CSD_HPI_MGMT, 1, 1213 card->ext_csd.generic_cmd6_time); 1214 if (err && err != -EBADMSG) 1215 goto free_card; 1216 if (err) { 1217 pr_warning("%s: Enabling HPI failed\n", 1218 mmc_hostname(card->host)); 1219 err = 0; 1220 } else 1221 card->ext_csd.hpi_en = 1; 1222 } 1223 1224 /* 1225 * If cache size is higher than 0, this indicates 1226 * the existence of cache and it can be turned on. 1227 */ 1228 if ((host->caps2 & MMC_CAP2_CACHE_CTRL) && 1229 card->ext_csd.cache_size > 0) { 1230 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1231 EXT_CSD_CACHE_CTRL, 1, 1232 card->ext_csd.generic_cmd6_time); 1233 if (err && err != -EBADMSG) 1234 goto free_card; 1235 1236 /* 1237 * Only if no error, cache is turned on successfully. 1238 */ 1239 if (err) { 1240 pr_warning("%s: Cache is supported, " 1241 "but failed to turn on (%d)\n", 1242 mmc_hostname(card->host), err); 1243 card->ext_csd.cache_ctrl = 0; 1244 err = 0; 1245 } else { 1246 card->ext_csd.cache_ctrl = 1; 1247 } 1248 } 1249 1250 if (!oldcard) 1251 host->card = card; 1252 1253 mmc_free_ext_csd(ext_csd); 1254 return 0; 1255 1256 free_card: 1257 if (!oldcard) 1258 mmc_remove_card(card); 1259 err: 1260 mmc_free_ext_csd(ext_csd); 1261 1262 return err; 1263 } 1264 1265 /* 1266 * Host is being removed. Free up the current card. 1267 */ 1268 static void mmc_remove(struct mmc_host *host) 1269 { 1270 BUG_ON(!host); 1271 BUG_ON(!host->card); 1272 1273 mmc_remove_card(host->card); 1274 host->card = NULL; 1275 } 1276 1277 /* 1278 * Card detection - card is alive. 1279 */ 1280 static int mmc_alive(struct mmc_host *host) 1281 { 1282 return mmc_send_status(host->card, NULL); 1283 } 1284 1285 /* 1286 * Card detection callback from host. 1287 */ 1288 static void mmc_detect(struct mmc_host *host) 1289 { 1290 int err; 1291 1292 BUG_ON(!host); 1293 BUG_ON(!host->card); 1294 1295 mmc_claim_host(host); 1296 1297 /* 1298 * Just check if our card has been removed. 1299 */ 1300 err = _mmc_detect_card_removed(host); 1301 1302 mmc_release_host(host); 1303 1304 if (err) { 1305 mmc_remove(host); 1306 1307 mmc_claim_host(host); 1308 mmc_detach_bus(host); 1309 mmc_power_off(host); 1310 mmc_release_host(host); 1311 } 1312 } 1313 1314 /* 1315 * Suspend callback from host. 1316 */ 1317 static int mmc_suspend(struct mmc_host *host) 1318 { 1319 int err = 0; 1320 1321 BUG_ON(!host); 1322 BUG_ON(!host->card); 1323 1324 mmc_claim_host(host); 1325 if (mmc_card_can_sleep(host)) { 1326 err = mmc_card_sleep(host); 1327 if (!err) 1328 mmc_card_set_sleep(host->card); 1329 } else if (!mmc_host_is_spi(host)) 1330 err = mmc_deselect_cards(host); 1331 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1332 mmc_release_host(host); 1333 1334 return err; 1335 } 1336 1337 /* 1338 * Resume callback from host. 1339 * 1340 * This function tries to determine if the same card is still present 1341 * and, if so, restore all state to it. 1342 */ 1343 static int mmc_resume(struct mmc_host *host) 1344 { 1345 int err; 1346 1347 BUG_ON(!host); 1348 BUG_ON(!host->card); 1349 1350 mmc_claim_host(host); 1351 if (mmc_card_is_sleep(host->card)) { 1352 err = mmc_card_awake(host); 1353 mmc_card_clr_sleep(host->card); 1354 } else 1355 err = mmc_init_card(host, host->ocr, host->card); 1356 mmc_release_host(host); 1357 1358 return err; 1359 } 1360 1361 static int mmc_power_restore(struct mmc_host *host) 1362 { 1363 int ret; 1364 1365 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1366 mmc_card_clr_sleep(host->card); 1367 mmc_claim_host(host); 1368 ret = mmc_init_card(host, host->ocr, host->card); 1369 mmc_release_host(host); 1370 1371 return ret; 1372 } 1373 1374 static int mmc_sleep(struct mmc_host *host) 1375 { 1376 struct mmc_card *card = host->card; 1377 int err = -ENOSYS; 1378 1379 if (card && card->ext_csd.rev >= 3) { 1380 err = mmc_card_sleepawake(host, 1); 1381 if (err < 0) 1382 pr_debug("%s: Error %d while putting card into sleep", 1383 mmc_hostname(host), err); 1384 } 1385 1386 return err; 1387 } 1388 1389 static int mmc_awake(struct mmc_host *host) 1390 { 1391 struct mmc_card *card = host->card; 1392 int err = -ENOSYS; 1393 1394 if (card && card->ext_csd.rev >= 3) { 1395 err = mmc_card_sleepawake(host, 0); 1396 if (err < 0) 1397 pr_debug("%s: Error %d while awaking sleeping card", 1398 mmc_hostname(host), err); 1399 } 1400 1401 return err; 1402 } 1403 1404 static const struct mmc_bus_ops mmc_ops = { 1405 .awake = mmc_awake, 1406 .sleep = mmc_sleep, 1407 .remove = mmc_remove, 1408 .detect = mmc_detect, 1409 .suspend = NULL, 1410 .resume = NULL, 1411 .power_restore = mmc_power_restore, 1412 .alive = mmc_alive, 1413 }; 1414 1415 static const struct mmc_bus_ops mmc_ops_unsafe = { 1416 .awake = mmc_awake, 1417 .sleep = mmc_sleep, 1418 .remove = mmc_remove, 1419 .detect = mmc_detect, 1420 .suspend = mmc_suspend, 1421 .resume = mmc_resume, 1422 .power_restore = mmc_power_restore, 1423 .alive = mmc_alive, 1424 }; 1425 1426 static void mmc_attach_bus_ops(struct mmc_host *host) 1427 { 1428 const struct mmc_bus_ops *bus_ops; 1429 1430 if (!mmc_card_is_removable(host)) 1431 bus_ops = &mmc_ops_unsafe; 1432 else 1433 bus_ops = &mmc_ops; 1434 mmc_attach_bus(host, bus_ops); 1435 } 1436 1437 /* 1438 * Starting point for MMC card init. 1439 */ 1440 int mmc_attach_mmc(struct mmc_host *host) 1441 { 1442 int err; 1443 u32 ocr; 1444 1445 BUG_ON(!host); 1446 WARN_ON(!host->claimed); 1447 1448 /* Set correct bus mode for MMC before attempting attach */ 1449 if (!mmc_host_is_spi(host)) 1450 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); 1451 1452 err = mmc_send_op_cond(host, 0, &ocr); 1453 if (err) 1454 return err; 1455 1456 mmc_attach_bus_ops(host); 1457 if (host->ocr_avail_mmc) 1458 host->ocr_avail = host->ocr_avail_mmc; 1459 1460 /* 1461 * We need to get OCR a different way for SPI. 1462 */ 1463 if (mmc_host_is_spi(host)) { 1464 err = mmc_spi_read_ocr(host, 1, &ocr); 1465 if (err) 1466 goto err; 1467 } 1468 1469 /* 1470 * Sanity check the voltages that the card claims to 1471 * support. 1472 */ 1473 if (ocr & 0x7F) { 1474 pr_warning("%s: card claims to support voltages " 1475 "below the defined range. These will be ignored.\n", 1476 mmc_hostname(host)); 1477 ocr &= ~0x7F; 1478 } 1479 1480 host->ocr = mmc_select_voltage(host, ocr); 1481 1482 /* 1483 * Can we support the voltage of the card? 1484 */ 1485 if (!host->ocr) { 1486 err = -EINVAL; 1487 goto err; 1488 } 1489 1490 /* 1491 * Detect and init the card. 1492 */ 1493 err = mmc_init_card(host, host->ocr, NULL); 1494 if (err) 1495 goto err; 1496 1497 mmc_release_host(host); 1498 err = mmc_add_card(host->card); 1499 mmc_claim_host(host); 1500 if (err) 1501 goto remove_card; 1502 1503 return 0; 1504 1505 remove_card: 1506 mmc_release_host(host); 1507 mmc_remove_card(host->card); 1508 mmc_claim_host(host); 1509 host->card = NULL; 1510 err: 1511 mmc_detach_bus(host); 1512 1513 pr_err("%s: error %d whilst initialising MMC card\n", 1514 mmc_hostname(host), err); 1515 1516 return err; 1517 } 1518