1 /* 2 * linux/drivers/mmc/core/mmc.c 3 * 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. 6 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/err.h> 14 #include <linux/slab.h> 15 #include <linux/stat.h> 16 17 #include <linux/mmc/host.h> 18 #include <linux/mmc/card.h> 19 #include <linux/mmc/mmc.h> 20 21 #include "core.h" 22 #include "bus.h" 23 #include "mmc_ops.h" 24 #include "sd_ops.h" 25 26 static const unsigned int tran_exp[] = { 27 10000, 100000, 1000000, 10000000, 28 0, 0, 0, 0 29 }; 30 31 static const unsigned char tran_mant[] = { 32 0, 10, 12, 13, 15, 20, 25, 30, 33 35, 40, 45, 50, 55, 60, 70, 80, 34 }; 35 36 static const unsigned int tacc_exp[] = { 37 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 38 }; 39 40 static const unsigned int tacc_mant[] = { 41 0, 10, 12, 13, 15, 20, 25, 30, 42 35, 40, 45, 50, 55, 60, 70, 80, 43 }; 44 45 #define UNSTUFF_BITS(resp,start,size) \ 46 ({ \ 47 const int __size = size; \ 48 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ 49 const int __off = 3 - ((start) / 32); \ 50 const int __shft = (start) & 31; \ 51 u32 __res; \ 52 \ 53 __res = resp[__off] >> __shft; \ 54 if (__size + __shft > 32) \ 55 __res |= resp[__off-1] << ((32 - __shft) % 32); \ 56 __res & __mask; \ 57 }) 58 59 /* 60 * Given the decoded CSD structure, decode the raw CID to our CID structure. 61 */ 62 static int mmc_decode_cid(struct mmc_card *card) 63 { 64 u32 *resp = card->raw_cid; 65 66 /* 67 * The selection of the format here is based upon published 68 * specs from sandisk and from what people have reported. 69 */ 70 switch (card->csd.mmca_vsn) { 71 case 0: /* MMC v1.0 - v1.2 */ 72 case 1: /* MMC v1.4 */ 73 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); 74 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 75 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 76 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 77 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 78 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 79 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 80 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); 81 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); 82 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); 83 card->cid.serial = UNSTUFF_BITS(resp, 16, 24); 84 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 85 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 86 break; 87 88 case 2: /* MMC v2.0 - v2.2 */ 89 case 3: /* MMC v3.1 - v3.3 */ 90 case 4: /* MMC v4 */ 91 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); 92 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); 93 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 94 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 95 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 96 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 97 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 98 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 99 card->cid.serial = UNSTUFF_BITS(resp, 16, 32); 100 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 101 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 102 break; 103 104 default: 105 pr_err("%s: card has unknown MMCA version %d\n", 106 mmc_hostname(card->host), card->csd.mmca_vsn); 107 return -EINVAL; 108 } 109 110 return 0; 111 } 112 113 static void mmc_set_erase_size(struct mmc_card *card) 114 { 115 if (card->ext_csd.erase_group_def & 1) 116 card->erase_size = card->ext_csd.hc_erase_size; 117 else 118 card->erase_size = card->csd.erase_size; 119 120 mmc_init_erase(card); 121 } 122 123 /* 124 * Given a 128-bit response, decode to our card CSD structure. 125 */ 126 static int mmc_decode_csd(struct mmc_card *card) 127 { 128 struct mmc_csd *csd = &card->csd; 129 unsigned int e, m, a, b; 130 u32 *resp = card->raw_csd; 131 132 /* 133 * We only understand CSD structure v1.1 and v1.2. 134 * v1.2 has extra information in bits 15, 11 and 10. 135 * We also support eMMC v4.4 & v4.41. 136 */ 137 csd->structure = UNSTUFF_BITS(resp, 126, 2); 138 if (csd->structure == 0) { 139 pr_err("%s: unrecognised CSD structure version %d\n", 140 mmc_hostname(card->host), csd->structure); 141 return -EINVAL; 142 } 143 144 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); 145 m = UNSTUFF_BITS(resp, 115, 4); 146 e = UNSTUFF_BITS(resp, 112, 3); 147 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; 148 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; 149 150 m = UNSTUFF_BITS(resp, 99, 4); 151 e = UNSTUFF_BITS(resp, 96, 3); 152 csd->max_dtr = tran_exp[e] * tran_mant[m]; 153 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); 154 155 e = UNSTUFF_BITS(resp, 47, 3); 156 m = UNSTUFF_BITS(resp, 62, 12); 157 csd->capacity = (1 + m) << (e + 2); 158 159 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); 160 csd->read_partial = UNSTUFF_BITS(resp, 79, 1); 161 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); 162 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); 163 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); 164 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); 165 csd->write_partial = UNSTUFF_BITS(resp, 21, 1); 166 167 if (csd->write_blkbits >= 9) { 168 a = UNSTUFF_BITS(resp, 42, 5); 169 b = UNSTUFF_BITS(resp, 37, 5); 170 csd->erase_size = (a + 1) * (b + 1); 171 csd->erase_size <<= csd->write_blkbits - 9; 172 } 173 174 return 0; 175 } 176 177 /* 178 * Read extended CSD. 179 */ 180 static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) 181 { 182 int err; 183 u8 *ext_csd; 184 185 BUG_ON(!card); 186 BUG_ON(!new_ext_csd); 187 188 *new_ext_csd = NULL; 189 190 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 191 return 0; 192 193 /* 194 * As the ext_csd is so large and mostly unused, we don't store the 195 * raw block in mmc_card. 196 */ 197 ext_csd = kmalloc(512, GFP_KERNEL); 198 if (!ext_csd) { 199 pr_err("%s: could not allocate a buffer to " 200 "receive the ext_csd.\n", mmc_hostname(card->host)); 201 return -ENOMEM; 202 } 203 204 err = mmc_send_ext_csd(card, ext_csd); 205 if (err) { 206 kfree(ext_csd); 207 *new_ext_csd = NULL; 208 209 /* If the host or the card can't do the switch, 210 * fail more gracefully. */ 211 if ((err != -EINVAL) 212 && (err != -ENOSYS) 213 && (err != -EFAULT)) 214 return err; 215 216 /* 217 * High capacity cards should have this "magic" size 218 * stored in their CSD. 219 */ 220 if (card->csd.capacity == (4096 * 512)) { 221 pr_err("%s: unable to read EXT_CSD " 222 "on a possible high capacity card. " 223 "Card will be ignored.\n", 224 mmc_hostname(card->host)); 225 } else { 226 pr_warning("%s: unable to read " 227 "EXT_CSD, performance might " 228 "suffer.\n", 229 mmc_hostname(card->host)); 230 err = 0; 231 } 232 } else 233 *new_ext_csd = ext_csd; 234 235 return err; 236 } 237 238 static void mmc_select_card_type(struct mmc_card *card) 239 { 240 struct mmc_host *host = card->host; 241 u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK; 242 unsigned int caps = host->caps, caps2 = host->caps2; 243 unsigned int hs_max_dtr = 0; 244 245 if (card_type & EXT_CSD_CARD_TYPE_26) 246 hs_max_dtr = MMC_HIGH_26_MAX_DTR; 247 248 if (caps & MMC_CAP_MMC_HIGHSPEED && 249 card_type & EXT_CSD_CARD_TYPE_52) 250 hs_max_dtr = MMC_HIGH_52_MAX_DTR; 251 252 if ((caps & MMC_CAP_1_8V_DDR && 253 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) || 254 (caps & MMC_CAP_1_2V_DDR && 255 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)) 256 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; 257 258 if ((caps2 & MMC_CAP2_HS200_1_8V_SDR && 259 card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) || 260 (caps2 & MMC_CAP2_HS200_1_2V_SDR && 261 card_type & EXT_CSD_CARD_TYPE_SDR_1_2V)) 262 hs_max_dtr = MMC_HS200_MAX_DTR; 263 264 card->ext_csd.hs_max_dtr = hs_max_dtr; 265 card->ext_csd.card_type = card_type; 266 } 267 268 /* 269 * Decode extended CSD. 270 */ 271 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) 272 { 273 int err = 0, idx; 274 unsigned int part_size; 275 u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0; 276 277 BUG_ON(!card); 278 279 if (!ext_csd) 280 return 0; 281 282 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 283 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; 284 if (card->csd.structure == 3) { 285 if (card->ext_csd.raw_ext_csd_structure > 2) { 286 pr_err("%s: unrecognised EXT_CSD structure " 287 "version %d\n", mmc_hostname(card->host), 288 card->ext_csd.raw_ext_csd_structure); 289 err = -EINVAL; 290 goto out; 291 } 292 } 293 294 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 295 if (card->ext_csd.rev > 6) { 296 pr_err("%s: unrecognised EXT_CSD revision %d\n", 297 mmc_hostname(card->host), card->ext_csd.rev); 298 err = -EINVAL; 299 goto out; 300 } 301 302 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; 303 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; 304 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2]; 305 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3]; 306 if (card->ext_csd.rev >= 2) { 307 card->ext_csd.sectors = 308 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | 309 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | 310 ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | 311 ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 312 313 /* Cards with density > 2GiB are sector addressed */ 314 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) 315 mmc_card_set_blockaddr(card); 316 } 317 318 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 319 mmc_select_card_type(card); 320 321 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; 322 card->ext_csd.raw_erase_timeout_mult = 323 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 324 card->ext_csd.raw_hc_erase_grp_size = 325 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 326 if (card->ext_csd.rev >= 3) { 327 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 328 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; 329 330 /* EXT_CSD value is in units of 10ms, but we store in ms */ 331 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; 332 333 /* Sleep / awake timeout in 100ns units */ 334 if (sa_shift > 0 && sa_shift <= 0x17) 335 card->ext_csd.sa_timeout = 336 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; 337 card->ext_csd.erase_group_def = 338 ext_csd[EXT_CSD_ERASE_GROUP_DEF]; 339 card->ext_csd.hc_erase_timeout = 300 * 340 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 341 card->ext_csd.hc_erase_size = 342 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; 343 344 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C]; 345 346 /* 347 * There are two boot regions of equal size, defined in 348 * multiples of 128K. 349 */ 350 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) { 351 for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) { 352 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; 353 mmc_part_add(card, part_size, 354 EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx, 355 "boot%d", idx, true, 356 MMC_BLK_DATA_AREA_BOOT); 357 } 358 } 359 } 360 361 card->ext_csd.raw_hc_erase_gap_size = 362 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 363 card->ext_csd.raw_sec_trim_mult = 364 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 365 card->ext_csd.raw_sec_erase_mult = 366 ext_csd[EXT_CSD_SEC_ERASE_MULT]; 367 card->ext_csd.raw_sec_feature_support = 368 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; 369 card->ext_csd.raw_trim_mult = 370 ext_csd[EXT_CSD_TRIM_MULT]; 371 if (card->ext_csd.rev >= 4) { 372 /* 373 * Enhanced area feature support -- check whether the eMMC 374 * card has the Enhanced area enabled. If so, export enhanced 375 * area offset and size to user by adding sysfs interface. 376 */ 377 card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; 378 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && 379 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { 380 hc_erase_grp_sz = 381 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 382 hc_wp_grp_sz = 383 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 384 385 card->ext_csd.enhanced_area_en = 1; 386 /* 387 * calculate the enhanced data area offset, in bytes 388 */ 389 card->ext_csd.enhanced_area_offset = 390 (ext_csd[139] << 24) + (ext_csd[138] << 16) + 391 (ext_csd[137] << 8) + ext_csd[136]; 392 if (mmc_card_blockaddr(card)) 393 card->ext_csd.enhanced_area_offset <<= 9; 394 /* 395 * calculate the enhanced data area size, in kilobytes 396 */ 397 card->ext_csd.enhanced_area_size = 398 (ext_csd[142] << 16) + (ext_csd[141] << 8) + 399 ext_csd[140]; 400 card->ext_csd.enhanced_area_size *= 401 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz); 402 card->ext_csd.enhanced_area_size <<= 9; 403 } else { 404 /* 405 * If the enhanced area is not enabled, disable these 406 * device attributes. 407 */ 408 card->ext_csd.enhanced_area_offset = -EINVAL; 409 card->ext_csd.enhanced_area_size = -EINVAL; 410 } 411 412 /* 413 * General purpose partition feature support -- 414 * If ext_csd has the size of general purpose partitions, 415 * set size, part_cfg, partition name in mmc_part. 416 */ 417 if (ext_csd[EXT_CSD_PARTITION_SUPPORT] & 418 EXT_CSD_PART_SUPPORT_PART_EN) { 419 if (card->ext_csd.enhanced_area_en != 1) { 420 hc_erase_grp_sz = 421 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 422 hc_wp_grp_sz = 423 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 424 425 card->ext_csd.enhanced_area_en = 1; 426 } 427 428 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) { 429 if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] && 430 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] && 431 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]) 432 continue; 433 part_size = 434 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2] 435 << 16) + 436 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] 437 << 8) + 438 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; 439 part_size *= (size_t)(hc_erase_grp_sz * 440 hc_wp_grp_sz); 441 mmc_part_add(card, part_size << 19, 442 EXT_CSD_PART_CONFIG_ACC_GP0 + idx, 443 "gp%d", idx, false, 444 MMC_BLK_DATA_AREA_GP); 445 } 446 } 447 card->ext_csd.sec_trim_mult = 448 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 449 card->ext_csd.sec_erase_mult = 450 ext_csd[EXT_CSD_SEC_ERASE_MULT]; 451 card->ext_csd.sec_feature_support = 452 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; 453 card->ext_csd.trim_timeout = 300 * 454 ext_csd[EXT_CSD_TRIM_MULT]; 455 456 /* 457 * Note that the call to mmc_part_add above defaults to read 458 * only. If this default assumption is changed, the call must 459 * take into account the value of boot_locked below. 460 */ 461 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP]; 462 card->ext_csd.boot_ro_lockable = true; 463 } 464 465 if (card->ext_csd.rev >= 5) { 466 /* check whether the eMMC card supports HPI */ 467 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { 468 card->ext_csd.hpi = 1; 469 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) 470 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION; 471 else 472 card->ext_csd.hpi_cmd = MMC_SEND_STATUS; 473 /* 474 * Indicate the maximum timeout to close 475 * a command interrupted by HPI 476 */ 477 card->ext_csd.out_of_int_time = 478 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10; 479 } 480 481 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; 482 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION]; 483 } 484 485 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT]; 486 if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) 487 card->erased_byte = 0xFF; 488 else 489 card->erased_byte = 0x0; 490 491 /* eMMC v4.5 or later */ 492 if (card->ext_csd.rev >= 6) { 493 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; 494 495 card->ext_csd.generic_cmd6_time = 10 * 496 ext_csd[EXT_CSD_GENERIC_CMD6_TIME]; 497 card->ext_csd.power_off_longtime = 10 * 498 ext_csd[EXT_CSD_POWER_OFF_LONG_TIME]; 499 500 card->ext_csd.cache_size = 501 ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 | 502 ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 | 503 ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 | 504 ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24; 505 506 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1) 507 card->ext_csd.data_sector_size = 4096; 508 else 509 card->ext_csd.data_sector_size = 512; 510 511 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) && 512 (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) { 513 card->ext_csd.data_tag_unit_size = 514 ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) * 515 (card->ext_csd.data_sector_size); 516 } else { 517 card->ext_csd.data_tag_unit_size = 0; 518 } 519 } else { 520 card->ext_csd.data_sector_size = 512; 521 } 522 523 out: 524 return err; 525 } 526 527 static inline void mmc_free_ext_csd(u8 *ext_csd) 528 { 529 kfree(ext_csd); 530 } 531 532 533 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) 534 { 535 u8 *bw_ext_csd; 536 int err; 537 538 if (bus_width == MMC_BUS_WIDTH_1) 539 return 0; 540 541 err = mmc_get_ext_csd(card, &bw_ext_csd); 542 543 if (err || bw_ext_csd == NULL) { 544 err = -EINVAL; 545 goto out; 546 } 547 548 /* only compare read only fields */ 549 err = !((card->ext_csd.raw_partition_support == 550 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && 551 (card->ext_csd.raw_erased_mem_count == 552 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && 553 (card->ext_csd.rev == 554 bw_ext_csd[EXT_CSD_REV]) && 555 (card->ext_csd.raw_ext_csd_structure == 556 bw_ext_csd[EXT_CSD_STRUCTURE]) && 557 (card->ext_csd.raw_card_type == 558 bw_ext_csd[EXT_CSD_CARD_TYPE]) && 559 (card->ext_csd.raw_s_a_timeout == 560 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) && 561 (card->ext_csd.raw_hc_erase_gap_size == 562 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) && 563 (card->ext_csd.raw_erase_timeout_mult == 564 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) && 565 (card->ext_csd.raw_hc_erase_grp_size == 566 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && 567 (card->ext_csd.raw_sec_trim_mult == 568 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) && 569 (card->ext_csd.raw_sec_erase_mult == 570 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) && 571 (card->ext_csd.raw_sec_feature_support == 572 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) && 573 (card->ext_csd.raw_trim_mult == 574 bw_ext_csd[EXT_CSD_TRIM_MULT]) && 575 (card->ext_csd.raw_sectors[0] == 576 bw_ext_csd[EXT_CSD_SEC_CNT + 0]) && 577 (card->ext_csd.raw_sectors[1] == 578 bw_ext_csd[EXT_CSD_SEC_CNT + 1]) && 579 (card->ext_csd.raw_sectors[2] == 580 bw_ext_csd[EXT_CSD_SEC_CNT + 2]) && 581 (card->ext_csd.raw_sectors[3] == 582 bw_ext_csd[EXT_CSD_SEC_CNT + 3])); 583 if (err) 584 err = -EINVAL; 585 586 out: 587 mmc_free_ext_csd(bw_ext_csd); 588 return err; 589 } 590 591 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], 592 card->raw_cid[2], card->raw_cid[3]); 593 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 594 card->raw_csd[2], card->raw_csd[3]); 595 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); 596 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); 597 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); 598 MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev); 599 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); 600 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); 601 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); 602 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); 603 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); 604 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", 605 card->ext_csd.enhanced_area_offset); 606 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); 607 608 static struct attribute *mmc_std_attrs[] = { 609 &dev_attr_cid.attr, 610 &dev_attr_csd.attr, 611 &dev_attr_date.attr, 612 &dev_attr_erase_size.attr, 613 &dev_attr_preferred_erase_size.attr, 614 &dev_attr_fwrev.attr, 615 &dev_attr_hwrev.attr, 616 &dev_attr_manfid.attr, 617 &dev_attr_name.attr, 618 &dev_attr_oemid.attr, 619 &dev_attr_serial.attr, 620 &dev_attr_enhanced_area_offset.attr, 621 &dev_attr_enhanced_area_size.attr, 622 NULL, 623 }; 624 625 static struct attribute_group mmc_std_attr_group = { 626 .attrs = mmc_std_attrs, 627 }; 628 629 static const struct attribute_group *mmc_attr_groups[] = { 630 &mmc_std_attr_group, 631 NULL, 632 }; 633 634 static struct device_type mmc_type = { 635 .groups = mmc_attr_groups, 636 }; 637 638 /* 639 * Select the PowerClass for the current bus width 640 * If power class is defined for 4/8 bit bus in the 641 * extended CSD register, select it by executing the 642 * mmc_switch command. 643 */ 644 static int mmc_select_powerclass(struct mmc_card *card, 645 unsigned int bus_width, u8 *ext_csd) 646 { 647 int err = 0; 648 unsigned int pwrclass_val; 649 unsigned int index = 0; 650 struct mmc_host *host; 651 652 BUG_ON(!card); 653 654 host = card->host; 655 BUG_ON(!host); 656 657 if (ext_csd == NULL) 658 return 0; 659 660 /* Power class selection is supported for versions >= 4.0 */ 661 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 662 return 0; 663 664 /* Power class values are defined only for 4/8 bit bus */ 665 if (bus_width == EXT_CSD_BUS_WIDTH_1) 666 return 0; 667 668 switch (1 << host->ios.vdd) { 669 case MMC_VDD_165_195: 670 if (host->ios.clock <= 26000000) 671 index = EXT_CSD_PWR_CL_26_195; 672 else if (host->ios.clock <= 52000000) 673 index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 674 EXT_CSD_PWR_CL_52_195 : 675 EXT_CSD_PWR_CL_DDR_52_195; 676 else if (host->ios.clock <= 200000000) 677 index = EXT_CSD_PWR_CL_200_195; 678 break; 679 case MMC_VDD_27_28: 680 case MMC_VDD_28_29: 681 case MMC_VDD_29_30: 682 case MMC_VDD_30_31: 683 case MMC_VDD_31_32: 684 case MMC_VDD_32_33: 685 case MMC_VDD_33_34: 686 case MMC_VDD_34_35: 687 case MMC_VDD_35_36: 688 if (host->ios.clock <= 26000000) 689 index = EXT_CSD_PWR_CL_26_360; 690 else if (host->ios.clock <= 52000000) 691 index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? 692 EXT_CSD_PWR_CL_52_360 : 693 EXT_CSD_PWR_CL_DDR_52_360; 694 else if (host->ios.clock <= 200000000) 695 index = EXT_CSD_PWR_CL_200_360; 696 break; 697 default: 698 pr_warning("%s: Voltage range not supported " 699 "for power class.\n", mmc_hostname(host)); 700 return -EINVAL; 701 } 702 703 pwrclass_val = ext_csd[index]; 704 705 if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8)) 706 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >> 707 EXT_CSD_PWR_CL_8BIT_SHIFT; 708 else 709 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >> 710 EXT_CSD_PWR_CL_4BIT_SHIFT; 711 712 /* If the power class is different from the default value */ 713 if (pwrclass_val > 0) { 714 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 715 EXT_CSD_POWER_CLASS, 716 pwrclass_val, 717 card->ext_csd.generic_cmd6_time); 718 } 719 720 if (err) 721 pr_err("%s: power class selection for ext_csd_bus_width %d" 722 " failed\n", mmc_hostname(card->host), bus_width); 723 724 return err; 725 } 726 727 /* 728 * Selects the desired buswidth and switch to the HS200 mode 729 * if bus width set without error 730 */ 731 static int mmc_select_hs200(struct mmc_card *card) 732 { 733 int idx, err = -EINVAL; 734 struct mmc_host *host; 735 static unsigned ext_csd_bits[] = { 736 EXT_CSD_BUS_WIDTH_4, 737 EXT_CSD_BUS_WIDTH_8, 738 }; 739 static unsigned bus_widths[] = { 740 MMC_BUS_WIDTH_4, 741 MMC_BUS_WIDTH_8, 742 }; 743 744 BUG_ON(!card); 745 746 host = card->host; 747 748 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && 749 host->caps2 & MMC_CAP2_HS200_1_2V_SDR) 750 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0); 751 752 if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V && 753 host->caps2 & MMC_CAP2_HS200_1_8V_SDR) 754 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0); 755 756 /* If fails try again during next card power cycle */ 757 if (err) 758 goto err; 759 760 idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0; 761 762 /* 763 * Unlike SD, MMC cards dont have a configuration register to notify 764 * supported bus width. So bus test command should be run to identify 765 * the supported bus width or compare the ext csd values of current 766 * bus width and ext csd values of 1 bit mode read earlier. 767 */ 768 for (; idx >= 0; idx--) { 769 770 /* 771 * Host is capable of 8bit transfer, then switch 772 * the device to work in 8bit transfer mode. If the 773 * mmc switch command returns error then switch to 774 * 4bit transfer mode. On success set the corresponding 775 * bus width on the host. 776 */ 777 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 778 EXT_CSD_BUS_WIDTH, 779 ext_csd_bits[idx], 780 card->ext_csd.generic_cmd6_time); 781 if (err) 782 continue; 783 784 mmc_set_bus_width(card->host, bus_widths[idx]); 785 786 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 787 err = mmc_compare_ext_csds(card, bus_widths[idx]); 788 else 789 err = mmc_bus_test(card, bus_widths[idx]); 790 if (!err) 791 break; 792 } 793 794 /* switch to HS200 mode if bus width set successfully */ 795 if (!err) 796 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 797 EXT_CSD_HS_TIMING, 2, 0); 798 err: 799 return err; 800 } 801 802 /* 803 * Handle the detection and initialisation of a card. 804 * 805 * In the case of a resume, "oldcard" will contain the card 806 * we're trying to reinitialise. 807 */ 808 static int mmc_init_card(struct mmc_host *host, u32 ocr, 809 struct mmc_card *oldcard) 810 { 811 struct mmc_card *card; 812 int err, ddr = 0; 813 u32 cid[4]; 814 unsigned int max_dtr; 815 u32 rocr; 816 u8 *ext_csd = NULL; 817 818 BUG_ON(!host); 819 WARN_ON(!host->claimed); 820 821 /* Set correct bus mode for MMC before attempting init */ 822 if (!mmc_host_is_spi(host)) 823 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); 824 825 /* Initialization should be done at 3.3 V I/O voltage. */ 826 mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); 827 828 /* 829 * Since we're changing the OCR value, we seem to 830 * need to tell some cards to go back to the idle 831 * state. We wait 1ms to give cards time to 832 * respond. 833 * mmc_go_idle is needed for eMMC that are asleep 834 */ 835 mmc_go_idle(host); 836 837 /* The extra bit indicates that we support high capacity */ 838 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr); 839 if (err) 840 goto err; 841 842 /* 843 * For SPI, enable CRC as appropriate. 844 */ 845 if (mmc_host_is_spi(host)) { 846 err = mmc_spi_set_crc(host, use_spi_crc); 847 if (err) 848 goto err; 849 } 850 851 /* 852 * Fetch CID from card. 853 */ 854 if (mmc_host_is_spi(host)) 855 err = mmc_send_cid(host, cid); 856 else 857 err = mmc_all_send_cid(host, cid); 858 if (err) 859 goto err; 860 861 if (oldcard) { 862 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) { 863 err = -ENOENT; 864 goto err; 865 } 866 867 card = oldcard; 868 } else { 869 /* 870 * Allocate card structure. 871 */ 872 card = mmc_alloc_card(host, &mmc_type); 873 if (IS_ERR(card)) { 874 err = PTR_ERR(card); 875 goto err; 876 } 877 878 card->type = MMC_TYPE_MMC; 879 card->rca = 1; 880 memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); 881 } 882 883 /* 884 * For native busses: set card RCA and quit open drain mode. 885 */ 886 if (!mmc_host_is_spi(host)) { 887 err = mmc_set_relative_addr(card); 888 if (err) 889 goto free_card; 890 891 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); 892 } 893 894 if (!oldcard) { 895 /* 896 * Fetch CSD from card. 897 */ 898 err = mmc_send_csd(card, card->raw_csd); 899 if (err) 900 goto free_card; 901 902 err = mmc_decode_csd(card); 903 if (err) 904 goto free_card; 905 err = mmc_decode_cid(card); 906 if (err) 907 goto free_card; 908 } 909 910 /* 911 * Select card, as all following commands rely on that. 912 */ 913 if (!mmc_host_is_spi(host)) { 914 err = mmc_select_card(card); 915 if (err) 916 goto free_card; 917 } 918 919 if (!oldcard) { 920 /* 921 * Fetch and process extended CSD. 922 */ 923 924 err = mmc_get_ext_csd(card, &ext_csd); 925 if (err) 926 goto free_card; 927 err = mmc_read_ext_csd(card, ext_csd); 928 if (err) 929 goto free_card; 930 931 /* If doing byte addressing, check if required to do sector 932 * addressing. Handle the case of <2GB cards needing sector 933 * addressing. See section 8.1 JEDEC Standard JED84-A441; 934 * ocr register has bit 30 set for sector addressing. 935 */ 936 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30))) 937 mmc_card_set_blockaddr(card); 938 939 /* Erase size depends on CSD and Extended CSD */ 940 mmc_set_erase_size(card); 941 } 942 943 /* 944 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF 945 * bit. This bit will be lost every time after a reset or power off. 946 */ 947 if (card->ext_csd.enhanced_area_en || 948 (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) { 949 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 950 EXT_CSD_ERASE_GROUP_DEF, 1, 951 card->ext_csd.generic_cmd6_time); 952 953 if (err && err != -EBADMSG) 954 goto free_card; 955 956 if (err) { 957 err = 0; 958 /* 959 * Just disable enhanced area off & sz 960 * will try to enable ERASE_GROUP_DEF 961 * during next time reinit 962 */ 963 card->ext_csd.enhanced_area_offset = -EINVAL; 964 card->ext_csd.enhanced_area_size = -EINVAL; 965 } else { 966 card->ext_csd.erase_group_def = 1; 967 /* 968 * enable ERASE_GRP_DEF successfully. 969 * This will affect the erase size, so 970 * here need to reset erase size 971 */ 972 mmc_set_erase_size(card); 973 } 974 } 975 976 /* 977 * Ensure eMMC user default partition is enabled 978 */ 979 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) { 980 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; 981 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, 982 card->ext_csd.part_config, 983 card->ext_csd.part_time); 984 if (err && err != -EBADMSG) 985 goto free_card; 986 } 987 988 /* 989 * If the host supports the power_off_notify capability then 990 * set the notification byte in the ext_csd register of device 991 */ 992 if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) && 993 (card->ext_csd.rev >= 6)) { 994 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 995 EXT_CSD_POWER_OFF_NOTIFICATION, 996 EXT_CSD_POWER_ON, 997 card->ext_csd.generic_cmd6_time); 998 if (err && err != -EBADMSG) 999 goto free_card; 1000 1001 /* 1002 * The err can be -EBADMSG or 0, 1003 * so check for success and update the flag 1004 */ 1005 if (!err) 1006 card->poweroff_notify_state = MMC_POWERED_ON; 1007 } 1008 1009 /* 1010 * Activate high speed (if supported) 1011 */ 1012 if (card->ext_csd.hs_max_dtr != 0) { 1013 err = 0; 1014 if (card->ext_csd.hs_max_dtr > 52000000 && 1015 host->caps2 & MMC_CAP2_HS200) 1016 err = mmc_select_hs200(card); 1017 else if (host->caps & MMC_CAP_MMC_HIGHSPEED) 1018 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1019 EXT_CSD_HS_TIMING, 1, 1020 card->ext_csd.generic_cmd6_time); 1021 1022 if (err && err != -EBADMSG) 1023 goto free_card; 1024 1025 if (err) { 1026 pr_warning("%s: switch to highspeed failed\n", 1027 mmc_hostname(card->host)); 1028 err = 0; 1029 } else { 1030 if (card->ext_csd.hs_max_dtr > 52000000 && 1031 host->caps2 & MMC_CAP2_HS200) { 1032 mmc_card_set_hs200(card); 1033 mmc_set_timing(card->host, 1034 MMC_TIMING_MMC_HS200); 1035 } else { 1036 mmc_card_set_highspeed(card); 1037 mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 1038 } 1039 } 1040 } 1041 1042 /* 1043 * Compute bus speed. 1044 */ 1045 max_dtr = (unsigned int)-1; 1046 1047 if (mmc_card_highspeed(card) || mmc_card_hs200(card)) { 1048 if (max_dtr > card->ext_csd.hs_max_dtr) 1049 max_dtr = card->ext_csd.hs_max_dtr; 1050 } else if (max_dtr > card->csd.max_dtr) { 1051 max_dtr = card->csd.max_dtr; 1052 } 1053 1054 mmc_set_clock(host, max_dtr); 1055 1056 /* 1057 * Indicate DDR mode (if supported). 1058 */ 1059 if (mmc_card_highspeed(card)) { 1060 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) 1061 && ((host->caps & (MMC_CAP_1_8V_DDR | 1062 MMC_CAP_UHS_DDR50)) 1063 == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))) 1064 ddr = MMC_1_8V_DDR_MODE; 1065 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) 1066 && ((host->caps & (MMC_CAP_1_2V_DDR | 1067 MMC_CAP_UHS_DDR50)) 1068 == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))) 1069 ddr = MMC_1_2V_DDR_MODE; 1070 } 1071 1072 /* 1073 * Indicate HS200 SDR mode (if supported). 1074 */ 1075 if (mmc_card_hs200(card)) { 1076 u32 ext_csd_bits; 1077 u32 bus_width = card->host->ios.bus_width; 1078 1079 /* 1080 * For devices supporting HS200 mode, the bus width has 1081 * to be set before executing the tuning function. If 1082 * set before tuning, then device will respond with CRC 1083 * errors for responses on CMD line. So for HS200 the 1084 * sequence will be 1085 * 1. set bus width 4bit / 8 bit (1 bit not supported) 1086 * 2. switch to HS200 mode 1087 * 3. set the clock to > 52Mhz <=200MHz and 1088 * 4. execute tuning for HS200 1089 */ 1090 if ((host->caps2 & MMC_CAP2_HS200) && 1091 card->host->ops->execute_tuning) { 1092 mmc_host_clk_hold(card->host); 1093 err = card->host->ops->execute_tuning(card->host, 1094 MMC_SEND_TUNING_BLOCK_HS200); 1095 mmc_host_clk_release(card->host); 1096 } 1097 if (err) { 1098 pr_warning("%s: tuning execution failed\n", 1099 mmc_hostname(card->host)); 1100 goto err; 1101 } 1102 1103 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? 1104 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; 1105 err = mmc_select_powerclass(card, ext_csd_bits, ext_csd); 1106 if (err) 1107 goto err; 1108 } 1109 1110 /* 1111 * Activate wide bus and DDR (if supported). 1112 */ 1113 if (!mmc_card_hs200(card) && 1114 (card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 1115 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 1116 static unsigned ext_csd_bits[][2] = { 1117 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, 1118 { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 }, 1119 { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 }, 1120 }; 1121 static unsigned bus_widths[] = { 1122 MMC_BUS_WIDTH_8, 1123 MMC_BUS_WIDTH_4, 1124 MMC_BUS_WIDTH_1 1125 }; 1126 unsigned idx, bus_width = 0; 1127 1128 if (host->caps & MMC_CAP_8_BIT_DATA) 1129 idx = 0; 1130 else 1131 idx = 1; 1132 for (; idx < ARRAY_SIZE(bus_widths); idx++) { 1133 bus_width = bus_widths[idx]; 1134 if (bus_width == MMC_BUS_WIDTH_1) 1135 ddr = 0; /* no DDR for 1-bit width */ 1136 err = mmc_select_powerclass(card, ext_csd_bits[idx][0], 1137 ext_csd); 1138 if (err) 1139 goto err; 1140 1141 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1142 EXT_CSD_BUS_WIDTH, 1143 ext_csd_bits[idx][0], 1144 card->ext_csd.generic_cmd6_time); 1145 if (!err) { 1146 mmc_set_bus_width(card->host, bus_width); 1147 1148 /* 1149 * If controller can't handle bus width test, 1150 * compare ext_csd previously read in 1 bit mode 1151 * against ext_csd at new bus width 1152 */ 1153 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 1154 err = mmc_compare_ext_csds(card, 1155 bus_width); 1156 else 1157 err = mmc_bus_test(card, bus_width); 1158 if (!err) 1159 break; 1160 } 1161 } 1162 1163 if (!err && ddr) { 1164 err = mmc_select_powerclass(card, ext_csd_bits[idx][1], 1165 ext_csd); 1166 if (err) 1167 goto err; 1168 1169 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1170 EXT_CSD_BUS_WIDTH, 1171 ext_csd_bits[idx][1], 1172 card->ext_csd.generic_cmd6_time); 1173 } 1174 if (err) { 1175 pr_warning("%s: switch to bus width %d ddr %d " 1176 "failed\n", mmc_hostname(card->host), 1177 1 << bus_width, ddr); 1178 goto free_card; 1179 } else if (ddr) { 1180 /* 1181 * eMMC cards can support 3.3V to 1.2V i/o (vccq) 1182 * signaling. 1183 * 1184 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. 1185 * 1186 * 1.8V vccq at 3.3V core voltage (vcc) is not required 1187 * in the JEDEC spec for DDR. 1188 * 1189 * Do not force change in vccq since we are obviously 1190 * working and no change to vccq is needed. 1191 * 1192 * WARNING: eMMC rules are NOT the same as SD DDR 1193 */ 1194 if (ddr == MMC_1_2V_DDR_MODE) { 1195 err = mmc_set_signal_voltage(host, 1196 MMC_SIGNAL_VOLTAGE_120, 0); 1197 if (err) 1198 goto err; 1199 } 1200 mmc_card_set_ddr_mode(card); 1201 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); 1202 mmc_set_bus_width(card->host, bus_width); 1203 } 1204 } 1205 1206 /* 1207 * Enable HPI feature (if supported) 1208 */ 1209 if (card->ext_csd.hpi) { 1210 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1211 EXT_CSD_HPI_MGMT, 1, 1212 card->ext_csd.generic_cmd6_time); 1213 if (err && err != -EBADMSG) 1214 goto free_card; 1215 if (err) { 1216 pr_warning("%s: Enabling HPI failed\n", 1217 mmc_hostname(card->host)); 1218 err = 0; 1219 } else 1220 card->ext_csd.hpi_en = 1; 1221 } 1222 1223 /* 1224 * If cache size is higher than 0, this indicates 1225 * the existence of cache and it can be turned on. 1226 */ 1227 if ((host->caps2 & MMC_CAP2_CACHE_CTRL) && 1228 card->ext_csd.cache_size > 0) { 1229 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1230 EXT_CSD_CACHE_CTRL, 1, 1231 card->ext_csd.generic_cmd6_time); 1232 if (err && err != -EBADMSG) 1233 goto free_card; 1234 1235 /* 1236 * Only if no error, cache is turned on successfully. 1237 */ 1238 if (err) { 1239 pr_warning("%s: Cache is supported, " 1240 "but failed to turn on (%d)\n", 1241 mmc_hostname(card->host), err); 1242 card->ext_csd.cache_ctrl = 0; 1243 err = 0; 1244 } else { 1245 card->ext_csd.cache_ctrl = 1; 1246 } 1247 } 1248 1249 if (!oldcard) 1250 host->card = card; 1251 1252 mmc_free_ext_csd(ext_csd); 1253 return 0; 1254 1255 free_card: 1256 if (!oldcard) 1257 mmc_remove_card(card); 1258 err: 1259 mmc_free_ext_csd(ext_csd); 1260 1261 return err; 1262 } 1263 1264 /* 1265 * Host is being removed. Free up the current card. 1266 */ 1267 static void mmc_remove(struct mmc_host *host) 1268 { 1269 BUG_ON(!host); 1270 BUG_ON(!host->card); 1271 1272 mmc_remove_card(host->card); 1273 host->card = NULL; 1274 } 1275 1276 /* 1277 * Card detection - card is alive. 1278 */ 1279 static int mmc_alive(struct mmc_host *host) 1280 { 1281 return mmc_send_status(host->card, NULL); 1282 } 1283 1284 /* 1285 * Card detection callback from host. 1286 */ 1287 static void mmc_detect(struct mmc_host *host) 1288 { 1289 int err; 1290 1291 BUG_ON(!host); 1292 BUG_ON(!host->card); 1293 1294 mmc_claim_host(host); 1295 1296 /* 1297 * Just check if our card has been removed. 1298 */ 1299 err = _mmc_detect_card_removed(host); 1300 1301 mmc_release_host(host); 1302 1303 if (err) { 1304 mmc_remove(host); 1305 1306 mmc_claim_host(host); 1307 mmc_detach_bus(host); 1308 mmc_power_off(host); 1309 mmc_release_host(host); 1310 } 1311 } 1312 1313 /* 1314 * Suspend callback from host. 1315 */ 1316 static int mmc_suspend(struct mmc_host *host) 1317 { 1318 int err = 0; 1319 1320 BUG_ON(!host); 1321 BUG_ON(!host->card); 1322 1323 mmc_claim_host(host); 1324 if (mmc_card_can_sleep(host)) { 1325 err = mmc_card_sleep(host); 1326 if (!err) 1327 mmc_card_set_sleep(host->card); 1328 } else if (!mmc_host_is_spi(host)) 1329 err = mmc_deselect_cards(host); 1330 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1331 mmc_release_host(host); 1332 1333 return err; 1334 } 1335 1336 /* 1337 * Resume callback from host. 1338 * 1339 * This function tries to determine if the same card is still present 1340 * and, if so, restore all state to it. 1341 */ 1342 static int mmc_resume(struct mmc_host *host) 1343 { 1344 int err; 1345 1346 BUG_ON(!host); 1347 BUG_ON(!host->card); 1348 1349 mmc_claim_host(host); 1350 if (mmc_card_is_sleep(host->card)) { 1351 err = mmc_card_awake(host); 1352 mmc_card_clr_sleep(host->card); 1353 } else 1354 err = mmc_init_card(host, host->ocr, host->card); 1355 mmc_release_host(host); 1356 1357 return err; 1358 } 1359 1360 static int mmc_power_restore(struct mmc_host *host) 1361 { 1362 int ret; 1363 1364 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1365 mmc_card_clr_sleep(host->card); 1366 mmc_claim_host(host); 1367 ret = mmc_init_card(host, host->ocr, host->card); 1368 mmc_release_host(host); 1369 1370 return ret; 1371 } 1372 1373 static int mmc_sleep(struct mmc_host *host) 1374 { 1375 struct mmc_card *card = host->card; 1376 int err = -ENOSYS; 1377 1378 if (card && card->ext_csd.rev >= 3) { 1379 err = mmc_card_sleepawake(host, 1); 1380 if (err < 0) 1381 pr_debug("%s: Error %d while putting card into sleep", 1382 mmc_hostname(host), err); 1383 } 1384 1385 return err; 1386 } 1387 1388 static int mmc_awake(struct mmc_host *host) 1389 { 1390 struct mmc_card *card = host->card; 1391 int err = -ENOSYS; 1392 1393 if (card && card->ext_csd.rev >= 3) { 1394 err = mmc_card_sleepawake(host, 0); 1395 if (err < 0) 1396 pr_debug("%s: Error %d while awaking sleeping card", 1397 mmc_hostname(host), err); 1398 } 1399 1400 return err; 1401 } 1402 1403 static const struct mmc_bus_ops mmc_ops = { 1404 .awake = mmc_awake, 1405 .sleep = mmc_sleep, 1406 .remove = mmc_remove, 1407 .detect = mmc_detect, 1408 .suspend = NULL, 1409 .resume = NULL, 1410 .power_restore = mmc_power_restore, 1411 .alive = mmc_alive, 1412 }; 1413 1414 static const struct mmc_bus_ops mmc_ops_unsafe = { 1415 .awake = mmc_awake, 1416 .sleep = mmc_sleep, 1417 .remove = mmc_remove, 1418 .detect = mmc_detect, 1419 .suspend = mmc_suspend, 1420 .resume = mmc_resume, 1421 .power_restore = mmc_power_restore, 1422 .alive = mmc_alive, 1423 }; 1424 1425 static void mmc_attach_bus_ops(struct mmc_host *host) 1426 { 1427 const struct mmc_bus_ops *bus_ops; 1428 1429 if (!mmc_card_is_removable(host)) 1430 bus_ops = &mmc_ops_unsafe; 1431 else 1432 bus_ops = &mmc_ops; 1433 mmc_attach_bus(host, bus_ops); 1434 } 1435 1436 /* 1437 * Starting point for MMC card init. 1438 */ 1439 int mmc_attach_mmc(struct mmc_host *host) 1440 { 1441 int err; 1442 u32 ocr; 1443 1444 BUG_ON(!host); 1445 WARN_ON(!host->claimed); 1446 1447 /* Set correct bus mode for MMC before attempting attach */ 1448 if (!mmc_host_is_spi(host)) 1449 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); 1450 1451 err = mmc_send_op_cond(host, 0, &ocr); 1452 if (err) 1453 return err; 1454 1455 mmc_attach_bus_ops(host); 1456 if (host->ocr_avail_mmc) 1457 host->ocr_avail = host->ocr_avail_mmc; 1458 1459 /* 1460 * We need to get OCR a different way for SPI. 1461 */ 1462 if (mmc_host_is_spi(host)) { 1463 err = mmc_spi_read_ocr(host, 1, &ocr); 1464 if (err) 1465 goto err; 1466 } 1467 1468 /* 1469 * Sanity check the voltages that the card claims to 1470 * support. 1471 */ 1472 if (ocr & 0x7F) { 1473 pr_warning("%s: card claims to support voltages " 1474 "below the defined range. These will be ignored.\n", 1475 mmc_hostname(host)); 1476 ocr &= ~0x7F; 1477 } 1478 1479 host->ocr = mmc_select_voltage(host, ocr); 1480 1481 /* 1482 * Can we support the voltage of the card? 1483 */ 1484 if (!host->ocr) { 1485 err = -EINVAL; 1486 goto err; 1487 } 1488 1489 /* 1490 * Detect and init the card. 1491 */ 1492 err = mmc_init_card(host, host->ocr, NULL); 1493 if (err) 1494 goto err; 1495 1496 mmc_release_host(host); 1497 err = mmc_add_card(host->card); 1498 mmc_claim_host(host); 1499 if (err) 1500 goto remove_card; 1501 1502 return 0; 1503 1504 remove_card: 1505 mmc_release_host(host); 1506 mmc_remove_card(host->card); 1507 mmc_claim_host(host); 1508 host->card = NULL; 1509 err: 1510 mmc_detach_bus(host); 1511 1512 pr_err("%s: error %d whilst initialising MMC card\n", 1513 mmc_hostname(host), err); 1514 1515 return err; 1516 } 1517