/*
 * linux/drivers/mmc/core/mmc.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"

static const unsigned int tran_exp[] = {
	10000, 100000, 1000000, 10000000,
	0, 0, 0, 0
};

static const unsigned char tran_mant[] = {
	0, 10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};

static const unsigned int tacc_exp[] = {
	1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};

static const unsigned int tacc_mant[] = {
	0, 10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};

#define UNSTUFF_BITS(resp,start,size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})

/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * The selection of the format here is based upon published
	 * specs from sandisk and from what people have reported.
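	 *
	 * UNSTUFF_BITS() above treats the four response words as one
	 * 128-bit value, with resp[0] holding bits [127:96]; for example
	 * UNSTUFF_BITS(resp, 120, 8) below extracts CID bits [127:120],
	 * the manufacturer ID byte.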
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		card->cid.manfid	= UNSTUFF_BITS(resp, 104, 24);
		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3]	= UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4]	= UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5]	= UNSTUFF_BITS(resp, 56, 8);
		card->cid.prod_name[6]	= UNSTUFF_BITS(resp, 48, 8);
		card->cid.hwrev		= UNSTUFF_BITS(resp, 44, 4);
		card->cid.fwrev		= UNSTUFF_BITS(resp, 40, 4);
		card->cid.serial	= UNSTUFF_BITS(resp, 16, 24);
		card->cid.month		= UNSTUFF_BITS(resp, 12, 4);
		card->cid.year		= UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		card->cid.manfid	= UNSTUFF_BITS(resp, 120, 8);
		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 16);
		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3]	= UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4]	= UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5]	= UNSTUFF_BITS(resp, 56, 8);
		card->cid.serial	= UNSTUFF_BITS(resp, 16, 32);
		card->cid.month		= UNSTUFF_BITS(resp, 12, 4);
		card->cid.year		= UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	return 0;
}

static void mmc_set_erase_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->erase_size = card->ext_csd.hc_erase_size;
	else
		card->erase_size = card->csd.erase_size;

	mmc_init_erase(card);
}

/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
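	 *
	 * Note that the C_SIZE/C_SIZE_MULT fields decoded below can only
	 * express up to 4096 * 512 read blocks; high capacity devices set
	 * them to that maximum and report their real size in the EXT_CSD
	 * SEC_COUNT field instead (see mmc_read_ext_csd()).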
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}

	return 0;
}

/*
 * Read extended CSD.
 */
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);
	BUG_ON(!new_ext_csd);

	*new_ext_csd = NULL;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		kfree(ext_csd);
		*new_ext_csd = NULL;

		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			pr_warning("%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}
	} else
		*new_ext_csd = ext_csd;

	return err;
}

static void mmc_select_card_type(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
	u32 caps = host->caps, caps2 = host->caps2;
	unsigned int hs_max_dtr = 0;

	if (card_type & EXT_CSD_CARD_TYPE_26)
		hs_max_dtr = MMC_HIGH_26_MAX_DTR;

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_52)
		hs_max_dtr = MMC_HIGH_52_MAX_DTR;

	if ((caps & MMC_CAP_1_8V_DDR &&
	     card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
	    (caps & MMC_CAP_1_2V_DDR &&
	     card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;

	if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
	     card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
	    (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
	     card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
		hs_max_dtr = MMC_HS200_MAX_DTR;

	card->ext_csd.hs_max_dtr = hs_max_dtr;
	card->ext_csd.card_type = card_type;
}

/*
 * Decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
				card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 6) {
		pr_err("%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled. If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		/*
		 * General purpose partition feature support --
		 * If ext_csd has the size of general purpose partitions,
		 * set size, part_cfg, partition name in mmc_part.
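		 *
		 * Each partition's capacity is its three-byte GP_SIZE_MULT
		 * value scaled by the high-capacity write protect and erase
		 * group sizes; the "<< 19" below converts that count of
		 * 512KiB units into bytes for mmc_part_add().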
		 */
		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en)
				pr_info("%s: BKOPS_EN bit is not set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
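		 *
		 * Like the boot areas above, the RPMB area is registered as
		 * its own partition (MMC_BLK_DATA_AREA_RPMB); its contents
		 * are only reachable through the authenticated RPMB data
		 * frames, not through normal block reads and writes.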
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT]) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}
	} else {
		card->ext_csd.data_sector_size = 512;
	}

out:
	return err;
}

static inline void mmc_free_ext_csd(u8 *ext_csd)
{
	kfree(ext_csd);
}


static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);

	if (err || bw_ext_csd == NULL) {
		err = -EINVAL;
		goto out;
	}

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
	if (err)
		err = -EINVAL;

out:
	mmc_free_ext_csd(bw_ext_csd);
	return err;
}

MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_rel_sectors.attr,
	NULL,
};

static struct attribute_group mmc_std_attr_group = {
	.attrs = mmc_std_attrs,
};

static const struct attribute_group *mmc_attr_groups[] = {
	&mmc_std_attr_group,
	NULL,
};

static struct device_type mmc_type = {
	.groups = mmc_attr_groups,
};

/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int mmc_select_powerclass(struct mmc_card *card,
		unsigned int bus_width, u8 *ext_csd)
{
	int err = 0;
	unsigned int pwrclass_val;
	unsigned int index = 0;
	struct mmc_host *host;

	BUG_ON(!card);

	host = card->host;
	BUG_ON(!host);

	if (ext_csd == NULL)
		return 0;

	/* Power class selection is supported for versions >= 4.0 */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == EXT_CSD_BUS_WIDTH_1)
		return 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_195;
		else if (host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_195 :
				EXT_CSD_PWR_CL_DDR_52_195;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_360;
		else if (host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_360 :
				EXT_CSD_PWR_CL_DDR_52_360;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_360;
		break;
	default:
		pr_warning("%s: Voltage range not supported "
			   "for power class.\n", mmc_hostname(host));
		return -EINVAL;
	}

	pwrclass_val = ext_csd[index];

	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}

/*
 * Selects the desired buswidth and switch to the HS200 mode
 * if bus width set without error
 */
static int mmc_select_hs200(struct mmc_card *card)
{
	int idx, err = -EINVAL;
	struct mmc_host *host;
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_4,
		EXT_CSD_BUS_WIDTH_8,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_4,
		MMC_BUS_WIDTH_8,
	};

	BUG_ON(!card);

	host = card->host;

	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
	    host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);

	if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
	    host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);

	/* If fails try again during next card power cycle */
	if (err)
		goto err;

	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx >= 0; idx--) {

		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		mmc_set_bus_width(card->host, bus_widths[idx]);

		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_widths[idx]);
		else
			err = mmc_bus_test(card, bus_widths[idx]);
		if (!err)
			break;
	}

	/* switch to HS200 mode if bus width set successfully */
	if (!err)
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 2, 0);
err:
	return err;
}

/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
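 *
 * Roughly: reset the card (CMD0), negotiate the OCR with CMD1 (with the
 * sector-addressing bit set), read the CID, assign an RCA, read and decode
 * the CSD, select the card, then read EXT_CSD and apply the mmc_switch()
 * based settings (erase group, partition config, timing, bus width, HPI,
 * cache) that make up the rest of this function.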
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMC that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */

		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing. Handle the case of <2GB cards needing sector
		 * addressing. See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit. This bit will be lost every time after a reset or power off.
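	 *
	 * ERASE_GROUP_DEF selects the high-capacity erase and write protect
	 * group definitions from EXT_CSD (the same units the partition
	 * sizes above are expressed in), so it is re-enabled here on every
	 * (re)initialisation.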
	 */
	if (card->ext_csd.enhanced_area_en ||
	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * If the host supports the power_off_notify capability then
	 * set the notification byte in the ext_csd register of device
	 */
	if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
	    (card->ext_csd.rev >= 6)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * The err can be -EBADMSG or 0,
		 * so check for success and update the flag
		 */
		if (!err)
			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if (card->ext_csd.hs_max_dtr != 0) {
		err = 0;
		if (card->ext_csd.hs_max_dtr > 52000000 &&
		    host->caps2 & MMC_CAP2_HS200)
			err = mmc_select_hs200(card);
		else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_HS_TIMING, 1,
					 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			pr_warning("%s: switch to highspeed failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			if (card->ext_csd.hs_max_dtr > 52000000 &&
			    host->caps2 & MMC_CAP2_HS200) {
				mmc_card_set_hs200(card);
				mmc_set_timing(card->host,
					       MMC_TIMING_MMC_HS200);
			} else {
				mmc_card_set_highspeed(card);
				mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
			}
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
		if (mmc_card_highspeed(card) && (max_dtr > 52000000))
			max_dtr = 52000000;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
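	 *
	 * DDR52 transfers data on both clock edges at up to 52MHz; it is
	 * only selected when the card advertises the 1.8V or 1.2V DDR card
	 * type bit and the host claims the matching MMC_CAP_*_DDR (plus
	 * MMC_CAP_UHS_DDR50) capabilities checked below.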
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Indicate HS200 SDR mode (if supported).
	 */
	if (mmc_card_hs200(card)) {
		u32 ext_csd_bits;
		u32 bus_width = card->host->ios.bus_width;

		/*
		 * For devices supporting HS200 mode, the bus width has
		 * to be set before executing the tuning function. If
		 * set before tuning, then device will respond with CRC
		 * errors for responses on CMD line. So for HS200 the
		 * sequence will be
		 * 1. set bus width 4bit / 8 bit (1 bit not supported)
		 * 2. switch to HS200 mode
		 * 3. set the clock to > 52Mhz <=200MHz and
		 * 4. execute tuning for HS200
		 */
		if ((host->caps2 & MMC_CAP2_HS200) &&
		    card->host->ops->execute_tuning) {
			mmc_host_clk_hold(card->host);
			err = card->host->ops->execute_tuning(card->host,
				MMC_SEND_TUNING_BLOCK_HS200);
			mmc_host_clk_release(card->host);
		}
		if (err) {
			pr_warning("%s: tuning execution failed\n",
				   mmc_hostname(card->host));
			goto err;
		}

		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
		err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
		if (err)
			pr_warning("%s: power class selection to bus width %d"
				   " failed\n", mmc_hostname(card->host),
				   1 << bus_width);
	}

	/*
	 * Activate wide bus and DDR (if supported).
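	 *
	 * There is no EXT_CSD field that reports which bus width is in use,
	 * so the loop below simply tries the widest mode the host supports
	 * (8-bit, then 4-bit, then 1-bit) and validates each attempt either
	 * with the CMD19/CMD14 bus test or by re-reading EXT_CSD and
	 * comparing its read-only fields against the 1-bit copy.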
	 */
	if (!mmc_card_hs200(card) &&
	    (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
						    ext_csd);
			if (err)
				pr_warning("%s: power class selection to "
					   "bus width %d failed\n",
					   mmc_hostname(card->host),
					   1 << bus_width);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0],
					 card->ext_csd.generic_cmd6_time);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);

				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					err = mmc_compare_ext_csds(card,
						bus_width);
				else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
						    ext_csd);
			if (err)
				pr_warning("%s: power class selection to "
					   "bus width %d ddr %d failed\n",
					   mmc_hostname(card->host),
					   1 << bus_width, ddr);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][1],
					 card->ext_csd.generic_cmd6_time);
		}
		if (err) {
			pr_warning("%s: switch to bus width %d ddr %d "
				   "failed\n", mmc_hostname(card->host),
				   1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120, 0);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else
			card->ext_csd.hpi_en = 1;
	}

	/*
	 * If cache size is higher than 0, this indicates
	 * the existence of cache and it can be turned on.
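	 *
	 * The cache is volatile; when the host advertises
	 * MMC_CAP2_CACHE_CTRL the core enables it here and is expected to
	 * flush it (via FLUSH_CACHE) before the card is powered down.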
	 */
	if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
			card->ext_csd.cache_size > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_CACHE_CTRL, 1,
				card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if no error, cache is turned on successfully.
		 */
		if (err) {
			pr_warning("%s: Cache is supported, "
					"but failed to turn on (%d)\n",
					mmc_hostname(card->host), err);
			card->ext_csd.cache_ctrl = 0;
			err = 0;
		} else {
			card->ext_csd.cache_ctrl = 1;
		}
	}

	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}

static int mmc_can_poweroff_notify(const struct mmc_card *card)
{
	return card &&
		mmc_card_mmc(card) &&
		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}

static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
	unsigned int timeout = card->ext_csd.generic_cmd6_time;
	int err;

	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
	if (notify_type == EXT_CSD_POWER_OFF_LONG)
		timeout = card->ext_csd.power_off_longtime;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_POWER_OFF_NOTIFICATION,
			 notify_type, timeout);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);

	/* Disable the power off notification after the switch operation. */
	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;

	return err;
}

/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_remove_card(host->card);
	host->card = NULL;
}

/*
 * Card detection - card is alive.
 */
static int mmc_alive(struct mmc_host *host)
{
	return mmc_send_status(host->card, NULL);
}

/*
 * Card detection callback from host.
 */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);

	/*
	 * Just check if our card has been removed.
	 */
	err = _mmc_detect_card_removed(host);

	mmc_release_host(host);

	if (err) {
		mmc_remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}

/*
 * Suspend callback from host.
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err = 0;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	if (mmc_can_poweroff_notify(host->card))
		err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
	else if (mmc_card_can_sleep(host))
		err = mmc_card_sleep(host);
	else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);
	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
	mmc_release_host(host);

	return err;
}

/*
 * Resume callback from host.
 *
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 */
static int mmc_resume(struct mmc_host *host)
{
	int err;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	err = mmc_init_card(host, host->ocr, host->card);
	mmc_release_host(host);

	return err;
}

static int mmc_power_restore(struct mmc_host *host)
{
	int ret;

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
	mmc_claim_host(host);
	ret = mmc_init_card(host, host->ocr, host->card);
	mmc_release_host(host);

	return ret;
}

static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;

	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 1);
		if (err < 0)
			pr_debug("%s: Error %d while putting card into sleep",
				 mmc_hostname(host), err);
	}

	return err;
}

static int mmc_awake(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;

	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 0);
		if (err < 0)
			pr_debug("%s: Error %d while awaking sleeping card",
				 mmc_hostname(host), err);
	}

	return err;
}

static const struct mmc_bus_ops mmc_ops = {
	.awake = mmc_awake,
	.sleep = mmc_sleep,
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = NULL,
	.resume = NULL,
	.power_restore = mmc_power_restore,
	.alive = mmc_alive,
};

static const struct mmc_bus_ops mmc_ops_unsafe = {
	.awake = mmc_awake,
	.sleep = mmc_sleep,
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.power_restore = mmc_power_restore,
	.alive = mmc_alive,
};

static void mmc_attach_bus_ops(struct mmc_host *host)
{
	const struct mmc_bus_ops *bus_ops;

	if (!mmc_card_is_removable(host))
		bus_ops = &mmc_ops_unsafe;
	else
		bus_ops = &mmc_ops;
	mmc_attach_bus(host, bus_ops);
}

/*
 * Starting point for MMC card init.
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus_ops(host);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		pr_warning("%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	mmc_release_host(host);
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}