// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
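
/*
 * Illustrative sketch (not part of the original driver): a typical caller of
 * mmc_send_status() decodes the R1 status word with R1_CURRENT_STATE() to
 * find out which state the card is in. The helper name is hypothetical.
 */
static int __maybe_unused mmc_example_card_in_tran_state(struct mmc_card *card)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (err)
		return err;

	/* Mind the difference between native and SPI status formats. */
	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}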

/*
 * Write the value specified in the device tree or board code into the optional
 * 16-bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
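
/*
 * Illustrative sketch (not part of the original driver): a rough outline of
 * the CMD0/CMD1 power-up handshake performed by the core when probing an
 * eMMC device: reset to idle, query the OCR, then repeat CMD1 with the chosen
 * voltage window (plus the sector-mode bit) until the card reports ready.
 * The helper name is hypothetical.
 */
static int __maybe_unused mmc_example_powerup(struct mmc_host *host, u32 *ocr)
{
	int err;

	err = mmc_go_idle(host);		/* CMD0: reset to idle state */
	if (err)
		return err;

	err = mmc_send_op_cond(host, 0, ocr);	/* CMD1: query the OCR */
	if (err)
		return err;

	/* CMD1 again with the negotiated OCR until the busy bit clears. */
	return mmc_send_op_cond(host, *ocr | BIT(30), NULL);
}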

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must pass a DMA-capable buffer in @buf, or an on-stack
 * buffer (at the cost of some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, opcode, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
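
/*
 * Illustrative sketch (not part of the original driver): reading a single
 * field from the EXT_CSD with mmc_get_ext_csd(). The caller owns the returned
 * 512-byte buffer and must free it. The helper name is hypothetical.
 */
static int __maybe_unused mmc_example_read_ext_csd_rev(struct mmc_card *card,
						       u8 *rev)
{
	u8 *ext_csd;
	int err;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	*rev = ext_csd[EXT_CSD_REV];	/* EXT_CSD_REV is byte 192 */
	kfree(ext_csd);
	return 0;
}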

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}
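
/*
 * Illustrative sketch (not part of the original driver): in SPI mode the core
 * reads the OCR with CMD58 and can then turn on CRC checking with CMD59,
 * roughly as below. The helper name is hypothetical.
 */
static int __maybe_unused mmc_example_spi_setup(struct mmc_host *host, u32 *ocr)
{
	int err;

	err = mmc_spi_read_ocr(host, 0, ocr);	/* CMD58: read the OCR */
	if (err)
		return err;

	return mmc_spi_set_crc(host, 1);	/* CMD59: enable CRC checking */
}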

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
			   enum mmc_busy_cmd busy_cmd, bool *busy)
{
	struct mmc_host *host = card->host;
	u32 status = 0;
	int err;

	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(card, &status);
	if (retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(card->host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			       bool send_status, bool retry_crc_err,
			       enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	/*
	 * If we are not allowed to poll by using CMD13 and are not capable of
	 * polling by using ->card_busy(), then rely on waiting the stated
	 * timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      enum mmc_busy_cmd busy_cmd)
{
	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
}
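
/*
 * Illustrative sketch (not part of the original driver): after issuing a
 * command that leaves the card busy, such as an erase, a caller can wait for
 * completion with mmc_poll_for_busy(). The 60s timeout is an arbitrary
 * example value, not taken from the spec.
 */
static int __maybe_unused mmc_example_wait_erase_done(struct mmc_card *card)
{
	return mmc_poll_for_busy(card, 60 * 1000, MMC_BUSY_ERASE);
}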

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *	timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry when CRC errors occur while polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = true;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
				  MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification says that CMD12 can be used to stop a tuning
	 * command, but the SD specification does not, so do nothing unless it
	 * is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
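
/* Hypothetical controller-specific hook, shown only for the sketch below. */
static void example_set_sample_tap(struct mmc_host *host, int tap)
{
}

/*
 * Illustrative sketch (not part of the original driver): a host driver's
 * ->execute_tuning() callback typically steps through its sample-delay taps
 * and calls mmc_send_tuning() for each one, keeping a tap for which the
 * pattern reads back correctly. The hook above and the tap count are made up.
 */
static int __maybe_unused mmc_example_execute_tuning(struct mmc_host *host,
						     u32 opcode)
{
	int tap, err;

	for (tap = 0; tap < 32; tap++) {
		example_set_sample_tap(host, tap);
		err = mmc_send_tuning(host, opcode, NULL);
		if (!err)
			return 0;	/* this tap works, keep it */
	}

	return -EIO;	/* no working tap found */
}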

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
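
/*
 * Illustrative sketch (not part of the original driver): bus width selection
 * switches EXT_CSD_BUS_WIDTH, reprograms the host, and then uses
 * mmc_bus_test() to verify that the new width actually works. The helper name
 * is hypothetical.
 */
static int __maybe_unused mmc_example_try_4bit_bus(struct mmc_card *card)
{
	int err;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_4, card->ext_csd.generic_cmd6_time);
	if (err)
		return err;

	mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
	return mmc_bus_test(card, MMC_BUS_WIDTH_4);
}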

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;

	/*
	 * Make sure the host's max_busy_timeout fits the needed timeout for
	 * HPI. In case it doesn't, let's instruct the host to avoid HW busy
	 * detection, by using a R1 response instead of R1B.
	 */
	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout_ms;
	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		use_r1b_resp = false;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt and polls the card status until the card
 * leaves the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately.
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Going forward, we may consider starting BKOPS for
	 * less urgent levels by using an asynchronous background task, when
	 * idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_cache_enabled(card->host)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
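
/*
 * Illustrative sketch (not part of the original driver): when the device goes
 * idle or is about to be suspended, a caller may kick manual BKOPS and flush
 * the volatile cache. Both calls are no-ops when the respective feature is
 * not enabled. The helper name is hypothetical.
 */
static int __maybe_unused mmc_example_idle_maintenance(struct mmc_card *card)
{
	mmc_run_bkops(card);
	return mmc_flush_cache(card);
}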

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continuing to wait, let's try to
	 * abort it with an HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
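
/*
 * Illustrative sketch (not part of the original driver): operations that must
 * not run while command queuing is active (RPMB access, for instance) can
 * temporarily disable CMDQ around the work and restore it afterwards. The
 * helper and its callback are hypothetical.
 */
static int __maybe_unused mmc_example_without_cmdq(struct mmc_card *card,
						   int (*work)(struct mmc_card *card))
{
	bool cmdq_was_en = card->ext_csd.cmdq_en;
	int err, ret;

	if (cmdq_was_en) {
		err = mmc_cmdq_disable(card);
		if (err)
			return err;
	}

	ret = work(card);

	if (cmdq_was_en) {
		err = mmc_cmdq_enable(card);
		if (err)
			pr_warn("%s: failed to re-enable CMDQ: %d\n",
				mmc_hostname(card->host), err);
	}

	return ret;
}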