// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

struct mmc_op_cond_busy_data {
	struct mmc_host *host;
	u32 ocr;
	struct mmc_command *cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
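	 * (Native hosts receive the 32-bit R1 card status register, while
	 * SPI hosts receive the 16-bit R2 status; the bit layouts differ.)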
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	u32 ocr = data->ocr;
	int err = 0;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
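	 * When the initial query used a zero OCR, retry with the OCR the
	 * card reported, keeping bit 30 (the sector-mode/high-capacity
	 * bit) set.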
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
				  MMC_OP_COND_TIMEOUT_MS,
				  &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must provide @buf as either a DMA-capable buffer or an
 * on-stack buffer (the latter incurs some bounce-copy overhead in the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
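		 * Expressing this via timeout_clks rather than timeout_ns
		 * lets the limit scale with the actual bus clock rate.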
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				 512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
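	/*
	 * Bit 30 of the argument mirrors the OCR's high-capacity (sector
	 * addressing) bit; set it when the card was initialized in
	 * high-capacity mode.
	 */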
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
			unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	int err;
	unsigned long timeout;
	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
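
/*
 * Illustrative sketch (not part of this file): a driver-specific busy
 * callback can be plugged into __mmc_poll_for_busy(). The poll interval
 * starts at @period_us (or 32 us when zero) and doubles on each retry,
 * capped at 32768 us. The foo_* names below are hypothetical.
 *
 *	static int foo_busy_cb(void *cb_data, bool *busy)
 *	{
 *		struct foo_host *foo = cb_data;
 *
 *		*busy = foo_device_is_busy(foo);
 *		return 0;
 *	}
 *
 *	err = __mmc_poll_for_busy(host, 0, 500, &foo_busy_cb, foo);
 */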

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}
EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if a CRC error occurs when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
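	/*
	 * CMD6 argument layout, matching the shifts below: bits [31:24]
	 * carry the access mode (here: write byte), [23:16] the EXT_CSD
	 * byte index, [15:8] the value to write and [7:0] the command set.
	 */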
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI or we used HW busy detection above, there is no need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting out the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally
	 * completes in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification says that CMD12 can be used to stop a tuning
	 * command, but the SD specification does not, so do nothing unless it
	 * is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150 ms.
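	 * This matches the 150 ms data timeout used by mmc_send_tuning()
	 * above.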
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
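	/*
	 * For BUS_TEST_R the card returns the bitwise inverse of the test
	 * pattern, so each checked byte XOR-ed with what was written must
	 * equal 0xff.
	 */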
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt and poll the card status until it
 * leaves the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately.
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI. */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled, in case they report an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * the less urgent levels from an asynchronous background task, when
	 * idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
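	 * (mmc_sanitize() below applies the same HPI-based recovery when
	 * its CMD6 times out.)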
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);