// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

struct mmc_op_cond_busy_data {
	struct mmc_host *host;
	u32 ocr;
	struct mmc_command *cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
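/*
 * Convenience wrapper around __mmc_send_status() using the default
 * MMC_CMD_RETRIES retry count. As noted above, the status word comes back
 * in "native" or SPI format depending on the host.
 */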
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int __mmc_go_idle(struct mmc_host *host)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	mmc_delay(1);

	return err;
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	err = __mmc_go_idle(host);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}
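/*
 * Busy callback for CMD1 (SEND_OP_COND) polling: issue the command and
 * report whether the card still signals busy. For native hosts that probe
 * with a zero OCR, the command argument is updated so that subsequent
 * iterations use the OCR the card reported.
 */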
static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	u32 ocr = data->ocr;
	int err = 0;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() has issued CMD0 while
	 * the eMMC device is busy.
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
				  MMC_OP_COND_TIMEOUT_MS,
				  &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
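/*
 * Issue a single-block, read-style ADTC command and collect @len bytes
 * into @buf. @card may be NULL (e.g. when reading the CSD/CID over SPI
 * during probing), in which case the fixed CSD/CID timeout below is the
 * only one that applies.
 */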
/* NOTE: the caller is required to pass a DMA-capable buffer for @buf,
 * or an on-stack buffer (handled with some bounce-buffering overhead in
 * the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				 512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
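/*
 * Read the OCR over SPI (CMD58). When @highcap is set, bit 30 of the
 * argument indicates to the card that the host supports high-capacity
 * (sector-addressed) devices.
 */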
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
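/*
 * Poll @busy_cb until it reports not-busy or @timeout_ms expires. The
 * polling interval starts at @period_us (32us if zero) and backs off
 * exponentially, capped at ~32ms. A minimal callback, sketched along the
 * lines of mmc_busy_cb() above, could look like this:
 *
 *	static int my_busy_cb(void *cb_data, bool *busy)
 *	{
 *		struct mmc_card *card = cb_data;
 *		u32 status;
 *		int err = mmc_send_status(card, &status);
 *
 *		if (err)
 *			return err;
 *		*busy = !mmc_ready_for_data(status);
 *		return 0;
 *	}
 */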
int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
			unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	int err;
	unsigned long timeout;
	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
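/*
 * Set up the response type and busy timeout for a command that makes the
 * card signal busy. Returns true if an R1B response is used (so the host
 * may do HW busy detection); returns false if the command was downgraded
 * to R1 because @timeout_ms exceeds the host's max_busy_timeout, in which
 * case the caller has to poll for completion instead.
 */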
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy
	 * timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}
EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		 unsigned int timeout_ms, unsigned char timing,
		 bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI, or if HW busy detection was used above, no polling is needed. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops,
	 * and polling with CMD13 is not allowed, we have to rely on waiting
	 * out the stated timeout.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
	       unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally
	 * completes in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
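/*
 * A host driver's ->execute_tuning() hook typically sweeps its sample
 * phases with mmc_send_tuning() and keeps a phase for which the pattern
 * reads back intact. A rough sketch (set_sample_phase() is a hypothetical
 * driver-specific helper):
 *
 *	for (phase = 0; phase < num_phases; phase++) {
 *		set_sample_phase(priv, phase);
 *		if (!mmc_send_tuning(host, opcode, NULL))
 *			break;	// this phase samples the pattern correctly
 *	}
 */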
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* DMA onto the stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/* The card returns the inverse of the written pattern. */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
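/*
 * Issue the card's HPI command (CMD12 or CMD13, as advertised in EXT_CSD)
 * with the RCA in bits [31:16] of the argument and bit 0 set to signal
 * HPI, then wait for the card to leave the programming state.
 */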
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt and polls the card status until the
 * card leaves the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle, ready, standby and transfer states, HPI is not
		 * needed and the caller can issue the next intended command
		 * immediately.
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards that have manual BKOPS
 * enabled, when the card reports an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * less urgent levels in an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);
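/*
 * Toggle the card's command queue engine via the EXT_CSD CMDQ_MODE_EN
 * byte. Per the eMMC v5.1 specification, the queue must be empty when the
 * mode is changed; callers are expected to ensure that.
 */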
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to
	 * abort it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);