/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
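/*
 * Illustrative sketch (not part of the original file): a typical host
 * controller driver completes a request from its interrupt handler by
 * handing it back to the core with mmc_request_done().  The my_host
 * structure and helper below are hypothetical.
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct my_host *h = dev_id;
 *		struct mmc_request *mrq = h->mrq;
 *
 *		my_host_collect_response(h, mrq->cmd);
 *		h->mrq = NULL;
 *		mmc_request_done(h->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */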
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *	that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter; returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for its completion,
 * then start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL in case none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
	}

	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
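/*
 * Illustrative sketch (an assumption, not code from this file): a block
 * driver can pipeline transfers by always keeping one request prepared
 * while another runs.  Each call hands in the next prepared request and
 * returns the previously completed one.  next_areq()/handle_done() are
 * hypothetical helpers.
 *
 *	struct mmc_async_req *done, *next;
 *	int err;
 *
 *	while ((next = next_areq())) {
 *		done = mmc_start_req(host, next, &err);
 *		if (err)
 *			break;		// previous request failed err_check
 *		handle_done(done);	// post-process while 'next' runs
 *	}
 *	// drain the last in-flight request
 *	done = mmc_start_req(host, NULL, &err);
 */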
/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status
 * until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
		/*
		 * In idle states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete.  Return any error that occurred while the command
 * was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require a longer data read timeout than indicated in
	 * the CSD.  Address this by setting the read timeout to a
	 * "reasonably high" value.  For the cards tested, 300ms has proven
	 * enough.  If necessary, this value can be increased for other
	 * problematic cards.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
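/*
 * Illustrative sketch (assumed caller, not from this file): issuing a
 * single-block read with a properly computed data timeout.  Error
 * handling and buffer setup are abbreviated.
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, 512);
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 */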
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations.  If @abort is non null and
 * dereferences to a non-zero value then this will return prematurely with
 * that non-zero value without acquiring the lock.  Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
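/*
 * Illustrative sketch (assumed caller, not from this file): any sequence
 * of commands sent to a card must be bracketed by a claim/release pair so
 * that no other thread interleaves its own commands.  mmc_claim_host()
 * is the common wrapper around __mmc_claim_host() with a NULL @abort.
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	mmc_release_host(card->host);
 */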
/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is at or below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
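/*
 * Worked example (added for illustration): for a board whose supply can
 * deliver 3.2-3.4 V, the conversion prefers the low bit for vdd_min and
 * the high bit for vdd_max, so the whole range is covered:
 *
 *	ocr = mmc_vddrange_to_ocrmask(3200, 3400);
 *	// ocr == MMC_VDD_31_32 | MMC_VDD_32_33 |
 *	//        MMC_VDD_33_34 | MMC_VDD_34_35 (bits 19..22)
 */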
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			  struct regulator *supply,
			  unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);

		if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct regulator *supply;
	int ret;

	supply = devm_regulator_get(dev, "vmmc");
	mmc->supply.vmmc = supply;
	mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");

	if (IS_ERR(supply))
		return PTR_ERR(supply);

	ret = mmc_regulator_get_ocrmask(supply);
	if (ret > 0)
		mmc->ocr_avail = ret;
	else
		dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

#endif /* CONFIG_REGULATOR */
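/*
 * Illustrative sketch (assumed host driver code, not from this file): a
 * driver with a vmmc regulator would typically forward the ios.vdd bit
 * from its set_ios() callback; my_set_ios() is hypothetical.
 *
 *	static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
 *					      ios->power_mode == MMC_POWER_OFF ?
 *					      0 : ios->vdd);
 *		// ... clock, bus width, timing setup ...
 *	}
 */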
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

static void mmc_poweroff_notify(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int timeout;
	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
	int err = 0;

	card = host->card;
	mmc_claim_host(host);

	/*
	 * Send power notify command only if card
	 * is mmc and notify state is powered ON
	 */
	if (card && mmc_card_mmc(card) &&
	    (card->poweroff_notify_state == MMC_POWERED_ON)) {

		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
			notify_type = EXT_CSD_POWER_OFF_SHORT;
			timeout = card->ext_csd.generic_cmd6_time;
			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
		} else {
			notify_type = EXT_CSD_POWER_OFF_LONG;
			timeout = card->ext_csd.power_off_longtime;
			card->poweroff_notify_state = MMC_POWEROFF_LONG;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 notify_type, timeout);

		if (err && err != -EBADMSG)
			pr_err("Device failed to respond within %d poweroff "
			       "time. Forcefully powering down the device\n",
			       timeout);

		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}
	mmc_release_host(host);
}
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Set signal voltage to 3.3V */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	int err = 0;

	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * For eMMC 4.5 devices, send the AWAKE command before the
	 * POWER_OFF_NOTIFY command, because in the sleep state
	 * eMMC 4.5 devices respond only to the RESET and AWAKE commands.
	 */
	if (host->card && mmc_card_is_sleep(host->card) &&
	    host->bus_ops->resume) {
		err = host->bus_ops->resume(host);

		if (!err)
			mmc_poweroff_notify(host);
		else
			pr_warning("%s: error %d during resume "
				   "(continue with poweroff sequence)\n",
				   mmc_hostname(host), err);
	}

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
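/*
 * Illustrative sketch (assumed host driver code, not from this file): a
 * driver with a card-detect GPIO typically debounces the slot by passing
 * a small delay; my_cd_irq() is hypothetical.
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		// debounce mechanical slots by ~200 ms
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */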
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
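/*
 * Illustrative sketch (assumed caller, not from this file): erasing a
 * sector range from a driver.  Alignment is checked first so that
 * MMC_ERASE_ARG does not silently shrink the range, and the host is
 * claimed as mmc_erase() requires.
 *
 *	if (mmc_can_erase(card) &&
 *	    mmc_erase_group_aligned(card, from, nr)) {
 *		mmc_claim_host(card->host);
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *		mmc_release_host(card->host);
 *	}
 */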
int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
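/*
 * Illustrative sketch (assumed block-layer glue, not from this file): the
 * result is typically used to cap the discard size advertised to the
 * block layer; 'q' is a hypothetical request queue.
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard) {
 *		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 *		q->limits.max_discard_sectors = max_discard;
 *	}
 */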
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}

int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
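/*
 * Illustrative sketch (assumed error-recovery path, not from this file):
 * a driver that has exhausted command retries can try a hardware reset,
 * falling back to a full power cycle when RST_n is unavailable or did
 * not take effect; the fallback shown here is hypothetical.
 *
 *	err = mmc_hw_reset_check(host);
 *	if (err == -EOPNOTSUPP || err == -ENOSYS) {
 *		// no usable RST_n: power off/on and reinitialize instead
 *		mmc_power_save_host(host);
 *		err = mmc_power_restore_host(host);
 *	}
 */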
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset the card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);
	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * If there is a _removable_ card registered, check whether it is
	 * still present.
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* If there still is a card present, stop here. */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	mmc_power_up(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

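/*
 * Illustrative sketch only: mmc_power_save_host() and
 * mmc_power_restore_host() above are intended to be used as a pair by
 * platform code that can gate the slot's power rail while the system
 * stays running.  my_rail_off()/my_rail_on() are hypothetical platform
 * hooks, not part of this file.
 *
 *	err = mmc_power_save_host(mmc);
 *	if (!err) {
 *		my_rail_off();
 *		...
 *		my_rail_on();
 *		err = mmc_power_restore_host(mmc);
 *	}
 */
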
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host))
		return err;

	mmc_claim_host(host);
	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off",
				       err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}
	mmc_release_host(host);

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {

		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);

		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.  (Calling
			 * bus_ops->remove() with a claimed host can
			 * deadlock.)
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_power_off(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}
EXPORT_SYMBOL(mmc_suspend_host);

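/*
 * Illustrative sketch only: a host controller driver of this generation
 * typically forwards its own system-PM callbacks to mmc_suspend_host()
 * above and mmc_resume_host() below.  The my_hc_* names are
 * hypothetical; this is a shape, not an API defined here.
 *
 *	static int my_hc_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_suspend_host(mmc);
 *	}
 *
 *	static int my_hc_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_resume_host(mmc);
 *	}
 */
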
/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell the runtime PM core we just powered up the
			 * card, since it still believes the card is powered
			 * off.  Note that currently runtime PM is only
			 * enabled for SDIO cards on hosts that set
			 * MMC_CAP_POWER_OFF_CARD.
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume (card was removed?)\n",
				   mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do it in the PM notifier while userspace isn't yet frozen, so we
 * will still be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");
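/*
 * Note (an assumption about code outside this file): mmc_pm_notify()
 * above is not called directly; in this kernel generation it is wired
 * up as a PM notifier by the host allocation/registration path in
 * host.c, roughly along these lines:
 *
 *	host->pm_notify.notifier_call = mmc_pm_notify;
 *	...
 *	register_pm_notifier(&host->pm_notify);
 */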