1 /* 2 * linux/drivers/mmc/core/core.c 3 * 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/completion.h> 17 #include <linux/device.h> 18 #include <linux/delay.h> 19 #include <linux/pagemap.h> 20 #include <linux/err.h> 21 #include <linux/leds.h> 22 #include <linux/scatterlist.h> 23 #include <linux/log2.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/suspend.h> 27 #include <linux/fault-inject.h> 28 #include <linux/random.h> 29 30 #include <linux/mmc/card.h> 31 #include <linux/mmc/host.h> 32 #include <linux/mmc/mmc.h> 33 #include <linux/mmc/sd.h> 34 35 #include "core.h" 36 #include "bus.h" 37 #include "host.h" 38 #include "sdio_bus.h" 39 40 #include "mmc_ops.h" 41 #include "sd_ops.h" 42 #include "sdio_ops.h" 43 44 static struct workqueue_struct *workqueue; 45 46 /* 47 * Enabling software CRCs on the data blocks can be a significant (30%) 48 * performance cost, and for other reasons may not always be desired. 49 * So we allow it it to be disabled. 50 */ 51 bool use_spi_crc = 1; 52 module_param(use_spi_crc, bool, 0); 53 54 /* 55 * We normally treat cards as removed during suspend if they are not 56 * known to be on a non-removable bus, to avoid the risk of writing 57 * back data to a different card after resume. Allow this to be 58 * overridden if necessary. 59 */ 60 #ifdef CONFIG_MMC_UNSAFE_RESUME 61 bool mmc_assume_removable; 62 #else 63 bool mmc_assume_removable = 1; 64 #endif 65 EXPORT_SYMBOL(mmc_assume_removable); 66 module_param_named(removable, mmc_assume_removable, bool, 0644); 67 MODULE_PARM_DESC( 68 removable, 69 "MMC/SD cards are removable and may be removed during suspend"); 70 71 /* 72 * Internal function. Schedule delayed work in the MMC work queue. 73 */ 74 static int mmc_schedule_delayed_work(struct delayed_work *work, 75 unsigned long delay) 76 { 77 return queue_delayed_work(workqueue, work, delay); 78 } 79 80 /* 81 * Internal function. Flush all scheduled work from the MMC work queue. 82 */ 83 static void mmc_flush_scheduled_work(void) 84 { 85 flush_workqueue(workqueue); 86 } 87 88 #ifdef CONFIG_FAIL_MMC_REQUEST 89 90 /* 91 * Internal function. Inject random data errors. 92 * If mmc_data is NULL no errors are injected. 93 */ 94 static void mmc_should_fail_request(struct mmc_host *host, 95 struct mmc_request *mrq) 96 { 97 struct mmc_command *cmd = mrq->cmd; 98 struct mmc_data *data = mrq->data; 99 static const int data_errors[] = { 100 -ETIMEDOUT, 101 -EILSEQ, 102 -EIO, 103 }; 104 105 if (!data) 106 return; 107 108 if (cmd->error || data->error || 109 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) 110 return; 111 112 data->error = data_errors[random32() % ARRAY_SIZE(data_errors)]; 113 data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9; 114 } 115 116 #else /* CONFIG_FAIL_MMC_REQUEST */ 117 118 static inline void mmc_should_fail_request(struct mmc_host *host, 119 struct mmc_request *mrq) 120 { 121 } 122 123 #endif /* CONFIG_FAIL_MMC_REQUEST */ 124 125 /** 126 * mmc_request_done - finish processing an MMC request 127 * @host: MMC host which completed request 128 * @mrq: MMC request which request 129 * 130 * MMC drivers should call this function when they have completed 131 * their processing of a request. 132 */ 133 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) 134 { 135 struct mmc_command *cmd = mrq->cmd; 136 int err = cmd->error; 137 138 if (err && cmd->retries && mmc_host_is_spi(host)) { 139 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) 140 cmd->retries = 0; 141 } 142 143 if (err && cmd->retries && !mmc_card_removed(host->card)) { 144 /* 145 * Request starter must handle retries - see 146 * mmc_wait_for_req_done(). 147 */ 148 if (mrq->done) 149 mrq->done(mrq); 150 } else { 151 mmc_should_fail_request(host, mrq); 152 153 led_trigger_event(host->led, LED_OFF); 154 155 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", 156 mmc_hostname(host), cmd->opcode, err, 157 cmd->resp[0], cmd->resp[1], 158 cmd->resp[2], cmd->resp[3]); 159 160 if (mrq->data) { 161 pr_debug("%s: %d bytes transferred: %d\n", 162 mmc_hostname(host), 163 mrq->data->bytes_xfered, mrq->data->error); 164 } 165 166 if (mrq->stop) { 167 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n", 168 mmc_hostname(host), mrq->stop->opcode, 169 mrq->stop->error, 170 mrq->stop->resp[0], mrq->stop->resp[1], 171 mrq->stop->resp[2], mrq->stop->resp[3]); 172 } 173 174 if (mrq->done) 175 mrq->done(mrq); 176 177 mmc_host_clk_release(host); 178 } 179 } 180 181 EXPORT_SYMBOL(mmc_request_done); 182 183 static void 184 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) 185 { 186 #ifdef CONFIG_MMC_DEBUG 187 unsigned int i, sz; 188 struct scatterlist *sg; 189 #endif 190 191 pr_debug("%s: starting CMD%u arg %08x flags %08x\n", 192 mmc_hostname(host), mrq->cmd->opcode, 193 mrq->cmd->arg, mrq->cmd->flags); 194 195 if (mrq->data) { 196 pr_debug("%s: blksz %d blocks %d flags %08x " 197 "tsac %d ms nsac %d\n", 198 mmc_hostname(host), mrq->data->blksz, 199 mrq->data->blocks, mrq->data->flags, 200 mrq->data->timeout_ns / 1000000, 201 mrq->data->timeout_clks); 202 } 203 204 if (mrq->stop) { 205 pr_debug("%s: CMD%u arg %08x flags %08x\n", 206 mmc_hostname(host), mrq->stop->opcode, 207 mrq->stop->arg, mrq->stop->flags); 208 } 209 210 WARN_ON(!host->claimed); 211 212 mrq->cmd->error = 0; 213 mrq->cmd->mrq = mrq; 214 if (mrq->data) { 215 BUG_ON(mrq->data->blksz > host->max_blk_size); 216 BUG_ON(mrq->data->blocks > host->max_blk_count); 217 BUG_ON(mrq->data->blocks * mrq->data->blksz > 218 host->max_req_size); 219 220 #ifdef CONFIG_MMC_DEBUG 221 sz = 0; 222 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) 223 sz += sg->length; 224 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); 225 #endif 226 227 mrq->cmd->data = mrq->data; 228 mrq->data->error = 0; 229 mrq->data->mrq = mrq; 230 if (mrq->stop) { 231 mrq->data->stop = mrq->stop; 232 mrq->stop->error = 0; 233 mrq->stop->mrq = mrq; 234 } 235 } 236 mmc_host_clk_hold(host); 237 led_trigger_event(host->led, LED_FULL); 238 host->ops->request(host, mrq); 239 } 240 241 static void mmc_wait_done(struct mmc_request *mrq) 242 { 243 complete(&mrq->completion); 244 } 245 246 static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) 247 { 248 init_completion(&mrq->completion); 249 mrq->done = mmc_wait_done; 250 if (mmc_card_removed(host->card)) { 251 mrq->cmd->error = -ENOMEDIUM; 252 complete(&mrq->completion); 253 return; 254 } 255 mmc_start_request(host, mrq); 256 } 257 258 static void mmc_wait_for_req_done(struct mmc_host *host, 259 struct mmc_request *mrq) 260 { 261 struct mmc_command *cmd; 262 263 while (1) { 264 wait_for_completion(&mrq->completion); 265 266 cmd = mrq->cmd; 267 if (!cmd->error || !cmd->retries || 268 mmc_card_removed(host->card)) 269 break; 270 271 pr_debug("%s: req failed (CMD%u): %d, retrying...\n", 272 mmc_hostname(host), cmd->opcode, cmd->error); 273 cmd->retries--; 274 cmd->error = 0; 275 host->ops->request(host, mrq); 276 } 277 } 278 279 /** 280 * mmc_pre_req - Prepare for a new request 281 * @host: MMC host to prepare command 282 * @mrq: MMC request to prepare for 283 * @is_first_req: true if there is no previous started request 284 * that may run in parellel to this call, otherwise false 285 * 286 * mmc_pre_req() is called in prior to mmc_start_req() to let 287 * host prepare for the new request. Preparation of a request may be 288 * performed while another request is running on the host. 289 */ 290 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, 291 bool is_first_req) 292 { 293 if (host->ops->pre_req) { 294 mmc_host_clk_hold(host); 295 host->ops->pre_req(host, mrq, is_first_req); 296 mmc_host_clk_release(host); 297 } 298 } 299 300 /** 301 * mmc_post_req - Post process a completed request 302 * @host: MMC host to post process command 303 * @mrq: MMC request to post process for 304 * @err: Error, if non zero, clean up any resources made in pre_req 305 * 306 * Let the host post process a completed request. Post processing of 307 * a request may be performed while another reuqest is running. 308 */ 309 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, 310 int err) 311 { 312 if (host->ops->post_req) { 313 mmc_host_clk_hold(host); 314 host->ops->post_req(host, mrq, err); 315 mmc_host_clk_release(host); 316 } 317 } 318 319 /** 320 * mmc_start_req - start a non-blocking request 321 * @host: MMC host to start command 322 * @areq: async request to start 323 * @error: out parameter returns 0 for success, otherwise non zero 324 * 325 * Start a new MMC custom command request for a host. 326 * If there is on ongoing async request wait for completion 327 * of that request and start the new one and return. 328 * Does not wait for the new request to complete. 329 * 330 * Returns the completed request, NULL in case of none completed. 331 * Wait for the an ongoing request (previoulsy started) to complete and 332 * return the completed request. If there is no ongoing request, NULL 333 * is returned without waiting. NULL is not an error condition. 334 */ 335 struct mmc_async_req *mmc_start_req(struct mmc_host *host, 336 struct mmc_async_req *areq, int *error) 337 { 338 int err = 0; 339 struct mmc_async_req *data = host->areq; 340 341 /* Prepare a new request */ 342 if (areq) 343 mmc_pre_req(host, areq->mrq, !host->areq); 344 345 if (host->areq) { 346 mmc_wait_for_req_done(host, host->areq->mrq); 347 err = host->areq->err_check(host->card, host->areq); 348 if (err) { 349 /* post process the completed failed request */ 350 mmc_post_req(host, host->areq->mrq, 0); 351 if (areq) 352 /* 353 * Cancel the new prepared request, because 354 * it can't run until the failed 355 * request has been properly handled. 356 */ 357 mmc_post_req(host, areq->mrq, -EINVAL); 358 359 host->areq = NULL; 360 goto out; 361 } 362 } 363 364 if (areq) 365 __mmc_start_req(host, areq->mrq); 366 367 if (host->areq) 368 mmc_post_req(host, host->areq->mrq, 0); 369 370 host->areq = areq; 371 out: 372 if (error) 373 *error = err; 374 return data; 375 } 376 EXPORT_SYMBOL(mmc_start_req); 377 378 /** 379 * mmc_wait_for_req - start a request and wait for completion 380 * @host: MMC host to start command 381 * @mrq: MMC request to start 382 * 383 * Start a new MMC custom command request for a host, and wait 384 * for the command to complete. Does not attempt to parse the 385 * response. 386 */ 387 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 388 { 389 __mmc_start_req(host, mrq); 390 mmc_wait_for_req_done(host, mrq); 391 } 392 EXPORT_SYMBOL(mmc_wait_for_req); 393 394 /** 395 * mmc_interrupt_hpi - Issue for High priority Interrupt 396 * @card: the MMC card associated with the HPI transfer 397 * 398 * Issued High Priority Interrupt, and check for card status 399 * util out-of prg-state. 400 */ 401 int mmc_interrupt_hpi(struct mmc_card *card) 402 { 403 int err; 404 u32 status; 405 406 BUG_ON(!card); 407 408 if (!card->ext_csd.hpi_en) { 409 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); 410 return 1; 411 } 412 413 mmc_claim_host(card->host); 414 err = mmc_send_status(card, &status); 415 if (err) { 416 pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); 417 goto out; 418 } 419 420 /* 421 * If the card status is in PRG-state, we can send the HPI command. 422 */ 423 if (R1_CURRENT_STATE(status) == R1_STATE_PRG) { 424 do { 425 /* 426 * We don't know when the HPI command will finish 427 * processing, so we need to resend HPI until out 428 * of prg-state, and keep checking the card status 429 * with SEND_STATUS. If a timeout error occurs when 430 * sending the HPI command, we are already out of 431 * prg-state. 432 */ 433 err = mmc_send_hpi_cmd(card, &status); 434 if (err) 435 pr_debug("%s: abort HPI (%d error)\n", 436 mmc_hostname(card->host), err); 437 438 err = mmc_send_status(card, &status); 439 if (err) 440 break; 441 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); 442 } else 443 pr_debug("%s: Left prg-state\n", mmc_hostname(card->host)); 444 445 out: 446 mmc_release_host(card->host); 447 return err; 448 } 449 EXPORT_SYMBOL(mmc_interrupt_hpi); 450 451 /** 452 * mmc_wait_for_cmd - start a command and wait for completion 453 * @host: MMC host to start command 454 * @cmd: MMC command to start 455 * @retries: maximum number of retries 456 * 457 * Start a new MMC command for a host, and wait for the command 458 * to complete. Return any error that occurred while the command 459 * was executing. Do not attempt to parse the response. 460 */ 461 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 462 { 463 struct mmc_request mrq = {NULL}; 464 465 WARN_ON(!host->claimed); 466 467 memset(cmd->resp, 0, sizeof(cmd->resp)); 468 cmd->retries = retries; 469 470 mrq.cmd = cmd; 471 cmd->data = NULL; 472 473 mmc_wait_for_req(host, &mrq); 474 475 return cmd->error; 476 } 477 478 EXPORT_SYMBOL(mmc_wait_for_cmd); 479 480 /** 481 * mmc_set_data_timeout - set the timeout for a data command 482 * @data: data phase for command 483 * @card: the MMC card associated with the data transfer 484 * 485 * Computes the data timeout parameters according to the 486 * correct algorithm given the card type. 487 */ 488 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) 489 { 490 unsigned int mult; 491 492 /* 493 * SDIO cards only define an upper 1 s limit on access. 494 */ 495 if (mmc_card_sdio(card)) { 496 data->timeout_ns = 1000000000; 497 data->timeout_clks = 0; 498 return; 499 } 500 501 /* 502 * SD cards use a 100 multiplier rather than 10 503 */ 504 mult = mmc_card_sd(card) ? 100 : 10; 505 506 /* 507 * Scale up the multiplier (and therefore the timeout) by 508 * the r2w factor for writes. 509 */ 510 if (data->flags & MMC_DATA_WRITE) 511 mult <<= card->csd.r2w_factor; 512 513 data->timeout_ns = card->csd.tacc_ns * mult; 514 data->timeout_clks = card->csd.tacc_clks * mult; 515 516 /* 517 * SD cards also have an upper limit on the timeout. 518 */ 519 if (mmc_card_sd(card)) { 520 unsigned int timeout_us, limit_us; 521 522 timeout_us = data->timeout_ns / 1000; 523 if (mmc_host_clk_rate(card->host)) 524 timeout_us += data->timeout_clks * 1000 / 525 (mmc_host_clk_rate(card->host) / 1000); 526 527 if (data->flags & MMC_DATA_WRITE) 528 /* 529 * The limit is really 250 ms, but that is 530 * insufficient for some crappy cards. 531 */ 532 limit_us = 300000; 533 else 534 limit_us = 100000; 535 536 /* 537 * SDHC cards always use these fixed values. 538 */ 539 if (timeout_us > limit_us || mmc_card_blockaddr(card)) { 540 data->timeout_ns = limit_us * 1000; 541 data->timeout_clks = 0; 542 } 543 } 544 545 /* 546 * Some cards require longer data read timeout than indicated in CSD. 547 * Address this by setting the read timeout to a "reasonably high" 548 * value. For the cards tested, 300ms has proven enough. If necessary, 549 * this value can be increased if other problematic cards require this. 550 */ 551 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { 552 data->timeout_ns = 300000000; 553 data->timeout_clks = 0; 554 } 555 556 /* 557 * Some cards need very high timeouts if driven in SPI mode. 558 * The worst observed timeout was 900ms after writing a 559 * continuous stream of data until the internal logic 560 * overflowed. 561 */ 562 if (mmc_host_is_spi(card->host)) { 563 if (data->flags & MMC_DATA_WRITE) { 564 if (data->timeout_ns < 1000000000) 565 data->timeout_ns = 1000000000; /* 1s */ 566 } else { 567 if (data->timeout_ns < 100000000) 568 data->timeout_ns = 100000000; /* 100ms */ 569 } 570 } 571 } 572 EXPORT_SYMBOL(mmc_set_data_timeout); 573 574 /** 575 * mmc_align_data_size - pads a transfer size to a more optimal value 576 * @card: the MMC card associated with the data transfer 577 * @sz: original transfer size 578 * 579 * Pads the original data size with a number of extra bytes in 580 * order to avoid controller bugs and/or performance hits 581 * (e.g. some controllers revert to PIO for certain sizes). 582 * 583 * Returns the improved size, which might be unmodified. 584 * 585 * Note that this function is only relevant when issuing a 586 * single scatter gather entry. 587 */ 588 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) 589 { 590 /* 591 * FIXME: We don't have a system for the controller to tell 592 * the core about its problems yet, so for now we just 32-bit 593 * align the size. 594 */ 595 sz = ((sz + 3) / 4) * 4; 596 597 return sz; 598 } 599 EXPORT_SYMBOL(mmc_align_data_size); 600 601 /** 602 * mmc_host_enable - enable a host. 603 * @host: mmc host to enable 604 * 605 * Hosts that support power saving can use the 'enable' and 'disable' 606 * methods to exit and enter power saving states. For more information 607 * see comments for struct mmc_host_ops. 608 */ 609 int mmc_host_enable(struct mmc_host *host) 610 { 611 if (!(host->caps & MMC_CAP_DISABLE)) 612 return 0; 613 614 if (host->en_dis_recurs) 615 return 0; 616 617 if (host->nesting_cnt++) 618 return 0; 619 620 cancel_delayed_work_sync(&host->disable); 621 622 if (host->enabled) 623 return 0; 624 625 if (host->ops->enable) { 626 int err; 627 628 host->en_dis_recurs = 1; 629 mmc_host_clk_hold(host); 630 err = host->ops->enable(host); 631 mmc_host_clk_release(host); 632 host->en_dis_recurs = 0; 633 634 if (err) { 635 pr_debug("%s: enable error %d\n", 636 mmc_hostname(host), err); 637 return err; 638 } 639 } 640 host->enabled = 1; 641 return 0; 642 } 643 EXPORT_SYMBOL(mmc_host_enable); 644 645 static int mmc_host_do_disable(struct mmc_host *host, int lazy) 646 { 647 if (host->ops->disable) { 648 int err; 649 650 host->en_dis_recurs = 1; 651 mmc_host_clk_hold(host); 652 err = host->ops->disable(host, lazy); 653 mmc_host_clk_release(host); 654 host->en_dis_recurs = 0; 655 656 if (err < 0) { 657 pr_debug("%s: disable error %d\n", 658 mmc_hostname(host), err); 659 return err; 660 } 661 if (err > 0) { 662 unsigned long delay = msecs_to_jiffies(err); 663 664 mmc_schedule_delayed_work(&host->disable, delay); 665 } 666 } 667 host->enabled = 0; 668 return 0; 669 } 670 671 /** 672 * mmc_host_disable - disable a host. 673 * @host: mmc host to disable 674 * 675 * Hosts that support power saving can use the 'enable' and 'disable' 676 * methods to exit and enter power saving states. For more information 677 * see comments for struct mmc_host_ops. 678 */ 679 int mmc_host_disable(struct mmc_host *host) 680 { 681 int err; 682 683 if (!(host->caps & MMC_CAP_DISABLE)) 684 return 0; 685 686 if (host->en_dis_recurs) 687 return 0; 688 689 if (--host->nesting_cnt) 690 return 0; 691 692 if (!host->enabled) 693 return 0; 694 695 err = mmc_host_do_disable(host, 0); 696 return err; 697 } 698 EXPORT_SYMBOL(mmc_host_disable); 699 700 /** 701 * __mmc_claim_host - exclusively claim a host 702 * @host: mmc host to claim 703 * @abort: whether or not the operation should be aborted 704 * 705 * Claim a host for a set of operations. If @abort is non null and 706 * dereference a non-zero value then this will return prematurely with 707 * that non-zero value without acquiring the lock. Returns zero 708 * with the lock held otherwise. 709 */ 710 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) 711 { 712 DECLARE_WAITQUEUE(wait, current); 713 unsigned long flags; 714 int stop; 715 716 might_sleep(); 717 718 add_wait_queue(&host->wq, &wait); 719 spin_lock_irqsave(&host->lock, flags); 720 while (1) { 721 set_current_state(TASK_UNINTERRUPTIBLE); 722 stop = abort ? atomic_read(abort) : 0; 723 if (stop || !host->claimed || host->claimer == current) 724 break; 725 spin_unlock_irqrestore(&host->lock, flags); 726 schedule(); 727 spin_lock_irqsave(&host->lock, flags); 728 } 729 set_current_state(TASK_RUNNING); 730 if (!stop) { 731 host->claimed = 1; 732 host->claimer = current; 733 host->claim_cnt += 1; 734 } else 735 wake_up(&host->wq); 736 spin_unlock_irqrestore(&host->lock, flags); 737 remove_wait_queue(&host->wq, &wait); 738 if (!stop) 739 mmc_host_enable(host); 740 return stop; 741 } 742 743 EXPORT_SYMBOL(__mmc_claim_host); 744 745 /** 746 * mmc_try_claim_host - try exclusively to claim a host 747 * @host: mmc host to claim 748 * 749 * Returns %1 if the host is claimed, %0 otherwise. 750 */ 751 int mmc_try_claim_host(struct mmc_host *host) 752 { 753 int claimed_host = 0; 754 unsigned long flags; 755 756 spin_lock_irqsave(&host->lock, flags); 757 if (!host->claimed || host->claimer == current) { 758 host->claimed = 1; 759 host->claimer = current; 760 host->claim_cnt += 1; 761 claimed_host = 1; 762 } 763 spin_unlock_irqrestore(&host->lock, flags); 764 return claimed_host; 765 } 766 EXPORT_SYMBOL(mmc_try_claim_host); 767 768 /** 769 * mmc_do_release_host - release a claimed host 770 * @host: mmc host to release 771 * 772 * If you successfully claimed a host, this function will 773 * release it again. 774 */ 775 void mmc_do_release_host(struct mmc_host *host) 776 { 777 unsigned long flags; 778 779 spin_lock_irqsave(&host->lock, flags); 780 if (--host->claim_cnt) { 781 /* Release for nested claim */ 782 spin_unlock_irqrestore(&host->lock, flags); 783 } else { 784 host->claimed = 0; 785 host->claimer = NULL; 786 spin_unlock_irqrestore(&host->lock, flags); 787 wake_up(&host->wq); 788 } 789 } 790 EXPORT_SYMBOL(mmc_do_release_host); 791 792 void mmc_host_deeper_disable(struct work_struct *work) 793 { 794 struct mmc_host *host = 795 container_of(work, struct mmc_host, disable.work); 796 797 /* If the host is claimed then we do not want to disable it anymore */ 798 if (!mmc_try_claim_host(host)) 799 return; 800 mmc_host_do_disable(host, 1); 801 mmc_do_release_host(host); 802 } 803 804 /** 805 * mmc_host_lazy_disable - lazily disable a host. 806 * @host: mmc host to disable 807 * 808 * Hosts that support power saving can use the 'enable' and 'disable' 809 * methods to exit and enter power saving states. For more information 810 * see comments for struct mmc_host_ops. 811 */ 812 int mmc_host_lazy_disable(struct mmc_host *host) 813 { 814 if (!(host->caps & MMC_CAP_DISABLE)) 815 return 0; 816 817 if (host->en_dis_recurs) 818 return 0; 819 820 if (--host->nesting_cnt) 821 return 0; 822 823 if (!host->enabled) 824 return 0; 825 826 if (host->disable_delay) { 827 mmc_schedule_delayed_work(&host->disable, 828 msecs_to_jiffies(host->disable_delay)); 829 return 0; 830 } else 831 return mmc_host_do_disable(host, 1); 832 } 833 EXPORT_SYMBOL(mmc_host_lazy_disable); 834 835 /** 836 * mmc_release_host - release a host 837 * @host: mmc host to release 838 * 839 * Release a MMC host, allowing others to claim the host 840 * for their operations. 841 */ 842 void mmc_release_host(struct mmc_host *host) 843 { 844 WARN_ON(!host->claimed); 845 846 mmc_host_lazy_disable(host); 847 848 mmc_do_release_host(host); 849 } 850 851 EXPORT_SYMBOL(mmc_release_host); 852 853 /* 854 * Internal function that does the actual ios call to the host driver, 855 * optionally printing some debug output. 856 */ 857 static inline void mmc_set_ios(struct mmc_host *host) 858 { 859 struct mmc_ios *ios = &host->ios; 860 861 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " 862 "width %u timing %u\n", 863 mmc_hostname(host), ios->clock, ios->bus_mode, 864 ios->power_mode, ios->chip_select, ios->vdd, 865 ios->bus_width, ios->timing); 866 867 if (ios->clock > 0) 868 mmc_set_ungated(host); 869 host->ops->set_ios(host, ios); 870 } 871 872 /* 873 * Control chip select pin on a host. 874 */ 875 void mmc_set_chip_select(struct mmc_host *host, int mode) 876 { 877 mmc_host_clk_hold(host); 878 host->ios.chip_select = mode; 879 mmc_set_ios(host); 880 mmc_host_clk_release(host); 881 } 882 883 /* 884 * Sets the host clock to the highest possible frequency that 885 * is below "hz". 886 */ 887 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) 888 { 889 WARN_ON(hz < host->f_min); 890 891 if (hz > host->f_max) 892 hz = host->f_max; 893 894 host->ios.clock = hz; 895 mmc_set_ios(host); 896 } 897 898 void mmc_set_clock(struct mmc_host *host, unsigned int hz) 899 { 900 mmc_host_clk_hold(host); 901 __mmc_set_clock(host, hz); 902 mmc_host_clk_release(host); 903 } 904 905 #ifdef CONFIG_MMC_CLKGATE 906 /* 907 * This gates the clock by setting it to 0 Hz. 908 */ 909 void mmc_gate_clock(struct mmc_host *host) 910 { 911 unsigned long flags; 912 913 spin_lock_irqsave(&host->clk_lock, flags); 914 host->clk_old = host->ios.clock; 915 host->ios.clock = 0; 916 host->clk_gated = true; 917 spin_unlock_irqrestore(&host->clk_lock, flags); 918 mmc_set_ios(host); 919 } 920 921 /* 922 * This restores the clock from gating by using the cached 923 * clock value. 924 */ 925 void mmc_ungate_clock(struct mmc_host *host) 926 { 927 /* 928 * We should previously have gated the clock, so the clock shall 929 * be 0 here! The clock may however be 0 during initialization, 930 * when some request operations are performed before setting 931 * the frequency. When ungate is requested in that situation 932 * we just ignore the call. 933 */ 934 if (host->clk_old) { 935 BUG_ON(host->ios.clock); 936 /* This call will also set host->clk_gated to false */ 937 __mmc_set_clock(host, host->clk_old); 938 } 939 } 940 941 void mmc_set_ungated(struct mmc_host *host) 942 { 943 unsigned long flags; 944 945 /* 946 * We've been given a new frequency while the clock is gated, 947 * so make sure we regard this as ungating it. 948 */ 949 spin_lock_irqsave(&host->clk_lock, flags); 950 host->clk_gated = false; 951 spin_unlock_irqrestore(&host->clk_lock, flags); 952 } 953 954 #else 955 void mmc_set_ungated(struct mmc_host *host) 956 { 957 } 958 #endif 959 960 /* 961 * Change the bus mode (open drain/push-pull) of a host. 962 */ 963 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 964 { 965 mmc_host_clk_hold(host); 966 host->ios.bus_mode = mode; 967 mmc_set_ios(host); 968 mmc_host_clk_release(host); 969 } 970 971 /* 972 * Change data bus width of a host. 973 */ 974 void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 975 { 976 mmc_host_clk_hold(host); 977 host->ios.bus_width = width; 978 mmc_set_ios(host); 979 mmc_host_clk_release(host); 980 } 981 982 /** 983 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number 984 * @vdd: voltage (mV) 985 * @low_bits: prefer low bits in boundary cases 986 * 987 * This function returns the OCR bit number according to the provided @vdd 988 * value. If conversion is not possible a negative errno value returned. 989 * 990 * Depending on the @low_bits flag the function prefers low or high OCR bits 991 * on boundary voltages. For example, 992 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); 993 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); 994 * 995 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 996 */ 997 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits) 998 { 999 const int max_bit = ilog2(MMC_VDD_35_36); 1000 int bit; 1001 1002 if (vdd < 1650 || vdd > 3600) 1003 return -EINVAL; 1004 1005 if (vdd >= 1650 && vdd <= 1950) 1006 return ilog2(MMC_VDD_165_195); 1007 1008 if (low_bits) 1009 vdd -= 1; 1010 1011 /* Base 2000 mV, step 100 mV, bit's base 8. */ 1012 bit = (vdd - 2000) / 100 + 8; 1013 if (bit > max_bit) 1014 return max_bit; 1015 return bit; 1016 } 1017 1018 /** 1019 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask 1020 * @vdd_min: minimum voltage value (mV) 1021 * @vdd_max: maximum voltage value (mV) 1022 * 1023 * This function returns the OCR mask bits according to the provided @vdd_min 1024 * and @vdd_max values. If conversion is not possible the function returns 0. 1025 * 1026 * Notes wrt boundary cases: 1027 * This function sets the OCR bits for all boundary voltages, for example 1028 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 | 1029 * MMC_VDD_34_35 mask. 1030 */ 1031 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) 1032 { 1033 u32 mask = 0; 1034 1035 if (vdd_max < vdd_min) 1036 return 0; 1037 1038 /* Prefer high bits for the boundary vdd_max values. */ 1039 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false); 1040 if (vdd_max < 0) 1041 return 0; 1042 1043 /* Prefer low bits for the boundary vdd_min values. */ 1044 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true); 1045 if (vdd_min < 0) 1046 return 0; 1047 1048 /* Fill the mask, from max bit to min bit. */ 1049 while (vdd_max >= vdd_min) 1050 mask |= 1 << vdd_max--; 1051 1052 return mask; 1053 } 1054 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); 1055 1056 #ifdef CONFIG_REGULATOR 1057 1058 /** 1059 * mmc_regulator_get_ocrmask - return mask of supported voltages 1060 * @supply: regulator to use 1061 * 1062 * This returns either a negative errno, or a mask of voltages that 1063 * can be provided to MMC/SD/SDIO devices using the specified voltage 1064 * regulator. This would normally be called before registering the 1065 * MMC host adapter. 1066 */ 1067 int mmc_regulator_get_ocrmask(struct regulator *supply) 1068 { 1069 int result = 0; 1070 int count; 1071 int i; 1072 1073 count = regulator_count_voltages(supply); 1074 if (count < 0) 1075 return count; 1076 1077 for (i = 0; i < count; i++) { 1078 int vdd_uV; 1079 int vdd_mV; 1080 1081 vdd_uV = regulator_list_voltage(supply, i); 1082 if (vdd_uV <= 0) 1083 continue; 1084 1085 vdd_mV = vdd_uV / 1000; 1086 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); 1087 } 1088 1089 return result; 1090 } 1091 EXPORT_SYMBOL(mmc_regulator_get_ocrmask); 1092 1093 /** 1094 * mmc_regulator_set_ocr - set regulator to match host->ios voltage 1095 * @mmc: the host to regulate 1096 * @supply: regulator to use 1097 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) 1098 * 1099 * Returns zero on success, else negative errno. 1100 * 1101 * MMC host drivers may use this to enable or disable a regulator using 1102 * a particular supply voltage. This would normally be called from the 1103 * set_ios() method. 1104 */ 1105 int mmc_regulator_set_ocr(struct mmc_host *mmc, 1106 struct regulator *supply, 1107 unsigned short vdd_bit) 1108 { 1109 int result = 0; 1110 int min_uV, max_uV; 1111 1112 if (vdd_bit) { 1113 int tmp; 1114 int voltage; 1115 1116 /* REVISIT mmc_vddrange_to_ocrmask() may have set some 1117 * bits this regulator doesn't quite support ... don't 1118 * be too picky, most cards and regulators are OK with 1119 * a 0.1V range goof (it's a small error percentage). 1120 */ 1121 tmp = vdd_bit - ilog2(MMC_VDD_165_195); 1122 if (tmp == 0) { 1123 min_uV = 1650 * 1000; 1124 max_uV = 1950 * 1000; 1125 } else { 1126 min_uV = 1900 * 1000 + tmp * 100 * 1000; 1127 max_uV = min_uV + 100 * 1000; 1128 } 1129 1130 /* avoid needless changes to this voltage; the regulator 1131 * might not allow this operation 1132 */ 1133 voltage = regulator_get_voltage(supply); 1134 1135 if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE) 1136 min_uV = max_uV = voltage; 1137 1138 if (voltage < 0) 1139 result = voltage; 1140 else if (voltage < min_uV || voltage > max_uV) 1141 result = regulator_set_voltage(supply, min_uV, max_uV); 1142 else 1143 result = 0; 1144 1145 if (result == 0 && !mmc->regulator_enabled) { 1146 result = regulator_enable(supply); 1147 if (!result) 1148 mmc->regulator_enabled = true; 1149 } 1150 } else if (mmc->regulator_enabled) { 1151 result = regulator_disable(supply); 1152 if (result == 0) 1153 mmc->regulator_enabled = false; 1154 } 1155 1156 if (result) 1157 dev_err(mmc_dev(mmc), 1158 "could not set regulator OCR (%d)\n", result); 1159 return result; 1160 } 1161 EXPORT_SYMBOL(mmc_regulator_set_ocr); 1162 1163 #endif /* CONFIG_REGULATOR */ 1164 1165 /* 1166 * Mask off any voltages we don't support and select 1167 * the lowest voltage 1168 */ 1169 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) 1170 { 1171 int bit; 1172 1173 ocr &= host->ocr_avail; 1174 1175 bit = ffs(ocr); 1176 if (bit) { 1177 bit -= 1; 1178 1179 ocr &= 3 << bit; 1180 1181 mmc_host_clk_hold(host); 1182 host->ios.vdd = bit; 1183 mmc_set_ios(host); 1184 mmc_host_clk_release(host); 1185 } else { 1186 pr_warning("%s: host doesn't support card's voltages\n", 1187 mmc_hostname(host)); 1188 ocr = 0; 1189 } 1190 1191 return ocr; 1192 } 1193 1194 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11) 1195 { 1196 struct mmc_command cmd = {0}; 1197 int err = 0; 1198 1199 BUG_ON(!host); 1200 1201 /* 1202 * Send CMD11 only if the request is to switch the card to 1203 * 1.8V signalling. 1204 */ 1205 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) { 1206 cmd.opcode = SD_SWITCH_VOLTAGE; 1207 cmd.arg = 0; 1208 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1209 1210 err = mmc_wait_for_cmd(host, &cmd, 0); 1211 if (err) 1212 return err; 1213 1214 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) 1215 return -EIO; 1216 } 1217 1218 host->ios.signal_voltage = signal_voltage; 1219 1220 if (host->ops->start_signal_voltage_switch) { 1221 mmc_host_clk_hold(host); 1222 err = host->ops->start_signal_voltage_switch(host, &host->ios); 1223 mmc_host_clk_release(host); 1224 } 1225 1226 return err; 1227 } 1228 1229 /* 1230 * Select timing parameters for host. 1231 */ 1232 void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1233 { 1234 mmc_host_clk_hold(host); 1235 host->ios.timing = timing; 1236 mmc_set_ios(host); 1237 mmc_host_clk_release(host); 1238 } 1239 1240 /* 1241 * Select appropriate driver type for host. 1242 */ 1243 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1244 { 1245 mmc_host_clk_hold(host); 1246 host->ios.drv_type = drv_type; 1247 mmc_set_ios(host); 1248 mmc_host_clk_release(host); 1249 } 1250 1251 static void mmc_poweroff_notify(struct mmc_host *host) 1252 { 1253 struct mmc_card *card; 1254 unsigned int timeout; 1255 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION; 1256 int err = 0; 1257 1258 card = host->card; 1259 mmc_claim_host(host); 1260 1261 /* 1262 * Send power notify command only if card 1263 * is mmc and notify state is powered ON 1264 */ 1265 if (card && mmc_card_mmc(card) && 1266 (card->poweroff_notify_state == MMC_POWERED_ON)) { 1267 1268 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) { 1269 notify_type = EXT_CSD_POWER_OFF_SHORT; 1270 timeout = card->ext_csd.generic_cmd6_time; 1271 card->poweroff_notify_state = MMC_POWEROFF_SHORT; 1272 } else { 1273 notify_type = EXT_CSD_POWER_OFF_LONG; 1274 timeout = card->ext_csd.power_off_longtime; 1275 card->poweroff_notify_state = MMC_POWEROFF_LONG; 1276 } 1277 1278 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1279 EXT_CSD_POWER_OFF_NOTIFICATION, 1280 notify_type, timeout); 1281 1282 if (err && err != -EBADMSG) 1283 pr_err("Device failed to respond within %d poweroff " 1284 "time. Forcefully powering down the device\n", 1285 timeout); 1286 1287 /* Set the card state to no notification after the poweroff */ 1288 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION; 1289 } 1290 mmc_release_host(host); 1291 } 1292 1293 /* 1294 * Apply power to the MMC stack. This is a two-stage process. 1295 * First, we enable power to the card without the clock running. 1296 * We then wait a bit for the power to stabilise. Finally, 1297 * enable the bus drivers and clock to the card. 1298 * 1299 * We must _NOT_ enable the clock prior to power stablising. 1300 * 1301 * If a host does all the power sequencing itself, ignore the 1302 * initial MMC_POWER_UP stage. 1303 */ 1304 static void mmc_power_up(struct mmc_host *host) 1305 { 1306 int bit; 1307 1308 mmc_host_clk_hold(host); 1309 1310 /* If ocr is set, we use it */ 1311 if (host->ocr) 1312 bit = ffs(host->ocr) - 1; 1313 else 1314 bit = fls(host->ocr_avail) - 1; 1315 1316 host->ios.vdd = bit; 1317 if (mmc_host_is_spi(host)) 1318 host->ios.chip_select = MMC_CS_HIGH; 1319 else 1320 host->ios.chip_select = MMC_CS_DONTCARE; 1321 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 1322 host->ios.power_mode = MMC_POWER_UP; 1323 host->ios.bus_width = MMC_BUS_WIDTH_1; 1324 host->ios.timing = MMC_TIMING_LEGACY; 1325 mmc_set_ios(host); 1326 1327 /* 1328 * This delay should be sufficient to allow the power supply 1329 * to reach the minimum voltage. 1330 */ 1331 mmc_delay(10); 1332 1333 host->ios.clock = host->f_init; 1334 1335 host->ios.power_mode = MMC_POWER_ON; 1336 mmc_set_ios(host); 1337 1338 /* 1339 * This delay must be at least 74 clock sizes, or 1 ms, or the 1340 * time required to reach a stable voltage. 1341 */ 1342 mmc_delay(10); 1343 1344 mmc_host_clk_release(host); 1345 } 1346 1347 void mmc_power_off(struct mmc_host *host) 1348 { 1349 int err = 0; 1350 mmc_host_clk_hold(host); 1351 1352 host->ios.clock = 0; 1353 host->ios.vdd = 0; 1354 1355 /* 1356 * For eMMC 4.5 device send AWAKE command before 1357 * POWER_OFF_NOTIFY command, because in sleep state 1358 * eMMC 4.5 devices respond to only RESET and AWAKE cmd 1359 */ 1360 if (host->card && mmc_card_is_sleep(host->card) && 1361 host->bus_ops->resume) { 1362 err = host->bus_ops->resume(host); 1363 1364 if (!err) 1365 mmc_poweroff_notify(host); 1366 else 1367 pr_warning("%s: error %d during resume " 1368 "(continue with poweroff sequence)\n", 1369 mmc_hostname(host), err); 1370 } 1371 1372 /* 1373 * Reset ocr mask to be the highest possible voltage supported for 1374 * this mmc host. This value will be used at next power up. 1375 */ 1376 host->ocr = 1 << (fls(host->ocr_avail) - 1); 1377 1378 if (!mmc_host_is_spi(host)) { 1379 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1380 host->ios.chip_select = MMC_CS_DONTCARE; 1381 } 1382 host->ios.power_mode = MMC_POWER_OFF; 1383 host->ios.bus_width = MMC_BUS_WIDTH_1; 1384 host->ios.timing = MMC_TIMING_LEGACY; 1385 mmc_set_ios(host); 1386 1387 /* 1388 * Some configurations, such as the 802.11 SDIO card in the OLPC 1389 * XO-1.5, require a short delay after poweroff before the card 1390 * can be successfully turned on again. 1391 */ 1392 mmc_delay(1); 1393 1394 mmc_host_clk_release(host); 1395 } 1396 1397 /* 1398 * Cleanup when the last reference to the bus operator is dropped. 1399 */ 1400 static void __mmc_release_bus(struct mmc_host *host) 1401 { 1402 BUG_ON(!host); 1403 BUG_ON(host->bus_refs); 1404 BUG_ON(!host->bus_dead); 1405 1406 host->bus_ops = NULL; 1407 } 1408 1409 /* 1410 * Increase reference count of bus operator 1411 */ 1412 static inline void mmc_bus_get(struct mmc_host *host) 1413 { 1414 unsigned long flags; 1415 1416 spin_lock_irqsave(&host->lock, flags); 1417 host->bus_refs++; 1418 spin_unlock_irqrestore(&host->lock, flags); 1419 } 1420 1421 /* 1422 * Decrease reference count of bus operator and free it if 1423 * it is the last reference. 1424 */ 1425 static inline void mmc_bus_put(struct mmc_host *host) 1426 { 1427 unsigned long flags; 1428 1429 spin_lock_irqsave(&host->lock, flags); 1430 host->bus_refs--; 1431 if ((host->bus_refs == 0) && host->bus_ops) 1432 __mmc_release_bus(host); 1433 spin_unlock_irqrestore(&host->lock, flags); 1434 } 1435 1436 /* 1437 * Assign a mmc bus handler to a host. Only one bus handler may control a 1438 * host at any given time. 1439 */ 1440 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) 1441 { 1442 unsigned long flags; 1443 1444 BUG_ON(!host); 1445 BUG_ON(!ops); 1446 1447 WARN_ON(!host->claimed); 1448 1449 spin_lock_irqsave(&host->lock, flags); 1450 1451 BUG_ON(host->bus_ops); 1452 BUG_ON(host->bus_refs); 1453 1454 host->bus_ops = ops; 1455 host->bus_refs = 1; 1456 host->bus_dead = 0; 1457 1458 spin_unlock_irqrestore(&host->lock, flags); 1459 } 1460 1461 /* 1462 * Remove the current bus handler from a host. 1463 */ 1464 void mmc_detach_bus(struct mmc_host *host) 1465 { 1466 unsigned long flags; 1467 1468 BUG_ON(!host); 1469 1470 WARN_ON(!host->claimed); 1471 WARN_ON(!host->bus_ops); 1472 1473 spin_lock_irqsave(&host->lock, flags); 1474 1475 host->bus_dead = 1; 1476 1477 spin_unlock_irqrestore(&host->lock, flags); 1478 1479 mmc_bus_put(host); 1480 } 1481 1482 /** 1483 * mmc_detect_change - process change of state on a MMC socket 1484 * @host: host which changed state. 1485 * @delay: optional delay to wait before detection (jiffies) 1486 * 1487 * MMC drivers should call this when they detect a card has been 1488 * inserted or removed. The MMC layer will confirm that any 1489 * present card is still functional, and initialize any newly 1490 * inserted. 1491 */ 1492 void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1493 { 1494 #ifdef CONFIG_MMC_DEBUG 1495 unsigned long flags; 1496 spin_lock_irqsave(&host->lock, flags); 1497 WARN_ON(host->removed); 1498 spin_unlock_irqrestore(&host->lock, flags); 1499 #endif 1500 host->detect_change = 1; 1501 mmc_schedule_delayed_work(&host->detect, delay); 1502 } 1503 1504 EXPORT_SYMBOL(mmc_detect_change); 1505 1506 void mmc_init_erase(struct mmc_card *card) 1507 { 1508 unsigned int sz; 1509 1510 if (is_power_of_2(card->erase_size)) 1511 card->erase_shift = ffs(card->erase_size) - 1; 1512 else 1513 card->erase_shift = 0; 1514 1515 /* 1516 * It is possible to erase an arbitrarily large area of an SD or MMC 1517 * card. That is not desirable because it can take a long time 1518 * (minutes) potentially delaying more important I/O, and also the 1519 * timeout calculations become increasingly hugely over-estimated. 1520 * Consequently, 'pref_erase' is defined as a guide to limit erases 1521 * to that size and alignment. 1522 * 1523 * For SD cards that define Allocation Unit size, limit erases to one 1524 * Allocation Unit at a time. For MMC cards that define High Capacity 1525 * Erase Size, whether it is switched on or not, limit to that size. 1526 * Otherwise just have a stab at a good value. For modern cards it 1527 * will end up being 4MiB. Note that if the value is too small, it 1528 * can end up taking longer to erase. 1529 */ 1530 if (mmc_card_sd(card) && card->ssr.au) { 1531 card->pref_erase = card->ssr.au; 1532 card->erase_shift = ffs(card->ssr.au) - 1; 1533 } else if (card->ext_csd.hc_erase_size) { 1534 card->pref_erase = card->ext_csd.hc_erase_size; 1535 } else { 1536 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; 1537 if (sz < 128) 1538 card->pref_erase = 512 * 1024 / 512; 1539 else if (sz < 512) 1540 card->pref_erase = 1024 * 1024 / 512; 1541 else if (sz < 1024) 1542 card->pref_erase = 2 * 1024 * 1024 / 512; 1543 else 1544 card->pref_erase = 4 * 1024 * 1024 / 512; 1545 if (card->pref_erase < card->erase_size) 1546 card->pref_erase = card->erase_size; 1547 else { 1548 sz = card->pref_erase % card->erase_size; 1549 if (sz) 1550 card->pref_erase += card->erase_size - sz; 1551 } 1552 } 1553 } 1554 1555 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, 1556 unsigned int arg, unsigned int qty) 1557 { 1558 unsigned int erase_timeout; 1559 1560 if (card->ext_csd.erase_group_def & 1) { 1561 /* High Capacity Erase Group Size uses HC timeouts */ 1562 if (arg == MMC_TRIM_ARG) 1563 erase_timeout = card->ext_csd.trim_timeout; 1564 else 1565 erase_timeout = card->ext_csd.hc_erase_timeout; 1566 } else { 1567 /* CSD Erase Group Size uses write timeout */ 1568 unsigned int mult = (10 << card->csd.r2w_factor); 1569 unsigned int timeout_clks = card->csd.tacc_clks * mult; 1570 unsigned int timeout_us; 1571 1572 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ 1573 if (card->csd.tacc_ns < 1000000) 1574 timeout_us = (card->csd.tacc_ns * mult) / 1000; 1575 else 1576 timeout_us = (card->csd.tacc_ns / 1000) * mult; 1577 1578 /* 1579 * ios.clock is only a target. The real clock rate might be 1580 * less but not that much less, so fudge it by multiplying by 2. 1581 */ 1582 timeout_clks <<= 1; 1583 timeout_us += (timeout_clks * 1000) / 1584 (mmc_host_clk_rate(card->host) / 1000); 1585 1586 erase_timeout = timeout_us / 1000; 1587 1588 /* 1589 * Theoretically, the calculation could underflow so round up 1590 * to 1ms in that case. 1591 */ 1592 if (!erase_timeout) 1593 erase_timeout = 1; 1594 } 1595 1596 /* Multiplier for secure operations */ 1597 if (arg & MMC_SECURE_ARGS) { 1598 if (arg == MMC_SECURE_ERASE_ARG) 1599 erase_timeout *= card->ext_csd.sec_erase_mult; 1600 else 1601 erase_timeout *= card->ext_csd.sec_trim_mult; 1602 } 1603 1604 erase_timeout *= qty; 1605 1606 /* 1607 * Ensure at least a 1 second timeout for SPI as per 1608 * 'mmc_set_data_timeout()' 1609 */ 1610 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1611 erase_timeout = 1000; 1612 1613 return erase_timeout; 1614 } 1615 1616 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, 1617 unsigned int arg, 1618 unsigned int qty) 1619 { 1620 unsigned int erase_timeout; 1621 1622 if (card->ssr.erase_timeout) { 1623 /* Erase timeout specified in SD Status Register (SSR) */ 1624 erase_timeout = card->ssr.erase_timeout * qty + 1625 card->ssr.erase_offset; 1626 } else { 1627 /* 1628 * Erase timeout not specified in SD Status Register (SSR) so 1629 * use 250ms per write block. 1630 */ 1631 erase_timeout = 250 * qty; 1632 } 1633 1634 /* Must not be less than 1 second */ 1635 if (erase_timeout < 1000) 1636 erase_timeout = 1000; 1637 1638 return erase_timeout; 1639 } 1640 1641 static unsigned int mmc_erase_timeout(struct mmc_card *card, 1642 unsigned int arg, 1643 unsigned int qty) 1644 { 1645 if (mmc_card_sd(card)) 1646 return mmc_sd_erase_timeout(card, arg, qty); 1647 else 1648 return mmc_mmc_erase_timeout(card, arg, qty); 1649 } 1650 1651 static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1652 unsigned int to, unsigned int arg) 1653 { 1654 struct mmc_command cmd = {0}; 1655 unsigned int qty = 0; 1656 int err; 1657 1658 /* 1659 * qty is used to calculate the erase timeout which depends on how many 1660 * erase groups (or allocation units in SD terminology) are affected. 1661 * We count erasing part of an erase group as one erase group. 1662 * For SD, the allocation units are always a power of 2. For MMC, the 1663 * erase group size is almost certainly also power of 2, but it does not 1664 * seem to insist on that in the JEDEC standard, so we fall back to 1665 * division in that case. SD may not specify an allocation unit size, 1666 * in which case the timeout is based on the number of write blocks. 1667 * 1668 * Note that the timeout for secure trim 2 will only be correct if the 1669 * number of erase groups specified is the same as the total of all 1670 * preceding secure trim 1 commands. Since the power may have been 1671 * lost since the secure trim 1 commands occurred, it is generally 1672 * impossible to calculate the secure trim 2 timeout correctly. 1673 */ 1674 if (card->erase_shift) 1675 qty += ((to >> card->erase_shift) - 1676 (from >> card->erase_shift)) + 1; 1677 else if (mmc_card_sd(card)) 1678 qty += to - from + 1; 1679 else 1680 qty += ((to / card->erase_size) - 1681 (from / card->erase_size)) + 1; 1682 1683 if (!mmc_card_blockaddr(card)) { 1684 from <<= 9; 1685 to <<= 9; 1686 } 1687 1688 if (mmc_card_sd(card)) 1689 cmd.opcode = SD_ERASE_WR_BLK_START; 1690 else 1691 cmd.opcode = MMC_ERASE_GROUP_START; 1692 cmd.arg = from; 1693 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1694 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1695 if (err) { 1696 pr_err("mmc_erase: group start error %d, " 1697 "status %#x\n", err, cmd.resp[0]); 1698 err = -EIO; 1699 goto out; 1700 } 1701 1702 memset(&cmd, 0, sizeof(struct mmc_command)); 1703 if (mmc_card_sd(card)) 1704 cmd.opcode = SD_ERASE_WR_BLK_END; 1705 else 1706 cmd.opcode = MMC_ERASE_GROUP_END; 1707 cmd.arg = to; 1708 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1709 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1710 if (err) { 1711 pr_err("mmc_erase: group end error %d, status %#x\n", 1712 err, cmd.resp[0]); 1713 err = -EIO; 1714 goto out; 1715 } 1716 1717 memset(&cmd, 0, sizeof(struct mmc_command)); 1718 cmd.opcode = MMC_ERASE; 1719 cmd.arg = arg; 1720 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1721 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); 1722 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1723 if (err) { 1724 pr_err("mmc_erase: erase error %d, status %#x\n", 1725 err, cmd.resp[0]); 1726 err = -EIO; 1727 goto out; 1728 } 1729 1730 if (mmc_host_is_spi(card->host)) 1731 goto out; 1732 1733 do { 1734 memset(&cmd, 0, sizeof(struct mmc_command)); 1735 cmd.opcode = MMC_SEND_STATUS; 1736 cmd.arg = card->rca << 16; 1737 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1738 /* Do not retry else we can't see errors */ 1739 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1740 if (err || (cmd.resp[0] & 0xFDF92000)) { 1741 pr_err("error %d requesting status %#x\n", 1742 err, cmd.resp[0]); 1743 err = -EIO; 1744 goto out; 1745 } 1746 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1747 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG); 1748 out: 1749 return err; 1750 } 1751 1752 /** 1753 * mmc_erase - erase sectors. 1754 * @card: card to erase 1755 * @from: first sector to erase 1756 * @nr: number of sectors to erase 1757 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) 1758 * 1759 * Caller must claim host before calling this function. 1760 */ 1761 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, 1762 unsigned int arg) 1763 { 1764 unsigned int rem, to = from + nr; 1765 1766 if (!(card->host->caps & MMC_CAP_ERASE) || 1767 !(card->csd.cmdclass & CCC_ERASE)) 1768 return -EOPNOTSUPP; 1769 1770 if (!card->erase_size) 1771 return -EOPNOTSUPP; 1772 1773 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) 1774 return -EOPNOTSUPP; 1775 1776 if ((arg & MMC_SECURE_ARGS) && 1777 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) 1778 return -EOPNOTSUPP; 1779 1780 if ((arg & MMC_TRIM_ARGS) && 1781 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) 1782 return -EOPNOTSUPP; 1783 1784 if (arg == MMC_SECURE_ERASE_ARG) { 1785 if (from % card->erase_size || nr % card->erase_size) 1786 return -EINVAL; 1787 } 1788 1789 if (arg == MMC_ERASE_ARG) { 1790 rem = from % card->erase_size; 1791 if (rem) { 1792 rem = card->erase_size - rem; 1793 from += rem; 1794 if (nr > rem) 1795 nr -= rem; 1796 else 1797 return 0; 1798 } 1799 rem = nr % card->erase_size; 1800 if (rem) 1801 nr -= rem; 1802 } 1803 1804 if (nr == 0) 1805 return 0; 1806 1807 to = from + nr; 1808 1809 if (to <= from) 1810 return -EINVAL; 1811 1812 /* 'from' and 'to' are inclusive */ 1813 to -= 1; 1814 1815 return mmc_do_erase(card, from, to, arg); 1816 } 1817 EXPORT_SYMBOL(mmc_erase); 1818 1819 int mmc_can_erase(struct mmc_card *card) 1820 { 1821 if ((card->host->caps & MMC_CAP_ERASE) && 1822 (card->csd.cmdclass & CCC_ERASE) && card->erase_size) 1823 return 1; 1824 return 0; 1825 } 1826 EXPORT_SYMBOL(mmc_can_erase); 1827 1828 int mmc_can_trim(struct mmc_card *card) 1829 { 1830 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 1831 return 1; 1832 if (mmc_can_discard(card)) 1833 return 1; 1834 return 0; 1835 } 1836 EXPORT_SYMBOL(mmc_can_trim); 1837 1838 int mmc_can_discard(struct mmc_card *card) 1839 { 1840 /* 1841 * As there's no way to detect the discard support bit at v4.5 1842 * use the s/w feature support filed. 1843 */ 1844 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) 1845 return 1; 1846 return 0; 1847 } 1848 EXPORT_SYMBOL(mmc_can_discard); 1849 1850 int mmc_can_sanitize(struct mmc_card *card) 1851 { 1852 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) 1853 return 1; 1854 return 0; 1855 } 1856 EXPORT_SYMBOL(mmc_can_sanitize); 1857 1858 int mmc_can_secure_erase_trim(struct mmc_card *card) 1859 { 1860 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) 1861 return 1; 1862 return 0; 1863 } 1864 EXPORT_SYMBOL(mmc_can_secure_erase_trim); 1865 1866 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, 1867 unsigned int nr) 1868 { 1869 if (!card->erase_size) 1870 return 0; 1871 if (from % card->erase_size || nr % card->erase_size) 1872 return 0; 1873 return 1; 1874 } 1875 EXPORT_SYMBOL(mmc_erase_group_aligned); 1876 1877 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card, 1878 unsigned int arg) 1879 { 1880 struct mmc_host *host = card->host; 1881 unsigned int max_discard, x, y, qty = 0, max_qty, timeout; 1882 unsigned int last_timeout = 0; 1883 1884 if (card->erase_shift) 1885 max_qty = UINT_MAX >> card->erase_shift; 1886 else if (mmc_card_sd(card)) 1887 max_qty = UINT_MAX; 1888 else 1889 max_qty = UINT_MAX / card->erase_size; 1890 1891 /* Find the largest qty with an OK timeout */ 1892 do { 1893 y = 0; 1894 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { 1895 timeout = mmc_erase_timeout(card, arg, qty + x); 1896 if (timeout > host->max_discard_to) 1897 break; 1898 if (timeout < last_timeout) 1899 break; 1900 last_timeout = timeout; 1901 y = x; 1902 } 1903 qty += y; 1904 } while (y); 1905 1906 if (!qty) 1907 return 0; 1908 1909 if (qty == 1) 1910 return 1; 1911 1912 /* Convert qty to sectors */ 1913 if (card->erase_shift) 1914 max_discard = --qty << card->erase_shift; 1915 else if (mmc_card_sd(card)) 1916 max_discard = qty; 1917 else 1918 max_discard = --qty * card->erase_size; 1919 1920 return max_discard; 1921 } 1922 1923 unsigned int mmc_calc_max_discard(struct mmc_card *card) 1924 { 1925 struct mmc_host *host = card->host; 1926 unsigned int max_discard, max_trim; 1927 1928 if (!host->max_discard_to) 1929 return UINT_MAX; 1930 1931 /* 1932 * Without erase_group_def set, MMC erase timeout depends on clock 1933 * frequence which can change. In that case, the best choice is 1934 * just the preferred erase size. 1935 */ 1936 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1)) 1937 return card->pref_erase; 1938 1939 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); 1940 if (mmc_can_trim(card)) { 1941 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); 1942 if (max_trim < max_discard) 1943 max_discard = max_trim; 1944 } else if (max_discard < card->erase_size) { 1945 max_discard = 0; 1946 } 1947 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n", 1948 mmc_hostname(host), max_discard, host->max_discard_to); 1949 return max_discard; 1950 } 1951 EXPORT_SYMBOL(mmc_calc_max_discard); 1952 1953 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1954 { 1955 struct mmc_command cmd = {0}; 1956 1957 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 1958 return 0; 1959 1960 cmd.opcode = MMC_SET_BLOCKLEN; 1961 cmd.arg = blocklen; 1962 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1963 return mmc_wait_for_cmd(card->host, &cmd, 5); 1964 } 1965 EXPORT_SYMBOL(mmc_set_blocklen); 1966 1967 static void mmc_hw_reset_for_init(struct mmc_host *host) 1968 { 1969 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1970 return; 1971 mmc_host_clk_hold(host); 1972 host->ops->hw_reset(host); 1973 mmc_host_clk_release(host); 1974 } 1975 1976 int mmc_can_reset(struct mmc_card *card) 1977 { 1978 u8 rst_n_function; 1979 1980 if (!mmc_card_mmc(card)) 1981 return 0; 1982 rst_n_function = card->ext_csd.rst_n_function; 1983 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) 1984 return 0; 1985 return 1; 1986 } 1987 EXPORT_SYMBOL(mmc_can_reset); 1988 1989 static int mmc_do_hw_reset(struct mmc_host *host, int check) 1990 { 1991 struct mmc_card *card = host->card; 1992 1993 if (!host->bus_ops->power_restore) 1994 return -EOPNOTSUPP; 1995 1996 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1997 return -EOPNOTSUPP; 1998 1999 if (!card) 2000 return -EINVAL; 2001 2002 if (!mmc_can_reset(card)) 2003 return -EOPNOTSUPP; 2004 2005 mmc_host_clk_hold(host); 2006 mmc_set_clock(host, host->f_init); 2007 2008 host->ops->hw_reset(host); 2009 2010 /* If the reset has happened, then a status command will fail */ 2011 if (check) { 2012 struct mmc_command cmd = {0}; 2013 int err; 2014 2015 cmd.opcode = MMC_SEND_STATUS; 2016 if (!mmc_host_is_spi(card->host)) 2017 cmd.arg = card->rca << 16; 2018 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2019 err = mmc_wait_for_cmd(card->host, &cmd, 0); 2020 if (!err) { 2021 mmc_host_clk_release(host); 2022 return -ENOSYS; 2023 } 2024 } 2025 2026 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR); 2027 if (mmc_host_is_spi(host)) { 2028 host->ios.chip_select = MMC_CS_HIGH; 2029 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 2030 } else { 2031 host->ios.chip_select = MMC_CS_DONTCARE; 2032 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 2033 } 2034 host->ios.bus_width = MMC_BUS_WIDTH_1; 2035 host->ios.timing = MMC_TIMING_LEGACY; 2036 mmc_set_ios(host); 2037 2038 mmc_host_clk_release(host); 2039 2040 return host->bus_ops->power_restore(host); 2041 } 2042 2043 int mmc_hw_reset(struct mmc_host *host) 2044 { 2045 return mmc_do_hw_reset(host, 0); 2046 } 2047 EXPORT_SYMBOL(mmc_hw_reset); 2048 2049 int mmc_hw_reset_check(struct mmc_host *host) 2050 { 2051 return mmc_do_hw_reset(host, 1); 2052 } 2053 EXPORT_SYMBOL(mmc_hw_reset_check); 2054 2055 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) 2056 { 2057 host->f_init = freq; 2058 2059 #ifdef CONFIG_MMC_DEBUG 2060 pr_info("%s: %s: trying to init card at %u Hz\n", 2061 mmc_hostname(host), __func__, host->f_init); 2062 #endif 2063 mmc_power_up(host); 2064 2065 /* 2066 * Some eMMCs (with VCCQ always on) may not be reset after power up, so 2067 * do a hardware reset if possible. 2068 */ 2069 mmc_hw_reset_for_init(host); 2070 2071 /* Initialization should be done at 3.3 V I/O voltage. */ 2072 mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); 2073 2074 /* 2075 * sdio_reset sends CMD52 to reset card. Since we do not know 2076 * if the card is being re-initialized, just send it. CMD52 2077 * should be ignored by SD/eMMC cards. 2078 */ 2079 sdio_reset(host); 2080 mmc_go_idle(host); 2081 2082 mmc_send_if_cond(host, host->ocr_avail); 2083 2084 /* Order's important: probe SDIO, then SD, then MMC */ 2085 if (!mmc_attach_sdio(host)) 2086 return 0; 2087 if (!mmc_attach_sd(host)) 2088 return 0; 2089 if (!mmc_attach_mmc(host)) 2090 return 0; 2091 2092 mmc_power_off(host); 2093 return -EIO; 2094 } 2095 2096 int _mmc_detect_card_removed(struct mmc_host *host) 2097 { 2098 int ret; 2099 2100 if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive) 2101 return 0; 2102 2103 if (!host->card || mmc_card_removed(host->card)) 2104 return 1; 2105 2106 ret = host->bus_ops->alive(host); 2107 if (ret) { 2108 mmc_card_set_removed(host->card); 2109 pr_debug("%s: card remove detected\n", mmc_hostname(host)); 2110 } 2111 2112 return ret; 2113 } 2114 2115 int mmc_detect_card_removed(struct mmc_host *host) 2116 { 2117 struct mmc_card *card = host->card; 2118 2119 WARN_ON(!host->claimed); 2120 /* 2121 * The card will be considered unchanged unless we have been asked to 2122 * detect a change or host requires polling to provide card detection. 2123 */ 2124 if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) 2125 return mmc_card_removed(card); 2126 2127 host->detect_change = 0; 2128 2129 return _mmc_detect_card_removed(host); 2130 } 2131 EXPORT_SYMBOL(mmc_detect_card_removed); 2132 2133 void mmc_rescan(struct work_struct *work) 2134 { 2135 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 2136 struct mmc_host *host = 2137 container_of(work, struct mmc_host, detect.work); 2138 int i; 2139 2140 if (host->rescan_disable) 2141 return; 2142 2143 mmc_bus_get(host); 2144 2145 /* 2146 * if there is a _removable_ card registered, check whether it is 2147 * still present 2148 */ 2149 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 2150 && !(host->caps & MMC_CAP_NONREMOVABLE)) 2151 host->bus_ops->detect(host); 2152 2153 host->detect_change = 0; 2154 2155 /* 2156 * Let mmc_bus_put() free the bus/bus_ops if we've found that 2157 * the card is no longer present. 2158 */ 2159 mmc_bus_put(host); 2160 mmc_bus_get(host); 2161 2162 /* if there still is a card present, stop here */ 2163 if (host->bus_ops != NULL) { 2164 mmc_bus_put(host); 2165 goto out; 2166 } 2167 2168 /* 2169 * Only we can add a new handler, so it's safe to 2170 * release the lock here. 2171 */ 2172 mmc_bus_put(host); 2173 2174 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 2175 goto out; 2176 2177 mmc_claim_host(host); 2178 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 2179 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 2180 break; 2181 if (freqs[i] <= host->f_min) 2182 break; 2183 } 2184 mmc_release_host(host); 2185 2186 out: 2187 if (host->caps & MMC_CAP_NEEDS_POLL) 2188 mmc_schedule_delayed_work(&host->detect, HZ); 2189 } 2190 2191 void mmc_start_host(struct mmc_host *host) 2192 { 2193 mmc_power_off(host); 2194 mmc_detect_change(host, 0); 2195 } 2196 2197 void mmc_stop_host(struct mmc_host *host) 2198 { 2199 #ifdef CONFIG_MMC_DEBUG 2200 unsigned long flags; 2201 spin_lock_irqsave(&host->lock, flags); 2202 host->removed = 1; 2203 spin_unlock_irqrestore(&host->lock, flags); 2204 #endif 2205 2206 if (host->caps & MMC_CAP_DISABLE) 2207 cancel_delayed_work(&host->disable); 2208 cancel_delayed_work_sync(&host->detect); 2209 mmc_flush_scheduled_work(); 2210 2211 /* clear pm flags now and let card drivers set them as needed */ 2212 host->pm_flags = 0; 2213 2214 mmc_bus_get(host); 2215 if (host->bus_ops && !host->bus_dead) { 2216 /* Calling bus_ops->remove() with a claimed host can deadlock */ 2217 if (host->bus_ops->remove) 2218 host->bus_ops->remove(host); 2219 2220 mmc_claim_host(host); 2221 mmc_detach_bus(host); 2222 mmc_power_off(host); 2223 mmc_release_host(host); 2224 mmc_bus_put(host); 2225 return; 2226 } 2227 mmc_bus_put(host); 2228 2229 BUG_ON(host->card); 2230 2231 mmc_power_off(host); 2232 } 2233 2234 int mmc_power_save_host(struct mmc_host *host) 2235 { 2236 int ret = 0; 2237 2238 #ifdef CONFIG_MMC_DEBUG 2239 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); 2240 #endif 2241 2242 mmc_bus_get(host); 2243 2244 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 2245 mmc_bus_put(host); 2246 return -EINVAL; 2247 } 2248 2249 if (host->bus_ops->power_save) 2250 ret = host->bus_ops->power_save(host); 2251 2252 mmc_bus_put(host); 2253 2254 mmc_power_off(host); 2255 2256 return ret; 2257 } 2258 EXPORT_SYMBOL(mmc_power_save_host); 2259 2260 int mmc_power_restore_host(struct mmc_host *host) 2261 { 2262 int ret; 2263 2264 #ifdef CONFIG_MMC_DEBUG 2265 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); 2266 #endif 2267 2268 mmc_bus_get(host); 2269 2270 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 2271 mmc_bus_put(host); 2272 return -EINVAL; 2273 } 2274 2275 mmc_power_up(host); 2276 ret = host->bus_ops->power_restore(host); 2277 2278 mmc_bus_put(host); 2279 2280 return ret; 2281 } 2282 EXPORT_SYMBOL(mmc_power_restore_host); 2283 2284 int mmc_card_awake(struct mmc_host *host) 2285 { 2286 int err = -ENOSYS; 2287 2288 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) 2289 return 0; 2290 2291 mmc_bus_get(host); 2292 2293 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 2294 err = host->bus_ops->awake(host); 2295 2296 mmc_bus_put(host); 2297 2298 return err; 2299 } 2300 EXPORT_SYMBOL(mmc_card_awake); 2301 2302 int mmc_card_sleep(struct mmc_host *host) 2303 { 2304 int err = -ENOSYS; 2305 2306 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) 2307 return 0; 2308 2309 mmc_bus_get(host); 2310 2311 if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep) 2312 err = host->bus_ops->sleep(host); 2313 2314 mmc_bus_put(host); 2315 2316 return err; 2317 } 2318 EXPORT_SYMBOL(mmc_card_sleep); 2319 2320 int mmc_card_can_sleep(struct mmc_host *host) 2321 { 2322 struct mmc_card *card = host->card; 2323 2324 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) 2325 return 1; 2326 return 0; 2327 } 2328 EXPORT_SYMBOL(mmc_card_can_sleep); 2329 2330 /* 2331 * Flush the cache to the non-volatile storage. 2332 */ 2333 int mmc_flush_cache(struct mmc_card *card) 2334 { 2335 struct mmc_host *host = card->host; 2336 int err = 0; 2337 2338 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL)) 2339 return err; 2340 2341 if (mmc_card_mmc(card) && 2342 (card->ext_csd.cache_size > 0) && 2343 (card->ext_csd.cache_ctrl & 1)) { 2344 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 2345 EXT_CSD_FLUSH_CACHE, 1, 0); 2346 if (err) 2347 pr_err("%s: cache flush error %d\n", 2348 mmc_hostname(card->host), err); 2349 } 2350 2351 return err; 2352 } 2353 EXPORT_SYMBOL(mmc_flush_cache); 2354 2355 /* 2356 * Turn the cache ON/OFF. 2357 * Turning the cache OFF shall trigger flushing of the data 2358 * to the non-volatile storage. 2359 */ 2360 int mmc_cache_ctrl(struct mmc_host *host, u8 enable) 2361 { 2362 struct mmc_card *card = host->card; 2363 unsigned int timeout; 2364 int err = 0; 2365 2366 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) || 2367 mmc_card_is_removable(host)) 2368 return err; 2369 2370 if (card && mmc_card_mmc(card) && 2371 (card->ext_csd.cache_size > 0)) { 2372 enable = !!enable; 2373 2374 if (card->ext_csd.cache_ctrl ^ enable) { 2375 timeout = enable ? card->ext_csd.generic_cmd6_time : 0; 2376 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 2377 EXT_CSD_CACHE_CTRL, enable, timeout); 2378 if (err) 2379 pr_err("%s: cache %s error %d\n", 2380 mmc_hostname(card->host), 2381 enable ? "on" : "off", 2382 err); 2383 else 2384 card->ext_csd.cache_ctrl = enable; 2385 } 2386 } 2387 2388 return err; 2389 } 2390 EXPORT_SYMBOL(mmc_cache_ctrl); 2391 2392 #ifdef CONFIG_PM 2393 2394 /** 2395 * mmc_suspend_host - suspend a host 2396 * @host: mmc host 2397 */ 2398 int mmc_suspend_host(struct mmc_host *host) 2399 { 2400 int err = 0; 2401 2402 if (host->caps & MMC_CAP_DISABLE) 2403 cancel_delayed_work(&host->disable); 2404 cancel_delayed_work(&host->detect); 2405 mmc_flush_scheduled_work(); 2406 if (mmc_try_claim_host(host)) { 2407 err = mmc_cache_ctrl(host, 0); 2408 mmc_do_release_host(host); 2409 } else { 2410 err = -EBUSY; 2411 } 2412 2413 if (err) 2414 goto out; 2415 2416 mmc_bus_get(host); 2417 if (host->bus_ops && !host->bus_dead) { 2418 2419 /* 2420 * A long response time is not acceptable for device drivers 2421 * when doing suspend. Prevent mmc_claim_host in the suspend 2422 * sequence, to potentially wait "forever" by trying to 2423 * pre-claim the host. 2424 */ 2425 if (mmc_try_claim_host(host)) { 2426 if (host->bus_ops->suspend) { 2427 err = host->bus_ops->suspend(host); 2428 } 2429 mmc_do_release_host(host); 2430 2431 if (err == -ENOSYS || !host->bus_ops->resume) { 2432 /* 2433 * We simply "remove" the card in this case. 2434 * It will be redetected on resume. (Calling 2435 * bus_ops->remove() with a claimed host can 2436 * deadlock.) 2437 */ 2438 if (host->bus_ops->remove) 2439 host->bus_ops->remove(host); 2440 mmc_claim_host(host); 2441 mmc_detach_bus(host); 2442 mmc_power_off(host); 2443 mmc_release_host(host); 2444 host->pm_flags = 0; 2445 err = 0; 2446 } 2447 } else { 2448 err = -EBUSY; 2449 } 2450 } 2451 mmc_bus_put(host); 2452 2453 if (!err && !mmc_card_keep_power(host)) 2454 mmc_power_off(host); 2455 2456 out: 2457 return err; 2458 } 2459 2460 EXPORT_SYMBOL(mmc_suspend_host); 2461 2462 /** 2463 * mmc_resume_host - resume a previously suspended host 2464 * @host: mmc host 2465 */ 2466 int mmc_resume_host(struct mmc_host *host) 2467 { 2468 int err = 0; 2469 2470 mmc_bus_get(host); 2471 if (host->bus_ops && !host->bus_dead) { 2472 if (!mmc_card_keep_power(host)) { 2473 mmc_power_up(host); 2474 mmc_select_voltage(host, host->ocr); 2475 /* 2476 * Tell runtime PM core we just powered up the card, 2477 * since it still believes the card is powered off. 2478 * Note that currently runtime PM is only enabled 2479 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD 2480 */ 2481 if (mmc_card_sdio(host->card) && 2482 (host->caps & MMC_CAP_POWER_OFF_CARD)) { 2483 pm_runtime_disable(&host->card->dev); 2484 pm_runtime_set_active(&host->card->dev); 2485 pm_runtime_enable(&host->card->dev); 2486 } 2487 } 2488 BUG_ON(!host->bus_ops->resume); 2489 err = host->bus_ops->resume(host); 2490 if (err) { 2491 pr_warning("%s: error %d during resume " 2492 "(card was removed?)\n", 2493 mmc_hostname(host), err); 2494 err = 0; 2495 } 2496 } 2497 host->pm_flags &= ~MMC_PM_KEEP_POWER; 2498 mmc_bus_put(host); 2499 2500 return err; 2501 } 2502 EXPORT_SYMBOL(mmc_resume_host); 2503 2504 /* Do the card removal on suspend if card is assumed removeable 2505 * Do that in pm notifier while userspace isn't yet frozen, so we will be able 2506 to sync the card. 2507 */ 2508 int mmc_pm_notify(struct notifier_block *notify_block, 2509 unsigned long mode, void *unused) 2510 { 2511 struct mmc_host *host = container_of( 2512 notify_block, struct mmc_host, pm_notify); 2513 unsigned long flags; 2514 2515 2516 switch (mode) { 2517 case PM_HIBERNATION_PREPARE: 2518 case PM_SUSPEND_PREPARE: 2519 2520 spin_lock_irqsave(&host->lock, flags); 2521 host->rescan_disable = 1; 2522 host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; 2523 spin_unlock_irqrestore(&host->lock, flags); 2524 cancel_delayed_work_sync(&host->detect); 2525 2526 if (!host->bus_ops || host->bus_ops->suspend) 2527 break; 2528 2529 /* Calling bus_ops->remove() with a claimed host can deadlock */ 2530 if (host->bus_ops->remove) 2531 host->bus_ops->remove(host); 2532 2533 mmc_claim_host(host); 2534 mmc_detach_bus(host); 2535 mmc_power_off(host); 2536 mmc_release_host(host); 2537 host->pm_flags = 0; 2538 break; 2539 2540 case PM_POST_SUSPEND: 2541 case PM_POST_HIBERNATION: 2542 case PM_POST_RESTORE: 2543 2544 spin_lock_irqsave(&host->lock, flags); 2545 host->rescan_disable = 0; 2546 host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG; 2547 spin_unlock_irqrestore(&host->lock, flags); 2548 mmc_detect_change(host, 0); 2549 2550 } 2551 2552 return 0; 2553 } 2554 #endif 2555 2556 static int __init mmc_init(void) 2557 { 2558 int ret; 2559 2560 workqueue = alloc_ordered_workqueue("kmmcd", 0); 2561 if (!workqueue) 2562 return -ENOMEM; 2563 2564 ret = mmc_register_bus(); 2565 if (ret) 2566 goto destroy_workqueue; 2567 2568 ret = mmc_register_host_class(); 2569 if (ret) 2570 goto unregister_bus; 2571 2572 ret = sdio_register_bus(); 2573 if (ret) 2574 goto unregister_host_class; 2575 2576 return 0; 2577 2578 unregister_host_class: 2579 mmc_unregister_host_class(); 2580 unregister_bus: 2581 mmc_unregister_bus(); 2582 destroy_workqueue: 2583 destroy_workqueue(workqueue); 2584 2585 return ret; 2586 } 2587 2588 static void __exit mmc_exit(void) 2589 { 2590 sdio_unregister_bus(); 2591 mmc_unregister_host_class(); 2592 mmc_unregister_bus(); 2593 destroy_workqueue(workqueue); 2594 } 2595 2596 subsys_initcall(mmc_init); 2597 module_exit(mmc_exit); 2598 2599 MODULE_LICENSE("GPL"); 2600