/*
 * linux/drivers/mmc/core/core.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume. Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
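/*
 * Illustrative sketch, kept compiled out: how a host controller driver's
 * completion path typically hands a finished request back to the core
 * with mmc_request_done(). The "foo" structures and register accessors
 * below are hypothetical, not part of any real driver.
 */
#if 0
static irqreturn_t foo_mmc_irq(int irq, void *dev_id)
{
	struct foo_mmc_host *foo = dev_id;
	struct mmc_request *mrq = foo->mrq;

	/* Record the outcome reported by the (hypothetical) hardware. */
	mrq->cmd->error = foo_read_cmd_error(foo);
	if (mrq->data)
		mrq->data->bytes_xfered = foo_read_bytes_done(foo);

	foo->mrq = NULL;

	/* Hand the completed request back to the MMC core. */
	mmc_request_done(foo->mmc, mrq);

	return IRQ_HANDLED;
}
#endif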
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	mmc_start_request(host, mrq);
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries)
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq, is_first_req);
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}
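/*
 * Illustrative sketch, kept compiled out: the double-buffering pattern
 * that mmc_start_req() below enables. While the previous request is
 * still transferring, the next one is prepared (DMA mapping etc.) via
 * ->pre_req() and then handed over. The foo_*() helpers and the array
 * of async requests are hypothetical.
 */
#if 0
static void foo_issue_rw_requests(struct mmc_host *host,
				  struct mmc_async_req *areq[], int count)
{
	struct mmc_async_req *done;
	int i, err;

	for (i = 0; i < count; i++) {
		/* Starts areq[i] and waits for the previously started one. */
		done = mmc_start_req(host, areq[i], &err);
		if (done)
			foo_complete_transfer(done, err);
	}

	/* Flush the last in-flight request. */
	done = mmc_start_req(host, NULL, &err);
	if (done)
		foo_complete_transfer(done, err);
}
#endif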
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one, then return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL if none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
		if (err) {
			/* post process the completed failed request */
			mmc_post_req(host, host->areq->mrq, 0);
			if (areq)
				/*
				 * Cancel the new prepared request, because
				 * it can't run until the failed
				 * request has been properly handled.
				 */
				mmc_post_req(host, areq->mrq, -EINVAL);

			host->areq = NULL;
			goto out;
		}
	}

	if (areq)
		__mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	host->areq = areq;
out:
	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt request
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, and poll the card status
 * until it is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/*
	 * If the card status is in PRG-state, we can send the HPI command.
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
		do {
			/*
			 * We don't know when the HPI command will finish
			 * processing, so we need to resend HPI until out
			 * of prg-state, and keep checking the card status
			 * with SEND_STATUS. If a timeout error occurs when
			 * sending the HPI command, we are already out of
			 * prg-state.
			 */
			err = mmc_send_hpi_cmd(card, &status);
			if (err)
				pr_debug("%s: abort HPI (%d error)\n",
					 mmc_hostname(card->host), err);

			err = mmc_send_status(card, &status);
			if (err)
				break;
		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
	} else
		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
448 */ 449 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 450 { 451 struct mmc_request mrq = {NULL}; 452 453 WARN_ON(!host->claimed); 454 455 memset(cmd->resp, 0, sizeof(cmd->resp)); 456 cmd->retries = retries; 457 458 mrq.cmd = cmd; 459 cmd->data = NULL; 460 461 mmc_wait_for_req(host, &mrq); 462 463 return cmd->error; 464 } 465 466 EXPORT_SYMBOL(mmc_wait_for_cmd); 467 468 /** 469 * mmc_set_data_timeout - set the timeout for a data command 470 * @data: data phase for command 471 * @card: the MMC card associated with the data transfer 472 * 473 * Computes the data timeout parameters according to the 474 * correct algorithm given the card type. 475 */ 476 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) 477 { 478 unsigned int mult; 479 480 /* 481 * SDIO cards only define an upper 1 s limit on access. 482 */ 483 if (mmc_card_sdio(card)) { 484 data->timeout_ns = 1000000000; 485 data->timeout_clks = 0; 486 return; 487 } 488 489 /* 490 * SD cards use a 100 multiplier rather than 10 491 */ 492 mult = mmc_card_sd(card) ? 100 : 10; 493 494 /* 495 * Scale up the multiplier (and therefore the timeout) by 496 * the r2w factor for writes. 497 */ 498 if (data->flags & MMC_DATA_WRITE) 499 mult <<= card->csd.r2w_factor; 500 501 data->timeout_ns = card->csd.tacc_ns * mult; 502 data->timeout_clks = card->csd.tacc_clks * mult; 503 504 /* 505 * SD cards also have an upper limit on the timeout. 506 */ 507 if (mmc_card_sd(card)) { 508 unsigned int timeout_us, limit_us; 509 510 timeout_us = data->timeout_ns / 1000; 511 if (mmc_host_clk_rate(card->host)) 512 timeout_us += data->timeout_clks * 1000 / 513 (mmc_host_clk_rate(card->host) / 1000); 514 515 if (data->flags & MMC_DATA_WRITE) 516 /* 517 * The limit is really 250 ms, but that is 518 * insufficient for some crappy cards. 519 */ 520 limit_us = 300000; 521 else 522 limit_us = 100000; 523 524 /* 525 * SDHC cards always use these fixed values. 526 */ 527 if (timeout_us > limit_us || mmc_card_blockaddr(card)) { 528 data->timeout_ns = limit_us * 1000; 529 data->timeout_clks = 0; 530 } 531 } 532 /* 533 * Some cards need very high timeouts if driven in SPI mode. 534 * The worst observed timeout was 900ms after writing a 535 * continuous stream of data until the internal logic 536 * overflowed. 537 */ 538 if (mmc_host_is_spi(card->host)) { 539 if (data->flags & MMC_DATA_WRITE) { 540 if (data->timeout_ns < 1000000000) 541 data->timeout_ns = 1000000000; /* 1s */ 542 } else { 543 if (data->timeout_ns < 100000000) 544 data->timeout_ns = 100000000; /* 100ms */ 545 } 546 } 547 } 548 EXPORT_SYMBOL(mmc_set_data_timeout); 549 550 /** 551 * mmc_align_data_size - pads a transfer size to a more optimal value 552 * @card: the MMC card associated with the data transfer 553 * @sz: original transfer size 554 * 555 * Pads the original data size with a number of extra bytes in 556 * order to avoid controller bugs and/or performance hits 557 * (e.g. some controllers revert to PIO for certain sizes). 558 * 559 * Returns the improved size, which might be unmodified. 560 * 561 * Note that this function is only relevant when issuing a 562 * single scatter gather entry. 563 */ 564 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) 565 { 566 /* 567 * FIXME: We don't have a system for the controller to tell 568 * the core about its problems yet, so for now we just 32-bit 569 * align the size. 
570 */ 571 sz = ((sz + 3) / 4) * 4; 572 573 return sz; 574 } 575 EXPORT_SYMBOL(mmc_align_data_size); 576 577 /** 578 * mmc_host_enable - enable a host. 579 * @host: mmc host to enable 580 * 581 * Hosts that support power saving can use the 'enable' and 'disable' 582 * methods to exit and enter power saving states. For more information 583 * see comments for struct mmc_host_ops. 584 */ 585 int mmc_host_enable(struct mmc_host *host) 586 { 587 if (!(host->caps & MMC_CAP_DISABLE)) 588 return 0; 589 590 if (host->en_dis_recurs) 591 return 0; 592 593 if (host->nesting_cnt++) 594 return 0; 595 596 cancel_delayed_work_sync(&host->disable); 597 598 if (host->enabled) 599 return 0; 600 601 if (host->ops->enable) { 602 int err; 603 604 host->en_dis_recurs = 1; 605 err = host->ops->enable(host); 606 host->en_dis_recurs = 0; 607 608 if (err) { 609 pr_debug("%s: enable error %d\n", 610 mmc_hostname(host), err); 611 return err; 612 } 613 } 614 host->enabled = 1; 615 return 0; 616 } 617 EXPORT_SYMBOL(mmc_host_enable); 618 619 static int mmc_host_do_disable(struct mmc_host *host, int lazy) 620 { 621 if (host->ops->disable) { 622 int err; 623 624 host->en_dis_recurs = 1; 625 err = host->ops->disable(host, lazy); 626 host->en_dis_recurs = 0; 627 628 if (err < 0) { 629 pr_debug("%s: disable error %d\n", 630 mmc_hostname(host), err); 631 return err; 632 } 633 if (err > 0) { 634 unsigned long delay = msecs_to_jiffies(err); 635 636 mmc_schedule_delayed_work(&host->disable, delay); 637 } 638 } 639 host->enabled = 0; 640 return 0; 641 } 642 643 /** 644 * mmc_host_disable - disable a host. 645 * @host: mmc host to disable 646 * 647 * Hosts that support power saving can use the 'enable' and 'disable' 648 * methods to exit and enter power saving states. For more information 649 * see comments for struct mmc_host_ops. 650 */ 651 int mmc_host_disable(struct mmc_host *host) 652 { 653 int err; 654 655 if (!(host->caps & MMC_CAP_DISABLE)) 656 return 0; 657 658 if (host->en_dis_recurs) 659 return 0; 660 661 if (--host->nesting_cnt) 662 return 0; 663 664 if (!host->enabled) 665 return 0; 666 667 err = mmc_host_do_disable(host, 0); 668 return err; 669 } 670 EXPORT_SYMBOL(mmc_host_disable); 671 672 /** 673 * __mmc_claim_host - exclusively claim a host 674 * @host: mmc host to claim 675 * @abort: whether or not the operation should be aborted 676 * 677 * Claim a host for a set of operations. If @abort is non null and 678 * dereference a non-zero value then this will return prematurely with 679 * that non-zero value without acquiring the lock. Returns zero 680 * with the lock held otherwise. 681 */ 682 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) 683 { 684 DECLARE_WAITQUEUE(wait, current); 685 unsigned long flags; 686 int stop; 687 688 might_sleep(); 689 690 add_wait_queue(&host->wq, &wait); 691 spin_lock_irqsave(&host->lock, flags); 692 while (1) { 693 set_current_state(TASK_UNINTERRUPTIBLE); 694 stop = abort ? 
atomic_read(abort) : 0; 695 if (stop || !host->claimed || host->claimer == current) 696 break; 697 spin_unlock_irqrestore(&host->lock, flags); 698 schedule(); 699 spin_lock_irqsave(&host->lock, flags); 700 } 701 set_current_state(TASK_RUNNING); 702 if (!stop) { 703 host->claimed = 1; 704 host->claimer = current; 705 host->claim_cnt += 1; 706 } else 707 wake_up(&host->wq); 708 spin_unlock_irqrestore(&host->lock, flags); 709 remove_wait_queue(&host->wq, &wait); 710 if (!stop) 711 mmc_host_enable(host); 712 return stop; 713 } 714 715 EXPORT_SYMBOL(__mmc_claim_host); 716 717 /** 718 * mmc_try_claim_host - try exclusively to claim a host 719 * @host: mmc host to claim 720 * 721 * Returns %1 if the host is claimed, %0 otherwise. 722 */ 723 int mmc_try_claim_host(struct mmc_host *host) 724 { 725 int claimed_host = 0; 726 unsigned long flags; 727 728 spin_lock_irqsave(&host->lock, flags); 729 if (!host->claimed || host->claimer == current) { 730 host->claimed = 1; 731 host->claimer = current; 732 host->claim_cnt += 1; 733 claimed_host = 1; 734 } 735 spin_unlock_irqrestore(&host->lock, flags); 736 return claimed_host; 737 } 738 EXPORT_SYMBOL(mmc_try_claim_host); 739 740 /** 741 * mmc_do_release_host - release a claimed host 742 * @host: mmc host to release 743 * 744 * If you successfully claimed a host, this function will 745 * release it again. 746 */ 747 void mmc_do_release_host(struct mmc_host *host) 748 { 749 unsigned long flags; 750 751 spin_lock_irqsave(&host->lock, flags); 752 if (--host->claim_cnt) { 753 /* Release for nested claim */ 754 spin_unlock_irqrestore(&host->lock, flags); 755 } else { 756 host->claimed = 0; 757 host->claimer = NULL; 758 spin_unlock_irqrestore(&host->lock, flags); 759 wake_up(&host->wq); 760 } 761 } 762 EXPORT_SYMBOL(mmc_do_release_host); 763 764 void mmc_host_deeper_disable(struct work_struct *work) 765 { 766 struct mmc_host *host = 767 container_of(work, struct mmc_host, disable.work); 768 769 /* If the host is claimed then we do not want to disable it anymore */ 770 if (!mmc_try_claim_host(host)) 771 return; 772 mmc_host_do_disable(host, 1); 773 mmc_do_release_host(host); 774 } 775 776 /** 777 * mmc_host_lazy_disable - lazily disable a host. 778 * @host: mmc host to disable 779 * 780 * Hosts that support power saving can use the 'enable' and 'disable' 781 * methods to exit and enter power saving states. For more information 782 * see comments for struct mmc_host_ops. 783 */ 784 int mmc_host_lazy_disable(struct mmc_host *host) 785 { 786 if (!(host->caps & MMC_CAP_DISABLE)) 787 return 0; 788 789 if (host->en_dis_recurs) 790 return 0; 791 792 if (--host->nesting_cnt) 793 return 0; 794 795 if (!host->enabled) 796 return 0; 797 798 if (host->disable_delay) { 799 mmc_schedule_delayed_work(&host->disable, 800 msecs_to_jiffies(host->disable_delay)); 801 return 0; 802 } else 803 return mmc_host_do_disable(host, 1); 804 } 805 EXPORT_SYMBOL(mmc_host_lazy_disable); 806 807 /** 808 * mmc_release_host - release a host 809 * @host: mmc host to release 810 * 811 * Release a MMC host, allowing others to claim the host 812 * for their operations. 813 */ 814 void mmc_release_host(struct mmc_host *host) 815 { 816 WARN_ON(!host->claimed); 817 818 mmc_host_lazy_disable(host); 819 820 mmc_do_release_host(host); 821 } 822 823 EXPORT_SYMBOL(mmc_release_host); 824 825 /* 826 * Internal function that does the actual ios call to the host driver, 827 * optionally printing some debug output. 
828 */ 829 static inline void mmc_set_ios(struct mmc_host *host) 830 { 831 struct mmc_ios *ios = &host->ios; 832 833 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " 834 "width %u timing %u\n", 835 mmc_hostname(host), ios->clock, ios->bus_mode, 836 ios->power_mode, ios->chip_select, ios->vdd, 837 ios->bus_width, ios->timing); 838 839 if (ios->clock > 0) 840 mmc_set_ungated(host); 841 host->ops->set_ios(host, ios); 842 } 843 844 /* 845 * Control chip select pin on a host. 846 */ 847 void mmc_set_chip_select(struct mmc_host *host, int mode) 848 { 849 mmc_host_clk_hold(host); 850 host->ios.chip_select = mode; 851 mmc_set_ios(host); 852 mmc_host_clk_release(host); 853 } 854 855 /* 856 * Sets the host clock to the highest possible frequency that 857 * is below "hz". 858 */ 859 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) 860 { 861 WARN_ON(hz < host->f_min); 862 863 if (hz > host->f_max) 864 hz = host->f_max; 865 866 host->ios.clock = hz; 867 mmc_set_ios(host); 868 } 869 870 void mmc_set_clock(struct mmc_host *host, unsigned int hz) 871 { 872 mmc_host_clk_hold(host); 873 __mmc_set_clock(host, hz); 874 mmc_host_clk_release(host); 875 } 876 877 #ifdef CONFIG_MMC_CLKGATE 878 /* 879 * This gates the clock by setting it to 0 Hz. 880 */ 881 void mmc_gate_clock(struct mmc_host *host) 882 { 883 unsigned long flags; 884 885 spin_lock_irqsave(&host->clk_lock, flags); 886 host->clk_old = host->ios.clock; 887 host->ios.clock = 0; 888 host->clk_gated = true; 889 spin_unlock_irqrestore(&host->clk_lock, flags); 890 mmc_set_ios(host); 891 } 892 893 /* 894 * This restores the clock from gating by using the cached 895 * clock value. 896 */ 897 void mmc_ungate_clock(struct mmc_host *host) 898 { 899 /* 900 * We should previously have gated the clock, so the clock shall 901 * be 0 here! The clock may however be 0 during initialization, 902 * when some request operations are performed before setting 903 * the frequency. When ungate is requested in that situation 904 * we just ignore the call. 905 */ 906 if (host->clk_old) { 907 BUG_ON(host->ios.clock); 908 /* This call will also set host->clk_gated to false */ 909 __mmc_set_clock(host, host->clk_old); 910 } 911 } 912 913 void mmc_set_ungated(struct mmc_host *host) 914 { 915 unsigned long flags; 916 917 /* 918 * We've been given a new frequency while the clock is gated, 919 * so make sure we regard this as ungating it. 920 */ 921 spin_lock_irqsave(&host->clk_lock, flags); 922 host->clk_gated = false; 923 spin_unlock_irqrestore(&host->clk_lock, flags); 924 } 925 926 #else 927 void mmc_set_ungated(struct mmc_host *host) 928 { 929 } 930 #endif 931 932 /* 933 * Change the bus mode (open drain/push-pull) of a host. 934 */ 935 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 936 { 937 mmc_host_clk_hold(host); 938 host->ios.bus_mode = mode; 939 mmc_set_ios(host); 940 mmc_host_clk_release(host); 941 } 942 943 /* 944 * Change data bus width of a host. 945 */ 946 void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 947 { 948 mmc_host_clk_hold(host); 949 host->ios.bus_width = width; 950 mmc_set_ios(host); 951 mmc_host_clk_release(host); 952 } 953 954 /** 955 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number 956 * @vdd: voltage (mV) 957 * @low_bits: prefer low bits in boundary cases 958 * 959 * This function returns the OCR bit number according to the provided @vdd 960 * value. If conversion is not possible a negative errno value returned. 
961 * 962 * Depending on the @low_bits flag the function prefers low or high OCR bits 963 * on boundary voltages. For example, 964 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); 965 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); 966 * 967 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 968 */ 969 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits) 970 { 971 const int max_bit = ilog2(MMC_VDD_35_36); 972 int bit; 973 974 if (vdd < 1650 || vdd > 3600) 975 return -EINVAL; 976 977 if (vdd >= 1650 && vdd <= 1950) 978 return ilog2(MMC_VDD_165_195); 979 980 if (low_bits) 981 vdd -= 1; 982 983 /* Base 2000 mV, step 100 mV, bit's base 8. */ 984 bit = (vdd - 2000) / 100 + 8; 985 if (bit > max_bit) 986 return max_bit; 987 return bit; 988 } 989 990 /** 991 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask 992 * @vdd_min: minimum voltage value (mV) 993 * @vdd_max: maximum voltage value (mV) 994 * 995 * This function returns the OCR mask bits according to the provided @vdd_min 996 * and @vdd_max values. If conversion is not possible the function returns 0. 997 * 998 * Notes wrt boundary cases: 999 * This function sets the OCR bits for all boundary voltages, for example 1000 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 | 1001 * MMC_VDD_34_35 mask. 1002 */ 1003 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) 1004 { 1005 u32 mask = 0; 1006 1007 if (vdd_max < vdd_min) 1008 return 0; 1009 1010 /* Prefer high bits for the boundary vdd_max values. */ 1011 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false); 1012 if (vdd_max < 0) 1013 return 0; 1014 1015 /* Prefer low bits for the boundary vdd_min values. */ 1016 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true); 1017 if (vdd_min < 0) 1018 return 0; 1019 1020 /* Fill the mask, from max bit to min bit. */ 1021 while (vdd_max >= vdd_min) 1022 mask |= 1 << vdd_max--; 1023 1024 return mask; 1025 } 1026 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); 1027 1028 #ifdef CONFIG_REGULATOR 1029 1030 /** 1031 * mmc_regulator_get_ocrmask - return mask of supported voltages 1032 * @supply: regulator to use 1033 * 1034 * This returns either a negative errno, or a mask of voltages that 1035 * can be provided to MMC/SD/SDIO devices using the specified voltage 1036 * regulator. This would normally be called before registering the 1037 * MMC host adapter. 1038 */ 1039 int mmc_regulator_get_ocrmask(struct regulator *supply) 1040 { 1041 int result = 0; 1042 int count; 1043 int i; 1044 1045 count = regulator_count_voltages(supply); 1046 if (count < 0) 1047 return count; 1048 1049 for (i = 0; i < count; i++) { 1050 int vdd_uV; 1051 int vdd_mV; 1052 1053 vdd_uV = regulator_list_voltage(supply, i); 1054 if (vdd_uV <= 0) 1055 continue; 1056 1057 vdd_mV = vdd_uV / 1000; 1058 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); 1059 } 1060 1061 return result; 1062 } 1063 EXPORT_SYMBOL(mmc_regulator_get_ocrmask); 1064 1065 /** 1066 * mmc_regulator_set_ocr - set regulator to match host->ios voltage 1067 * @mmc: the host to regulate 1068 * @supply: regulator to use 1069 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) 1070 * 1071 * Returns zero on success, else negative errno. 1072 * 1073 * MMC host drivers may use this to enable or disable a regulator using 1074 * a particular supply voltage. This would normally be called from the 1075 * set_ios() method. 
1076 */ 1077 int mmc_regulator_set_ocr(struct mmc_host *mmc, 1078 struct regulator *supply, 1079 unsigned short vdd_bit) 1080 { 1081 int result = 0; 1082 int min_uV, max_uV; 1083 1084 if (vdd_bit) { 1085 int tmp; 1086 int voltage; 1087 1088 /* REVISIT mmc_vddrange_to_ocrmask() may have set some 1089 * bits this regulator doesn't quite support ... don't 1090 * be too picky, most cards and regulators are OK with 1091 * a 0.1V range goof (it's a small error percentage). 1092 */ 1093 tmp = vdd_bit - ilog2(MMC_VDD_165_195); 1094 if (tmp == 0) { 1095 min_uV = 1650 * 1000; 1096 max_uV = 1950 * 1000; 1097 } else { 1098 min_uV = 1900 * 1000 + tmp * 100 * 1000; 1099 max_uV = min_uV + 100 * 1000; 1100 } 1101 1102 /* avoid needless changes to this voltage; the regulator 1103 * might not allow this operation 1104 */ 1105 voltage = regulator_get_voltage(supply); 1106 if (voltage < 0) 1107 result = voltage; 1108 else if (voltage < min_uV || voltage > max_uV) 1109 result = regulator_set_voltage(supply, min_uV, max_uV); 1110 else 1111 result = 0; 1112 1113 if (result == 0 && !mmc->regulator_enabled) { 1114 result = regulator_enable(supply); 1115 if (!result) 1116 mmc->regulator_enabled = true; 1117 } 1118 } else if (mmc->regulator_enabled) { 1119 result = regulator_disable(supply); 1120 if (result == 0) 1121 mmc->regulator_enabled = false; 1122 } 1123 1124 if (result) 1125 dev_err(mmc_dev(mmc), 1126 "could not set regulator OCR (%d)\n", result); 1127 return result; 1128 } 1129 EXPORT_SYMBOL(mmc_regulator_set_ocr); 1130 1131 #endif /* CONFIG_REGULATOR */ 1132 1133 /* 1134 * Mask off any voltages we don't support and select 1135 * the lowest voltage 1136 */ 1137 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) 1138 { 1139 int bit; 1140 1141 ocr &= host->ocr_avail; 1142 1143 bit = ffs(ocr); 1144 if (bit) { 1145 bit -= 1; 1146 1147 ocr &= 3 << bit; 1148 1149 mmc_host_clk_hold(host); 1150 host->ios.vdd = bit; 1151 mmc_set_ios(host); 1152 mmc_host_clk_release(host); 1153 } else { 1154 pr_warning("%s: host doesn't support card's voltages\n", 1155 mmc_hostname(host)); 1156 ocr = 0; 1157 } 1158 1159 return ocr; 1160 } 1161 1162 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11) 1163 { 1164 struct mmc_command cmd = {0}; 1165 int err = 0; 1166 1167 BUG_ON(!host); 1168 1169 /* 1170 * Send CMD11 only if the request is to switch the card to 1171 * 1.8V signalling. 1172 */ 1173 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) { 1174 cmd.opcode = SD_SWITCH_VOLTAGE; 1175 cmd.arg = 0; 1176 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1177 1178 err = mmc_wait_for_cmd(host, &cmd, 0); 1179 if (err) 1180 return err; 1181 1182 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) 1183 return -EIO; 1184 } 1185 1186 host->ios.signal_voltage = signal_voltage; 1187 1188 if (host->ops->start_signal_voltage_switch) 1189 err = host->ops->start_signal_voltage_switch(host, &host->ios); 1190 1191 return err; 1192 } 1193 1194 /* 1195 * Select timing parameters for host. 1196 */ 1197 void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1198 { 1199 mmc_host_clk_hold(host); 1200 host->ios.timing = timing; 1201 mmc_set_ios(host); 1202 mmc_host_clk_release(host); 1203 } 1204 1205 /* 1206 * Select appropriate driver type for host. 1207 */ 1208 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1209 { 1210 mmc_host_clk_hold(host); 1211 host->ios.drv_type = drv_type; 1212 mmc_set_ios(host); 1213 mmc_host_clk_release(host); 1214 } 1215 1216 /* 1217 * Apply power to the MMC stack. 
 * This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int notify_type;
	unsigned int timeout;
	int err;

	mmc_host_clk_hold(host);

	card = host->card;
	host->ios.clock = 0;
	host->ios.vdd = 0;

	if (card && mmc_card_mmc(card) &&
	    (card->poweroff_notify_state == MMC_POWERED_ON)) {

		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
			notify_type = EXT_CSD_POWER_OFF_SHORT;
			timeout = card->ext_csd.generic_cmd6_time;
			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
		} else {
			notify_type = EXT_CSD_POWER_OFF_LONG;
			timeout = card->ext_csd.power_off_longtime;
			card->poweroff_notify_state = MMC_POWEROFF_LONG;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 notify_type, timeout);

		if (err && err != -EBADMSG)
			pr_err("Device failed to respond within %d poweroff "
			       "time. Forcefully powering down the device\n",
			       timeout);

		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
1336 */ 1337 static void __mmc_release_bus(struct mmc_host *host) 1338 { 1339 BUG_ON(!host); 1340 BUG_ON(host->bus_refs); 1341 BUG_ON(!host->bus_dead); 1342 1343 host->bus_ops = NULL; 1344 } 1345 1346 /* 1347 * Increase reference count of bus operator 1348 */ 1349 static inline void mmc_bus_get(struct mmc_host *host) 1350 { 1351 unsigned long flags; 1352 1353 spin_lock_irqsave(&host->lock, flags); 1354 host->bus_refs++; 1355 spin_unlock_irqrestore(&host->lock, flags); 1356 } 1357 1358 /* 1359 * Decrease reference count of bus operator and free it if 1360 * it is the last reference. 1361 */ 1362 static inline void mmc_bus_put(struct mmc_host *host) 1363 { 1364 unsigned long flags; 1365 1366 spin_lock_irqsave(&host->lock, flags); 1367 host->bus_refs--; 1368 if ((host->bus_refs == 0) && host->bus_ops) 1369 __mmc_release_bus(host); 1370 spin_unlock_irqrestore(&host->lock, flags); 1371 } 1372 1373 /* 1374 * Assign a mmc bus handler to a host. Only one bus handler may control a 1375 * host at any given time. 1376 */ 1377 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) 1378 { 1379 unsigned long flags; 1380 1381 BUG_ON(!host); 1382 BUG_ON(!ops); 1383 1384 WARN_ON(!host->claimed); 1385 1386 spin_lock_irqsave(&host->lock, flags); 1387 1388 BUG_ON(host->bus_ops); 1389 BUG_ON(host->bus_refs); 1390 1391 host->bus_ops = ops; 1392 host->bus_refs = 1; 1393 host->bus_dead = 0; 1394 1395 spin_unlock_irqrestore(&host->lock, flags); 1396 } 1397 1398 /* 1399 * Remove the current bus handler from a host. 1400 */ 1401 void mmc_detach_bus(struct mmc_host *host) 1402 { 1403 unsigned long flags; 1404 1405 BUG_ON(!host); 1406 1407 WARN_ON(!host->claimed); 1408 WARN_ON(!host->bus_ops); 1409 1410 spin_lock_irqsave(&host->lock, flags); 1411 1412 host->bus_dead = 1; 1413 1414 spin_unlock_irqrestore(&host->lock, flags); 1415 1416 mmc_bus_put(host); 1417 } 1418 1419 /** 1420 * mmc_detect_change - process change of state on a MMC socket 1421 * @host: host which changed state. 1422 * @delay: optional delay to wait before detection (jiffies) 1423 * 1424 * MMC drivers should call this when they detect a card has been 1425 * inserted or removed. The MMC layer will confirm that any 1426 * present card is still functional, and initialize any newly 1427 * inserted. 1428 */ 1429 void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1430 { 1431 #ifdef CONFIG_MMC_DEBUG 1432 unsigned long flags; 1433 spin_lock_irqsave(&host->lock, flags); 1434 WARN_ON(host->removed); 1435 spin_unlock_irqrestore(&host->lock, flags); 1436 #endif 1437 1438 mmc_schedule_delayed_work(&host->detect, delay); 1439 } 1440 1441 EXPORT_SYMBOL(mmc_detect_change); 1442 1443 void mmc_init_erase(struct mmc_card *card) 1444 { 1445 unsigned int sz; 1446 1447 if (is_power_of_2(card->erase_size)) 1448 card->erase_shift = ffs(card->erase_size) - 1; 1449 else 1450 card->erase_shift = 0; 1451 1452 /* 1453 * It is possible to erase an arbitrarily large area of an SD or MMC 1454 * card. That is not desirable because it can take a long time 1455 * (minutes) potentially delaying more important I/O, and also the 1456 * timeout calculations become increasingly hugely over-estimated. 1457 * Consequently, 'pref_erase' is defined as a guide to limit erases 1458 * to that size and alignment. 1459 * 1460 * For SD cards that define Allocation Unit size, limit erases to one 1461 * Allocation Unit at a time. For MMC cards that define High Capacity 1462 * Erase Size, whether it is switched on or not, limit to that size. 
1463 * Otherwise just have a stab at a good value. For modern cards it 1464 * will end up being 4MiB. Note that if the value is too small, it 1465 * can end up taking longer to erase. 1466 */ 1467 if (mmc_card_sd(card) && card->ssr.au) { 1468 card->pref_erase = card->ssr.au; 1469 card->erase_shift = ffs(card->ssr.au) - 1; 1470 } else if (card->ext_csd.hc_erase_size) { 1471 card->pref_erase = card->ext_csd.hc_erase_size; 1472 } else { 1473 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; 1474 if (sz < 128) 1475 card->pref_erase = 512 * 1024 / 512; 1476 else if (sz < 512) 1477 card->pref_erase = 1024 * 1024 / 512; 1478 else if (sz < 1024) 1479 card->pref_erase = 2 * 1024 * 1024 / 512; 1480 else 1481 card->pref_erase = 4 * 1024 * 1024 / 512; 1482 if (card->pref_erase < card->erase_size) 1483 card->pref_erase = card->erase_size; 1484 else { 1485 sz = card->pref_erase % card->erase_size; 1486 if (sz) 1487 card->pref_erase += card->erase_size - sz; 1488 } 1489 } 1490 } 1491 1492 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, 1493 unsigned int arg, unsigned int qty) 1494 { 1495 unsigned int erase_timeout; 1496 1497 if (card->ext_csd.erase_group_def & 1) { 1498 /* High Capacity Erase Group Size uses HC timeouts */ 1499 if (arg == MMC_TRIM_ARG) 1500 erase_timeout = card->ext_csd.trim_timeout; 1501 else 1502 erase_timeout = card->ext_csd.hc_erase_timeout; 1503 } else { 1504 /* CSD Erase Group Size uses write timeout */ 1505 unsigned int mult = (10 << card->csd.r2w_factor); 1506 unsigned int timeout_clks = card->csd.tacc_clks * mult; 1507 unsigned int timeout_us; 1508 1509 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ 1510 if (card->csd.tacc_ns < 1000000) 1511 timeout_us = (card->csd.tacc_ns * mult) / 1000; 1512 else 1513 timeout_us = (card->csd.tacc_ns / 1000) * mult; 1514 1515 /* 1516 * ios.clock is only a target. The real clock rate might be 1517 * less but not that much less, so fudge it by multiplying by 2. 1518 */ 1519 timeout_clks <<= 1; 1520 timeout_us += (timeout_clks * 1000) / 1521 (mmc_host_clk_rate(card->host) / 1000); 1522 1523 erase_timeout = timeout_us / 1000; 1524 1525 /* 1526 * Theoretically, the calculation could underflow so round up 1527 * to 1ms in that case. 1528 */ 1529 if (!erase_timeout) 1530 erase_timeout = 1; 1531 } 1532 1533 /* Multiplier for secure operations */ 1534 if (arg & MMC_SECURE_ARGS) { 1535 if (arg == MMC_SECURE_ERASE_ARG) 1536 erase_timeout *= card->ext_csd.sec_erase_mult; 1537 else 1538 erase_timeout *= card->ext_csd.sec_trim_mult; 1539 } 1540 1541 erase_timeout *= qty; 1542 1543 /* 1544 * Ensure at least a 1 second timeout for SPI as per 1545 * 'mmc_set_data_timeout()' 1546 */ 1547 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1548 erase_timeout = 1000; 1549 1550 return erase_timeout; 1551 } 1552 1553 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, 1554 unsigned int arg, 1555 unsigned int qty) 1556 { 1557 unsigned int erase_timeout; 1558 1559 if (card->ssr.erase_timeout) { 1560 /* Erase timeout specified in SD Status Register (SSR) */ 1561 erase_timeout = card->ssr.erase_timeout * qty + 1562 card->ssr.erase_offset; 1563 } else { 1564 /* 1565 * Erase timeout not specified in SD Status Register (SSR) so 1566 * use 250ms per write block. 
1567 */ 1568 erase_timeout = 250 * qty; 1569 } 1570 1571 /* Must not be less than 1 second */ 1572 if (erase_timeout < 1000) 1573 erase_timeout = 1000; 1574 1575 return erase_timeout; 1576 } 1577 1578 static unsigned int mmc_erase_timeout(struct mmc_card *card, 1579 unsigned int arg, 1580 unsigned int qty) 1581 { 1582 if (mmc_card_sd(card)) 1583 return mmc_sd_erase_timeout(card, arg, qty); 1584 else 1585 return mmc_mmc_erase_timeout(card, arg, qty); 1586 } 1587 1588 static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1589 unsigned int to, unsigned int arg) 1590 { 1591 struct mmc_command cmd = {0}; 1592 unsigned int qty = 0; 1593 int err; 1594 1595 /* 1596 * qty is used to calculate the erase timeout which depends on how many 1597 * erase groups (or allocation units in SD terminology) are affected. 1598 * We count erasing part of an erase group as one erase group. 1599 * For SD, the allocation units are always a power of 2. For MMC, the 1600 * erase group size is almost certainly also power of 2, but it does not 1601 * seem to insist on that in the JEDEC standard, so we fall back to 1602 * division in that case. SD may not specify an allocation unit size, 1603 * in which case the timeout is based on the number of write blocks. 1604 * 1605 * Note that the timeout for secure trim 2 will only be correct if the 1606 * number of erase groups specified is the same as the total of all 1607 * preceding secure trim 1 commands. Since the power may have been 1608 * lost since the secure trim 1 commands occurred, it is generally 1609 * impossible to calculate the secure trim 2 timeout correctly. 1610 */ 1611 if (card->erase_shift) 1612 qty += ((to >> card->erase_shift) - 1613 (from >> card->erase_shift)) + 1; 1614 else if (mmc_card_sd(card)) 1615 qty += to - from + 1; 1616 else 1617 qty += ((to / card->erase_size) - 1618 (from / card->erase_size)) + 1; 1619 1620 if (!mmc_card_blockaddr(card)) { 1621 from <<= 9; 1622 to <<= 9; 1623 } 1624 1625 if (mmc_card_sd(card)) 1626 cmd.opcode = SD_ERASE_WR_BLK_START; 1627 else 1628 cmd.opcode = MMC_ERASE_GROUP_START; 1629 cmd.arg = from; 1630 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1631 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1632 if (err) { 1633 pr_err("mmc_erase: group start error %d, " 1634 "status %#x\n", err, cmd.resp[0]); 1635 err = -EIO; 1636 goto out; 1637 } 1638 1639 memset(&cmd, 0, sizeof(struct mmc_command)); 1640 if (mmc_card_sd(card)) 1641 cmd.opcode = SD_ERASE_WR_BLK_END; 1642 else 1643 cmd.opcode = MMC_ERASE_GROUP_END; 1644 cmd.arg = to; 1645 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1646 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1647 if (err) { 1648 pr_err("mmc_erase: group end error %d, status %#x\n", 1649 err, cmd.resp[0]); 1650 err = -EIO; 1651 goto out; 1652 } 1653 1654 memset(&cmd, 0, sizeof(struct mmc_command)); 1655 cmd.opcode = MMC_ERASE; 1656 cmd.arg = arg; 1657 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1658 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); 1659 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1660 if (err) { 1661 pr_err("mmc_erase: erase error %d, status %#x\n", 1662 err, cmd.resp[0]); 1663 err = -EIO; 1664 goto out; 1665 } 1666 1667 if (mmc_host_is_spi(card->host)) 1668 goto out; 1669 1670 do { 1671 memset(&cmd, 0, sizeof(struct mmc_command)); 1672 cmd.opcode = MMC_SEND_STATUS; 1673 cmd.arg = card->rca << 16; 1674 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1675 /* Do not retry else we can't see errors */ 1676 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1677 if 
(err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	if (mmc_can_discard(card))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
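/*
 * Illustrative sketch, kept compiled out: how a caller (for example a
 * block driver discard path) might choose the erase argument and check
 * alignment before invoking mmc_erase(). foo_discard_range() is a
 * hypothetical wrapper, not part of this file.
 */
#if 0
static int foo_discard_range(struct mmc_card *card,
			     unsigned int from, unsigned int nr)
{
	unsigned int arg = MMC_ERASE_ARG;

	/* TRIM works on write blocks, so it needs no erase-group alignment. */
	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else if (!mmc_erase_group_aligned(card, from, nr))
		return -EINVAL;	/* caller must round to erase groups */

	return mmc_erase(card, from, nr, arg);
}
#endif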
int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max.
discard sectors %u for timeout %u ms\n", 1885 mmc_hostname(host), max_discard, host->max_discard_to); 1886 return max_discard; 1887 } 1888 EXPORT_SYMBOL(mmc_calc_max_discard); 1889 1890 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1891 { 1892 struct mmc_command cmd = {0}; 1893 1894 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 1895 return 0; 1896 1897 cmd.opcode = MMC_SET_BLOCKLEN; 1898 cmd.arg = blocklen; 1899 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1900 return mmc_wait_for_cmd(card->host, &cmd, 5); 1901 } 1902 EXPORT_SYMBOL(mmc_set_blocklen); 1903 1904 static void mmc_hw_reset_for_init(struct mmc_host *host) 1905 { 1906 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1907 return; 1908 mmc_host_clk_hold(host); 1909 host->ops->hw_reset(host); 1910 mmc_host_clk_release(host); 1911 } 1912 1913 int mmc_can_reset(struct mmc_card *card) 1914 { 1915 u8 rst_n_function; 1916 1917 if (!mmc_card_mmc(card)) 1918 return 0; 1919 rst_n_function = card->ext_csd.rst_n_function; 1920 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) 1921 return 0; 1922 return 1; 1923 } 1924 EXPORT_SYMBOL(mmc_can_reset); 1925 1926 static int mmc_do_hw_reset(struct mmc_host *host, int check) 1927 { 1928 struct mmc_card *card = host->card; 1929 1930 if (!host->bus_ops->power_restore) 1931 return -EOPNOTSUPP; 1932 1933 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1934 return -EOPNOTSUPP; 1935 1936 if (!card) 1937 return -EINVAL; 1938 1939 if (!mmc_can_reset(card)) 1940 return -EOPNOTSUPP; 1941 1942 mmc_host_clk_hold(host); 1943 mmc_set_clock(host, host->f_init); 1944 1945 host->ops->hw_reset(host); 1946 1947 /* If the reset has happened, then a status command will fail */ 1948 if (check) { 1949 struct mmc_command cmd = {0}; 1950 int err; 1951 1952 cmd.opcode = MMC_SEND_STATUS; 1953 if (!mmc_host_is_spi(card->host)) 1954 cmd.arg = card->rca << 16; 1955 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 1956 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1957 if (!err) { 1958 mmc_host_clk_release(host); 1959 return -ENOSYS; 1960 } 1961 } 1962 1963 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR); 1964 if (mmc_host_is_spi(host)) { 1965 host->ios.chip_select = MMC_CS_HIGH; 1966 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 1967 } else { 1968 host->ios.chip_select = MMC_CS_DONTCARE; 1969 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1970 } 1971 host->ios.bus_width = MMC_BUS_WIDTH_1; 1972 host->ios.timing = MMC_TIMING_LEGACY; 1973 mmc_set_ios(host); 1974 1975 mmc_host_clk_release(host); 1976 1977 return host->bus_ops->power_restore(host); 1978 } 1979 1980 int mmc_hw_reset(struct mmc_host *host) 1981 { 1982 return mmc_do_hw_reset(host, 0); 1983 } 1984 EXPORT_SYMBOL(mmc_hw_reset); 1985 1986 int mmc_hw_reset_check(struct mmc_host *host) 1987 { 1988 return mmc_do_hw_reset(host, 1); 1989 } 1990 EXPORT_SYMBOL(mmc_hw_reset_check); 1991 1992 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) 1993 { 1994 host->f_init = freq; 1995 1996 #ifdef CONFIG_MMC_DEBUG 1997 pr_info("%s: %s: trying to init card at %u Hz\n", 1998 mmc_hostname(host), __func__, host->f_init); 1999 #endif 2000 mmc_power_up(host); 2001 2002 /* 2003 * Some eMMCs (with VCCQ always on) may not be reset after power up, so 2004 * do a hardware reset if possible. 2005 */ 2006 mmc_hw_reset_for_init(host); 2007 2008 /* 2009 * sdio_reset sends CMD52 to reset card. Since we do not know 2010 * if the card is being re-initialized, just send it. 
CMD52 2011 * should be ignored by SD/eMMC cards. 2012 */ 2013 sdio_reset(host); 2014 mmc_go_idle(host); 2015 2016 mmc_send_if_cond(host, host->ocr_avail); 2017 2018 /* Order's important: probe SDIO, then SD, then MMC */ 2019 if (!mmc_attach_sdio(host)) 2020 return 0; 2021 if (!mmc_attach_sd(host)) 2022 return 0; 2023 if (!mmc_attach_mmc(host)) 2024 return 0; 2025 2026 mmc_power_off(host); 2027 return -EIO; 2028 } 2029 2030 void mmc_rescan(struct work_struct *work) 2031 { 2032 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 2033 struct mmc_host *host = 2034 container_of(work, struct mmc_host, detect.work); 2035 int i; 2036 2037 if (host->rescan_disable) 2038 return; 2039 2040 mmc_bus_get(host); 2041 2042 /* 2043 * if there is a _removable_ card registered, check whether it is 2044 * still present 2045 */ 2046 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 2047 && !(host->caps & MMC_CAP_NONREMOVABLE)) 2048 host->bus_ops->detect(host); 2049 2050 /* 2051 * Let mmc_bus_put() free the bus/bus_ops if we've found that 2052 * the card is no longer present. 2053 */ 2054 mmc_bus_put(host); 2055 mmc_bus_get(host); 2056 2057 /* if there still is a card present, stop here */ 2058 if (host->bus_ops != NULL) { 2059 mmc_bus_put(host); 2060 goto out; 2061 } 2062 2063 /* 2064 * Only we can add a new handler, so it's safe to 2065 * release the lock here. 2066 */ 2067 mmc_bus_put(host); 2068 2069 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 2070 goto out; 2071 2072 mmc_claim_host(host); 2073 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 2074 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 2075 break; 2076 if (freqs[i] <= host->f_min) 2077 break; 2078 } 2079 mmc_release_host(host); 2080 2081 out: 2082 if (host->caps & MMC_CAP_NEEDS_POLL) 2083 mmc_schedule_delayed_work(&host->detect, HZ); 2084 } 2085 2086 void mmc_start_host(struct mmc_host *host) 2087 { 2088 mmc_power_off(host); 2089 mmc_detect_change(host, 0); 2090 } 2091 2092 void mmc_stop_host(struct mmc_host *host) 2093 { 2094 #ifdef CONFIG_MMC_DEBUG 2095 unsigned long flags; 2096 spin_lock_irqsave(&host->lock, flags); 2097 host->removed = 1; 2098 spin_unlock_irqrestore(&host->lock, flags); 2099 #endif 2100 2101 if (host->caps & MMC_CAP_DISABLE) 2102 cancel_delayed_work(&host->disable); 2103 cancel_delayed_work_sync(&host->detect); 2104 mmc_flush_scheduled_work(); 2105 2106 /* clear pm flags now and let card drivers set them as needed */ 2107 host->pm_flags = 0; 2108 2109 mmc_bus_get(host); 2110 if (host->bus_ops && !host->bus_dead) { 2111 if (host->bus_ops->remove) 2112 host->bus_ops->remove(host); 2113 2114 mmc_claim_host(host); 2115 mmc_detach_bus(host); 2116 mmc_power_off(host); 2117 mmc_release_host(host); 2118 mmc_bus_put(host); 2119 return; 2120 } 2121 mmc_bus_put(host); 2122 2123 BUG_ON(host->card); 2124 2125 mmc_power_off(host); 2126 } 2127 2128 int mmc_power_save_host(struct mmc_host *host) 2129 { 2130 int ret = 0; 2131 2132 #ifdef CONFIG_MMC_DEBUG 2133 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); 2134 #endif 2135 2136 mmc_bus_get(host); 2137 2138 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 2139 mmc_bus_put(host); 2140 return -EINVAL; 2141 } 2142 2143 if (host->bus_ops->power_save) 2144 ret = host->bus_ops->power_save(host); 2145 2146 mmc_bus_put(host); 2147 2148 mmc_power_off(host); 2149 2150 return ret; 2151 } 2152 EXPORT_SYMBOL(mmc_power_save_host); 2153 2154 int mmc_power_restore_host(struct mmc_host *host) 2155 { 2156 int ret; 2157 2158 
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

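/*
 * Card sleep/awake handling.  mmc_card_can_sleep() reports whether the
 * attached card is an MMC device with EXT_CSD revision 3 or later, i.e.
 * recent enough for the bus ops' sleep/awake handlers used below.
 */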
int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
			mmc_card_is_removable(host))
		return err;

	if (card && mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable)
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, 0);
		if (err)
			pr_err("%s: cache %s error %d\n",
					mmc_hostname(card->host),
					enable ? "on" : "off",
					err);
		else
			card->ext_csd.cache_ctrl = enable;
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();
	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {

		/*
		 * A long response time is not acceptable for device drivers
		 * when doing suspend.  Rather than letting mmc_claim_host()
		 * potentially wait "forever" in the suspend sequence, try to
		 * pre-claim the host here.
		 */
		if (mmc_try_claim_host(host)) {
			if (host->bus_ops->suspend)
				err = host->bus_ops->suspend(host);
			if (err == -ENOSYS || !host->bus_ops->resume) {
				/*
				 * We simply "remove" the card in this case.
				 * It will be redetected on resume.
				 */
				if (host->bus_ops->remove)
					host->bus_ops->remove(host);
				mmc_claim_host(host);
				mmc_detach_bus(host);
				mmc_power_off(host);
				mmc_release_host(host);
				host->pm_flags = 0;
				err = 0;
			}
			mmc_do_release_host(host);
		} else {
			err = -EBUSY;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled for
			 * SDIO cards on hosts with MMC_CAP_POWER_OFF_CARD set.
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
				   "(card was removed?)\n",
				   mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);

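/*
 * Host controller drivers typically call mmc_suspend_host() and
 * mmc_resume_host() from their own system PM callbacks.  A minimal
 * sketch (the foo_* names and drvdata layout are illustrative only):
 *
 *	static int foo_mmc_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_suspend_host(mmc);
 *	}
 *
 *	static int foo_mmc_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_resume_host(mmc);
 *	}
 */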
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the PM notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
					unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);
		break;
	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");