1 /* 2 * linux/drivers/mmc/core/core.c 3 * 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/completion.h> 17 #include <linux/device.h> 18 #include <linux/delay.h> 19 #include <linux/pagemap.h> 20 #include <linux/err.h> 21 #include <linux/leds.h> 22 #include <linux/scatterlist.h> 23 #include <linux/log2.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/pm_runtime.h> 26 27 #include <linux/mmc/card.h> 28 #include <linux/mmc/host.h> 29 #include <linux/mmc/mmc.h> 30 #include <linux/mmc/sd.h> 31 32 #include "core.h" 33 #include "bus.h" 34 #include "host.h" 35 #include "sdio_bus.h" 36 37 #include "mmc_ops.h" 38 #include "sd_ops.h" 39 #include "sdio_ops.h" 40 41 static struct workqueue_struct *workqueue; 42 43 /* 44 * Enabling software CRCs on the data blocks can be a significant (30%) 45 * performance cost, and for other reasons may not always be desired. 46 * So we allow it it to be disabled. 47 */ 48 int use_spi_crc = 1; 49 module_param(use_spi_crc, bool, 0); 50 51 /* 52 * We normally treat cards as removed during suspend if they are not 53 * known to be on a non-removable bus, to avoid the risk of writing 54 * back data to a different card after resume. Allow this to be 55 * overridden if necessary. 56 */ 57 #ifdef CONFIG_MMC_UNSAFE_RESUME 58 int mmc_assume_removable; 59 #else 60 int mmc_assume_removable = 1; 61 #endif 62 EXPORT_SYMBOL(mmc_assume_removable); 63 module_param_named(removable, mmc_assume_removable, bool, 0644); 64 MODULE_PARM_DESC( 65 removable, 66 "MMC/SD cards are removable and may be removed during suspend"); 67 68 /* 69 * Internal function. Schedule delayed work in the MMC work queue. 70 */ 71 static int mmc_schedule_delayed_work(struct delayed_work *work, 72 unsigned long delay) 73 { 74 return queue_delayed_work(workqueue, work, delay); 75 } 76 77 /* 78 * Internal function. Flush all scheduled work from the MMC work queue. 79 */ 80 static void mmc_flush_scheduled_work(void) 81 { 82 flush_workqueue(workqueue); 83 } 84 85 /** 86 * mmc_request_done - finish processing an MMC request 87 * @host: MMC host which completed request 88 * @mrq: MMC request which request 89 * 90 * MMC drivers should call this function when they have completed 91 * their processing of a request. 92 */ 93 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) 94 { 95 struct mmc_command *cmd = mrq->cmd; 96 int err = cmd->error; 97 98 if (err && cmd->retries && mmc_host_is_spi(host)) { 99 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) 100 cmd->retries = 0; 101 } 102 103 if (err && cmd->retries) { 104 pr_debug("%s: req failed (CMD%u): %d, retrying...\n", 105 mmc_hostname(host), cmd->opcode, err); 106 107 cmd->retries--; 108 cmd->error = 0; 109 host->ops->request(host, mrq); 110 } else { 111 led_trigger_event(host->led, LED_OFF); 112 113 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", 114 mmc_hostname(host), cmd->opcode, err, 115 cmd->resp[0], cmd->resp[1], 116 cmd->resp[2], cmd->resp[3]); 117 118 if (mrq->data) { 119 pr_debug("%s: %d bytes transferred: %d\n", 120 mmc_hostname(host), 121 mrq->data->bytes_xfered, mrq->data->error); 122 } 123 124 if (mrq->stop) { 125 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n", 126 mmc_hostname(host), mrq->stop->opcode, 127 mrq->stop->error, 128 mrq->stop->resp[0], mrq->stop->resp[1], 129 mrq->stop->resp[2], mrq->stop->resp[3]); 130 } 131 132 if (mrq->done) 133 mrq->done(mrq); 134 135 mmc_host_clk_gate(host); 136 } 137 } 138 139 EXPORT_SYMBOL(mmc_request_done); 140 141 static void 142 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) 143 { 144 #ifdef CONFIG_MMC_DEBUG 145 unsigned int i, sz; 146 struct scatterlist *sg; 147 #endif 148 149 pr_debug("%s: starting CMD%u arg %08x flags %08x\n", 150 mmc_hostname(host), mrq->cmd->opcode, 151 mrq->cmd->arg, mrq->cmd->flags); 152 153 if (mrq->data) { 154 pr_debug("%s: blksz %d blocks %d flags %08x " 155 "tsac %d ms nsac %d\n", 156 mmc_hostname(host), mrq->data->blksz, 157 mrq->data->blocks, mrq->data->flags, 158 mrq->data->timeout_ns / 1000000, 159 mrq->data->timeout_clks); 160 } 161 162 if (mrq->stop) { 163 pr_debug("%s: CMD%u arg %08x flags %08x\n", 164 mmc_hostname(host), mrq->stop->opcode, 165 mrq->stop->arg, mrq->stop->flags); 166 } 167 168 WARN_ON(!host->claimed); 169 170 mrq->cmd->error = 0; 171 mrq->cmd->mrq = mrq; 172 if (mrq->data) { 173 BUG_ON(mrq->data->blksz > host->max_blk_size); 174 BUG_ON(mrq->data->blocks > host->max_blk_count); 175 BUG_ON(mrq->data->blocks * mrq->data->blksz > 176 host->max_req_size); 177 178 #ifdef CONFIG_MMC_DEBUG 179 sz = 0; 180 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) 181 sz += sg->length; 182 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); 183 #endif 184 185 mrq->cmd->data = mrq->data; 186 mrq->data->error = 0; 187 mrq->data->mrq = mrq; 188 if (mrq->stop) { 189 mrq->data->stop = mrq->stop; 190 mrq->stop->error = 0; 191 mrq->stop->mrq = mrq; 192 } 193 } 194 mmc_host_clk_ungate(host); 195 led_trigger_event(host->led, LED_FULL); 196 host->ops->request(host, mrq); 197 } 198 199 static void mmc_wait_done(struct mmc_request *mrq) 200 { 201 complete(mrq->done_data); 202 } 203 204 /** 205 * mmc_wait_for_req - start a request and wait for completion 206 * @host: MMC host to start command 207 * @mrq: MMC request to start 208 * 209 * Start a new MMC custom command request for a host, and wait 210 * for the command to complete. Does not attempt to parse the 211 * response. 212 */ 213 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 214 { 215 DECLARE_COMPLETION_ONSTACK(complete); 216 217 mrq->done_data = &complete; 218 mrq->done = mmc_wait_done; 219 220 mmc_start_request(host, mrq); 221 222 wait_for_completion(&complete); 223 } 224 225 EXPORT_SYMBOL(mmc_wait_for_req); 226 227 /** 228 * mmc_wait_for_cmd - start a command and wait for completion 229 * @host: MMC host to start command 230 * @cmd: MMC command to start 231 * @retries: maximum number of retries 232 * 233 * Start a new MMC command for a host, and wait for the command 234 * to complete. Return any error that occurred while the command 235 * was executing. Do not attempt to parse the response. 236 */ 237 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 238 { 239 struct mmc_request mrq; 240 241 WARN_ON(!host->claimed); 242 243 memset(&mrq, 0, sizeof(struct mmc_request)); 244 245 memset(cmd->resp, 0, sizeof(cmd->resp)); 246 cmd->retries = retries; 247 248 mrq.cmd = cmd; 249 cmd->data = NULL; 250 251 mmc_wait_for_req(host, &mrq); 252 253 return cmd->error; 254 } 255 256 EXPORT_SYMBOL(mmc_wait_for_cmd); 257 258 /** 259 * mmc_set_data_timeout - set the timeout for a data command 260 * @data: data phase for command 261 * @card: the MMC card associated with the data transfer 262 * 263 * Computes the data timeout parameters according to the 264 * correct algorithm given the card type. 265 */ 266 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) 267 { 268 unsigned int mult; 269 270 /* 271 * SDIO cards only define an upper 1 s limit on access. 272 */ 273 if (mmc_card_sdio(card)) { 274 data->timeout_ns = 1000000000; 275 data->timeout_clks = 0; 276 return; 277 } 278 279 /* 280 * SD cards use a 100 multiplier rather than 10 281 */ 282 mult = mmc_card_sd(card) ? 100 : 10; 283 284 /* 285 * Scale up the multiplier (and therefore the timeout) by 286 * the r2w factor for writes. 287 */ 288 if (data->flags & MMC_DATA_WRITE) 289 mult <<= card->csd.r2w_factor; 290 291 data->timeout_ns = card->csd.tacc_ns * mult; 292 data->timeout_clks = card->csd.tacc_clks * mult; 293 294 /* 295 * SD cards also have an upper limit on the timeout. 296 */ 297 if (mmc_card_sd(card)) { 298 unsigned int timeout_us, limit_us; 299 300 timeout_us = data->timeout_ns / 1000; 301 if (mmc_host_clk_rate(card->host)) 302 timeout_us += data->timeout_clks * 1000 / 303 (mmc_host_clk_rate(card->host) / 1000); 304 305 if (data->flags & MMC_DATA_WRITE) 306 /* 307 * The limit is really 250 ms, but that is 308 * insufficient for some crappy cards. 309 */ 310 limit_us = 300000; 311 else 312 limit_us = 100000; 313 314 /* 315 * SDHC cards always use these fixed values. 316 */ 317 if (timeout_us > limit_us || mmc_card_blockaddr(card)) { 318 data->timeout_ns = limit_us * 1000; 319 data->timeout_clks = 0; 320 } 321 } 322 /* 323 * Some cards need very high timeouts if driven in SPI mode. 324 * The worst observed timeout was 900ms after writing a 325 * continuous stream of data until the internal logic 326 * overflowed. 327 */ 328 if (mmc_host_is_spi(card->host)) { 329 if (data->flags & MMC_DATA_WRITE) { 330 if (data->timeout_ns < 1000000000) 331 data->timeout_ns = 1000000000; /* 1s */ 332 } else { 333 if (data->timeout_ns < 100000000) 334 data->timeout_ns = 100000000; /* 100ms */ 335 } 336 } 337 } 338 EXPORT_SYMBOL(mmc_set_data_timeout); 339 340 /** 341 * mmc_align_data_size - pads a transfer size to a more optimal value 342 * @card: the MMC card associated with the data transfer 343 * @sz: original transfer size 344 * 345 * Pads the original data size with a number of extra bytes in 346 * order to avoid controller bugs and/or performance hits 347 * (e.g. some controllers revert to PIO for certain sizes). 348 * 349 * Returns the improved size, which might be unmodified. 350 * 351 * Note that this function is only relevant when issuing a 352 * single scatter gather entry. 353 */ 354 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) 355 { 356 /* 357 * FIXME: We don't have a system for the controller to tell 358 * the core about its problems yet, so for now we just 32-bit 359 * align the size. 360 */ 361 sz = ((sz + 3) / 4) * 4; 362 363 return sz; 364 } 365 EXPORT_SYMBOL(mmc_align_data_size); 366 367 /** 368 * mmc_host_enable - enable a host. 369 * @host: mmc host to enable 370 * 371 * Hosts that support power saving can use the 'enable' and 'disable' 372 * methods to exit and enter power saving states. For more information 373 * see comments for struct mmc_host_ops. 374 */ 375 int mmc_host_enable(struct mmc_host *host) 376 { 377 if (!(host->caps & MMC_CAP_DISABLE)) 378 return 0; 379 380 if (host->en_dis_recurs) 381 return 0; 382 383 if (host->nesting_cnt++) 384 return 0; 385 386 cancel_delayed_work_sync(&host->disable); 387 388 if (host->enabled) 389 return 0; 390 391 if (host->ops->enable) { 392 int err; 393 394 host->en_dis_recurs = 1; 395 err = host->ops->enable(host); 396 host->en_dis_recurs = 0; 397 398 if (err) { 399 pr_debug("%s: enable error %d\n", 400 mmc_hostname(host), err); 401 return err; 402 } 403 } 404 host->enabled = 1; 405 return 0; 406 } 407 EXPORT_SYMBOL(mmc_host_enable); 408 409 static int mmc_host_do_disable(struct mmc_host *host, int lazy) 410 { 411 if (host->ops->disable) { 412 int err; 413 414 host->en_dis_recurs = 1; 415 err = host->ops->disable(host, lazy); 416 host->en_dis_recurs = 0; 417 418 if (err < 0) { 419 pr_debug("%s: disable error %d\n", 420 mmc_hostname(host), err); 421 return err; 422 } 423 if (err > 0) { 424 unsigned long delay = msecs_to_jiffies(err); 425 426 mmc_schedule_delayed_work(&host->disable, delay); 427 } 428 } 429 host->enabled = 0; 430 return 0; 431 } 432 433 /** 434 * mmc_host_disable - disable a host. 435 * @host: mmc host to disable 436 * 437 * Hosts that support power saving can use the 'enable' and 'disable' 438 * methods to exit and enter power saving states. For more information 439 * see comments for struct mmc_host_ops. 440 */ 441 int mmc_host_disable(struct mmc_host *host) 442 { 443 int err; 444 445 if (!(host->caps & MMC_CAP_DISABLE)) 446 return 0; 447 448 if (host->en_dis_recurs) 449 return 0; 450 451 if (--host->nesting_cnt) 452 return 0; 453 454 if (!host->enabled) 455 return 0; 456 457 err = mmc_host_do_disable(host, 0); 458 return err; 459 } 460 EXPORT_SYMBOL(mmc_host_disable); 461 462 /** 463 * __mmc_claim_host - exclusively claim a host 464 * @host: mmc host to claim 465 * @abort: whether or not the operation should be aborted 466 * 467 * Claim a host for a set of operations. If @abort is non null and 468 * dereference a non-zero value then this will return prematurely with 469 * that non-zero value without acquiring the lock. Returns zero 470 * with the lock held otherwise. 471 */ 472 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) 473 { 474 DECLARE_WAITQUEUE(wait, current); 475 unsigned long flags; 476 int stop; 477 478 might_sleep(); 479 480 add_wait_queue(&host->wq, &wait); 481 spin_lock_irqsave(&host->lock, flags); 482 while (1) { 483 set_current_state(TASK_UNINTERRUPTIBLE); 484 stop = abort ? atomic_read(abort) : 0; 485 if (stop || !host->claimed || host->claimer == current) 486 break; 487 spin_unlock_irqrestore(&host->lock, flags); 488 schedule(); 489 spin_lock_irqsave(&host->lock, flags); 490 } 491 set_current_state(TASK_RUNNING); 492 if (!stop) { 493 host->claimed = 1; 494 host->claimer = current; 495 host->claim_cnt += 1; 496 } else 497 wake_up(&host->wq); 498 spin_unlock_irqrestore(&host->lock, flags); 499 remove_wait_queue(&host->wq, &wait); 500 if (!stop) 501 mmc_host_enable(host); 502 return stop; 503 } 504 505 EXPORT_SYMBOL(__mmc_claim_host); 506 507 /** 508 * mmc_try_claim_host - try exclusively to claim a host 509 * @host: mmc host to claim 510 * 511 * Returns %1 if the host is claimed, %0 otherwise. 512 */ 513 int mmc_try_claim_host(struct mmc_host *host) 514 { 515 int claimed_host = 0; 516 unsigned long flags; 517 518 spin_lock_irqsave(&host->lock, flags); 519 if (!host->claimed || host->claimer == current) { 520 host->claimed = 1; 521 host->claimer = current; 522 host->claim_cnt += 1; 523 claimed_host = 1; 524 } 525 spin_unlock_irqrestore(&host->lock, flags); 526 return claimed_host; 527 } 528 EXPORT_SYMBOL(mmc_try_claim_host); 529 530 /** 531 * mmc_do_release_host - release a claimed host 532 * @host: mmc host to release 533 * 534 * If you successfully claimed a host, this function will 535 * release it again. 536 */ 537 void mmc_do_release_host(struct mmc_host *host) 538 { 539 unsigned long flags; 540 541 spin_lock_irqsave(&host->lock, flags); 542 if (--host->claim_cnt) { 543 /* Release for nested claim */ 544 spin_unlock_irqrestore(&host->lock, flags); 545 } else { 546 host->claimed = 0; 547 host->claimer = NULL; 548 spin_unlock_irqrestore(&host->lock, flags); 549 wake_up(&host->wq); 550 } 551 } 552 EXPORT_SYMBOL(mmc_do_release_host); 553 554 void mmc_host_deeper_disable(struct work_struct *work) 555 { 556 struct mmc_host *host = 557 container_of(work, struct mmc_host, disable.work); 558 559 /* If the host is claimed then we do not want to disable it anymore */ 560 if (!mmc_try_claim_host(host)) 561 return; 562 mmc_host_do_disable(host, 1); 563 mmc_do_release_host(host); 564 } 565 566 /** 567 * mmc_host_lazy_disable - lazily disable a host. 568 * @host: mmc host to disable 569 * 570 * Hosts that support power saving can use the 'enable' and 'disable' 571 * methods to exit and enter power saving states. For more information 572 * see comments for struct mmc_host_ops. 573 */ 574 int mmc_host_lazy_disable(struct mmc_host *host) 575 { 576 if (!(host->caps & MMC_CAP_DISABLE)) 577 return 0; 578 579 if (host->en_dis_recurs) 580 return 0; 581 582 if (--host->nesting_cnt) 583 return 0; 584 585 if (!host->enabled) 586 return 0; 587 588 if (host->disable_delay) { 589 mmc_schedule_delayed_work(&host->disable, 590 msecs_to_jiffies(host->disable_delay)); 591 return 0; 592 } else 593 return mmc_host_do_disable(host, 1); 594 } 595 EXPORT_SYMBOL(mmc_host_lazy_disable); 596 597 /** 598 * mmc_release_host - release a host 599 * @host: mmc host to release 600 * 601 * Release a MMC host, allowing others to claim the host 602 * for their operations. 603 */ 604 void mmc_release_host(struct mmc_host *host) 605 { 606 WARN_ON(!host->claimed); 607 608 mmc_host_lazy_disable(host); 609 610 mmc_do_release_host(host); 611 } 612 613 EXPORT_SYMBOL(mmc_release_host); 614 615 /* 616 * Internal function that does the actual ios call to the host driver, 617 * optionally printing some debug output. 618 */ 619 static inline void mmc_set_ios(struct mmc_host *host) 620 { 621 struct mmc_ios *ios = &host->ios; 622 623 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " 624 "width %u timing %u\n", 625 mmc_hostname(host), ios->clock, ios->bus_mode, 626 ios->power_mode, ios->chip_select, ios->vdd, 627 ios->bus_width, ios->timing); 628 629 if (ios->clock > 0) 630 mmc_set_ungated(host); 631 host->ops->set_ios(host, ios); 632 } 633 634 /* 635 * Control chip select pin on a host. 636 */ 637 void mmc_set_chip_select(struct mmc_host *host, int mode) 638 { 639 host->ios.chip_select = mode; 640 mmc_set_ios(host); 641 } 642 643 /* 644 * Sets the host clock to the highest possible frequency that 645 * is below "hz". 646 */ 647 void mmc_set_clock(struct mmc_host *host, unsigned int hz) 648 { 649 WARN_ON(hz < host->f_min); 650 651 if (hz > host->f_max) 652 hz = host->f_max; 653 654 host->ios.clock = hz; 655 mmc_set_ios(host); 656 } 657 658 #ifdef CONFIG_MMC_CLKGATE 659 /* 660 * This gates the clock by setting it to 0 Hz. 661 */ 662 void mmc_gate_clock(struct mmc_host *host) 663 { 664 unsigned long flags; 665 666 spin_lock_irqsave(&host->clk_lock, flags); 667 host->clk_old = host->ios.clock; 668 host->ios.clock = 0; 669 host->clk_gated = true; 670 spin_unlock_irqrestore(&host->clk_lock, flags); 671 mmc_set_ios(host); 672 } 673 674 /* 675 * This restores the clock from gating by using the cached 676 * clock value. 677 */ 678 void mmc_ungate_clock(struct mmc_host *host) 679 { 680 /* 681 * We should previously have gated the clock, so the clock shall 682 * be 0 here! The clock may however be 0 during initialization, 683 * when some request operations are performed before setting 684 * the frequency. When ungate is requested in that situation 685 * we just ignore the call. 686 */ 687 if (host->clk_old) { 688 BUG_ON(host->ios.clock); 689 /* This call will also set host->clk_gated to false */ 690 mmc_set_clock(host, host->clk_old); 691 } 692 } 693 694 void mmc_set_ungated(struct mmc_host *host) 695 { 696 unsigned long flags; 697 698 /* 699 * We've been given a new frequency while the clock is gated, 700 * so make sure we regard this as ungating it. 701 */ 702 spin_lock_irqsave(&host->clk_lock, flags); 703 host->clk_gated = false; 704 spin_unlock_irqrestore(&host->clk_lock, flags); 705 } 706 707 #else 708 void mmc_set_ungated(struct mmc_host *host) 709 { 710 } 711 #endif 712 713 /* 714 * Change the bus mode (open drain/push-pull) of a host. 715 */ 716 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 717 { 718 host->ios.bus_mode = mode; 719 mmc_set_ios(host); 720 } 721 722 /* 723 * Change data bus width and DDR mode of a host. 724 */ 725 void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, 726 unsigned int ddr) 727 { 728 host->ios.bus_width = width; 729 host->ios.ddr = ddr; 730 mmc_set_ios(host); 731 } 732 733 /* 734 * Change data bus width of a host. 735 */ 736 void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 737 { 738 mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE); 739 } 740 741 /** 742 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number 743 * @vdd: voltage (mV) 744 * @low_bits: prefer low bits in boundary cases 745 * 746 * This function returns the OCR bit number according to the provided @vdd 747 * value. If conversion is not possible a negative errno value returned. 748 * 749 * Depending on the @low_bits flag the function prefers low or high OCR bits 750 * on boundary voltages. For example, 751 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); 752 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); 753 * 754 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 755 */ 756 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits) 757 { 758 const int max_bit = ilog2(MMC_VDD_35_36); 759 int bit; 760 761 if (vdd < 1650 || vdd > 3600) 762 return -EINVAL; 763 764 if (vdd >= 1650 && vdd <= 1950) 765 return ilog2(MMC_VDD_165_195); 766 767 if (low_bits) 768 vdd -= 1; 769 770 /* Base 2000 mV, step 100 mV, bit's base 8. */ 771 bit = (vdd - 2000) / 100 + 8; 772 if (bit > max_bit) 773 return max_bit; 774 return bit; 775 } 776 777 /** 778 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask 779 * @vdd_min: minimum voltage value (mV) 780 * @vdd_max: maximum voltage value (mV) 781 * 782 * This function returns the OCR mask bits according to the provided @vdd_min 783 * and @vdd_max values. If conversion is not possible the function returns 0. 784 * 785 * Notes wrt boundary cases: 786 * This function sets the OCR bits for all boundary voltages, for example 787 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 | 788 * MMC_VDD_34_35 mask. 789 */ 790 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) 791 { 792 u32 mask = 0; 793 794 if (vdd_max < vdd_min) 795 return 0; 796 797 /* Prefer high bits for the boundary vdd_max values. */ 798 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false); 799 if (vdd_max < 0) 800 return 0; 801 802 /* Prefer low bits for the boundary vdd_min values. */ 803 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true); 804 if (vdd_min < 0) 805 return 0; 806 807 /* Fill the mask, from max bit to min bit. */ 808 while (vdd_max >= vdd_min) 809 mask |= 1 << vdd_max--; 810 811 return mask; 812 } 813 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); 814 815 #ifdef CONFIG_REGULATOR 816 817 /** 818 * mmc_regulator_get_ocrmask - return mask of supported voltages 819 * @supply: regulator to use 820 * 821 * This returns either a negative errno, or a mask of voltages that 822 * can be provided to MMC/SD/SDIO devices using the specified voltage 823 * regulator. This would normally be called before registering the 824 * MMC host adapter. 825 */ 826 int mmc_regulator_get_ocrmask(struct regulator *supply) 827 { 828 int result = 0; 829 int count; 830 int i; 831 832 count = regulator_count_voltages(supply); 833 if (count < 0) 834 return count; 835 836 for (i = 0; i < count; i++) { 837 int vdd_uV; 838 int vdd_mV; 839 840 vdd_uV = regulator_list_voltage(supply, i); 841 if (vdd_uV <= 0) 842 continue; 843 844 vdd_mV = vdd_uV / 1000; 845 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); 846 } 847 848 return result; 849 } 850 EXPORT_SYMBOL(mmc_regulator_get_ocrmask); 851 852 /** 853 * mmc_regulator_set_ocr - set regulator to match host->ios voltage 854 * @mmc: the host to regulate 855 * @supply: regulator to use 856 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) 857 * 858 * Returns zero on success, else negative errno. 859 * 860 * MMC host drivers may use this to enable or disable a regulator using 861 * a particular supply voltage. This would normally be called from the 862 * set_ios() method. 863 */ 864 int mmc_regulator_set_ocr(struct mmc_host *mmc, 865 struct regulator *supply, 866 unsigned short vdd_bit) 867 { 868 int result = 0; 869 int min_uV, max_uV; 870 871 if (vdd_bit) { 872 int tmp; 873 int voltage; 874 875 /* REVISIT mmc_vddrange_to_ocrmask() may have set some 876 * bits this regulator doesn't quite support ... don't 877 * be too picky, most cards and regulators are OK with 878 * a 0.1V range goof (it's a small error percentage). 879 */ 880 tmp = vdd_bit - ilog2(MMC_VDD_165_195); 881 if (tmp == 0) { 882 min_uV = 1650 * 1000; 883 max_uV = 1950 * 1000; 884 } else { 885 min_uV = 1900 * 1000 + tmp * 100 * 1000; 886 max_uV = min_uV + 100 * 1000; 887 } 888 889 /* avoid needless changes to this voltage; the regulator 890 * might not allow this operation 891 */ 892 voltage = regulator_get_voltage(supply); 893 if (voltage < 0) 894 result = voltage; 895 else if (voltage < min_uV || voltage > max_uV) 896 result = regulator_set_voltage(supply, min_uV, max_uV); 897 else 898 result = 0; 899 900 if (result == 0 && !mmc->regulator_enabled) { 901 result = regulator_enable(supply); 902 if (!result) 903 mmc->regulator_enabled = true; 904 } 905 } else if (mmc->regulator_enabled) { 906 result = regulator_disable(supply); 907 if (result == 0) 908 mmc->regulator_enabled = false; 909 } 910 911 if (result) 912 dev_err(mmc_dev(mmc), 913 "could not set regulator OCR (%d)\n", result); 914 return result; 915 } 916 EXPORT_SYMBOL(mmc_regulator_set_ocr); 917 918 #endif /* CONFIG_REGULATOR */ 919 920 /* 921 * Mask off any voltages we don't support and select 922 * the lowest voltage 923 */ 924 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) 925 { 926 int bit; 927 928 ocr &= host->ocr_avail; 929 930 bit = ffs(ocr); 931 if (bit) { 932 bit -= 1; 933 934 ocr &= 3 << bit; 935 936 host->ios.vdd = bit; 937 mmc_set_ios(host); 938 } else { 939 pr_warning("%s: host doesn't support card's voltages\n", 940 mmc_hostname(host)); 941 ocr = 0; 942 } 943 944 return ocr; 945 } 946 947 /* 948 * Select timing parameters for host. 949 */ 950 void mmc_set_timing(struct mmc_host *host, unsigned int timing) 951 { 952 host->ios.timing = timing; 953 mmc_set_ios(host); 954 } 955 956 /* 957 * Apply power to the MMC stack. This is a two-stage process. 958 * First, we enable power to the card without the clock running. 959 * We then wait a bit for the power to stabilise. Finally, 960 * enable the bus drivers and clock to the card. 961 * 962 * We must _NOT_ enable the clock prior to power stablising. 963 * 964 * If a host does all the power sequencing itself, ignore the 965 * initial MMC_POWER_UP stage. 966 */ 967 static void mmc_power_up(struct mmc_host *host) 968 { 969 int bit; 970 971 /* If ocr is set, we use it */ 972 if (host->ocr) 973 bit = ffs(host->ocr) - 1; 974 else 975 bit = fls(host->ocr_avail) - 1; 976 977 host->ios.vdd = bit; 978 if (mmc_host_is_spi(host)) { 979 host->ios.chip_select = MMC_CS_HIGH; 980 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 981 } else { 982 host->ios.chip_select = MMC_CS_DONTCARE; 983 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 984 } 985 host->ios.power_mode = MMC_POWER_UP; 986 host->ios.bus_width = MMC_BUS_WIDTH_1; 987 host->ios.timing = MMC_TIMING_LEGACY; 988 mmc_set_ios(host); 989 990 /* 991 * This delay should be sufficient to allow the power supply 992 * to reach the minimum voltage. 993 */ 994 mmc_delay(10); 995 996 host->ios.clock = host->f_init; 997 998 host->ios.power_mode = MMC_POWER_ON; 999 mmc_set_ios(host); 1000 1001 /* 1002 * This delay must be at least 74 clock sizes, or 1 ms, or the 1003 * time required to reach a stable voltage. 1004 */ 1005 mmc_delay(10); 1006 } 1007 1008 static void mmc_power_off(struct mmc_host *host) 1009 { 1010 host->ios.clock = 0; 1011 host->ios.vdd = 0; 1012 1013 /* 1014 * Reset ocr mask to be the highest possible voltage supported for 1015 * this mmc host. This value will be used at next power up. 1016 */ 1017 host->ocr = 1 << (fls(host->ocr_avail) - 1); 1018 1019 if (!mmc_host_is_spi(host)) { 1020 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1021 host->ios.chip_select = MMC_CS_DONTCARE; 1022 } 1023 host->ios.power_mode = MMC_POWER_OFF; 1024 host->ios.bus_width = MMC_BUS_WIDTH_1; 1025 host->ios.timing = MMC_TIMING_LEGACY; 1026 mmc_set_ios(host); 1027 } 1028 1029 /* 1030 * Cleanup when the last reference to the bus operator is dropped. 1031 */ 1032 static void __mmc_release_bus(struct mmc_host *host) 1033 { 1034 BUG_ON(!host); 1035 BUG_ON(host->bus_refs); 1036 BUG_ON(!host->bus_dead); 1037 1038 host->bus_ops = NULL; 1039 } 1040 1041 /* 1042 * Increase reference count of bus operator 1043 */ 1044 static inline void mmc_bus_get(struct mmc_host *host) 1045 { 1046 unsigned long flags; 1047 1048 spin_lock_irqsave(&host->lock, flags); 1049 host->bus_refs++; 1050 spin_unlock_irqrestore(&host->lock, flags); 1051 } 1052 1053 /* 1054 * Decrease reference count of bus operator and free it if 1055 * it is the last reference. 1056 */ 1057 static inline void mmc_bus_put(struct mmc_host *host) 1058 { 1059 unsigned long flags; 1060 1061 spin_lock_irqsave(&host->lock, flags); 1062 host->bus_refs--; 1063 if ((host->bus_refs == 0) && host->bus_ops) 1064 __mmc_release_bus(host); 1065 spin_unlock_irqrestore(&host->lock, flags); 1066 } 1067 1068 /* 1069 * Assign a mmc bus handler to a host. Only one bus handler may control a 1070 * host at any given time. 1071 */ 1072 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) 1073 { 1074 unsigned long flags; 1075 1076 BUG_ON(!host); 1077 BUG_ON(!ops); 1078 1079 WARN_ON(!host->claimed); 1080 1081 spin_lock_irqsave(&host->lock, flags); 1082 1083 BUG_ON(host->bus_ops); 1084 BUG_ON(host->bus_refs); 1085 1086 host->bus_ops = ops; 1087 host->bus_refs = 1; 1088 host->bus_dead = 0; 1089 1090 spin_unlock_irqrestore(&host->lock, flags); 1091 } 1092 1093 /* 1094 * Remove the current bus handler from a host. Assumes that there are 1095 * no interesting cards left, so the bus is powered down. 1096 */ 1097 void mmc_detach_bus(struct mmc_host *host) 1098 { 1099 unsigned long flags; 1100 1101 BUG_ON(!host); 1102 1103 WARN_ON(!host->claimed); 1104 WARN_ON(!host->bus_ops); 1105 1106 spin_lock_irqsave(&host->lock, flags); 1107 1108 host->bus_dead = 1; 1109 1110 spin_unlock_irqrestore(&host->lock, flags); 1111 1112 mmc_power_off(host); 1113 1114 mmc_bus_put(host); 1115 } 1116 1117 /** 1118 * mmc_detect_change - process change of state on a MMC socket 1119 * @host: host which changed state. 1120 * @delay: optional delay to wait before detection (jiffies) 1121 * 1122 * MMC drivers should call this when they detect a card has been 1123 * inserted or removed. The MMC layer will confirm that any 1124 * present card is still functional, and initialize any newly 1125 * inserted. 1126 */ 1127 void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1128 { 1129 #ifdef CONFIG_MMC_DEBUG 1130 unsigned long flags; 1131 spin_lock_irqsave(&host->lock, flags); 1132 WARN_ON(host->removed); 1133 spin_unlock_irqrestore(&host->lock, flags); 1134 #endif 1135 1136 mmc_schedule_delayed_work(&host->detect, delay); 1137 } 1138 1139 EXPORT_SYMBOL(mmc_detect_change); 1140 1141 void mmc_init_erase(struct mmc_card *card) 1142 { 1143 unsigned int sz; 1144 1145 if (is_power_of_2(card->erase_size)) 1146 card->erase_shift = ffs(card->erase_size) - 1; 1147 else 1148 card->erase_shift = 0; 1149 1150 /* 1151 * It is possible to erase an arbitrarily large area of an SD or MMC 1152 * card. That is not desirable because it can take a long time 1153 * (minutes) potentially delaying more important I/O, and also the 1154 * timeout calculations become increasingly hugely over-estimated. 1155 * Consequently, 'pref_erase' is defined as a guide to limit erases 1156 * to that size and alignment. 1157 * 1158 * For SD cards that define Allocation Unit size, limit erases to one 1159 * Allocation Unit at a time. For MMC cards that define High Capacity 1160 * Erase Size, whether it is switched on or not, limit to that size. 1161 * Otherwise just have a stab at a good value. For modern cards it 1162 * will end up being 4MiB. Note that if the value is too small, it 1163 * can end up taking longer to erase. 1164 */ 1165 if (mmc_card_sd(card) && card->ssr.au) { 1166 card->pref_erase = card->ssr.au; 1167 card->erase_shift = ffs(card->ssr.au) - 1; 1168 } else if (card->ext_csd.hc_erase_size) { 1169 card->pref_erase = card->ext_csd.hc_erase_size; 1170 } else { 1171 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; 1172 if (sz < 128) 1173 card->pref_erase = 512 * 1024 / 512; 1174 else if (sz < 512) 1175 card->pref_erase = 1024 * 1024 / 512; 1176 else if (sz < 1024) 1177 card->pref_erase = 2 * 1024 * 1024 / 512; 1178 else 1179 card->pref_erase = 4 * 1024 * 1024 / 512; 1180 if (card->pref_erase < card->erase_size) 1181 card->pref_erase = card->erase_size; 1182 else { 1183 sz = card->pref_erase % card->erase_size; 1184 if (sz) 1185 card->pref_erase += card->erase_size - sz; 1186 } 1187 } 1188 } 1189 1190 static void mmc_set_mmc_erase_timeout(struct mmc_card *card, 1191 struct mmc_command *cmd, 1192 unsigned int arg, unsigned int qty) 1193 { 1194 unsigned int erase_timeout; 1195 1196 if (card->ext_csd.erase_group_def & 1) { 1197 /* High Capacity Erase Group Size uses HC timeouts */ 1198 if (arg == MMC_TRIM_ARG) 1199 erase_timeout = card->ext_csd.trim_timeout; 1200 else 1201 erase_timeout = card->ext_csd.hc_erase_timeout; 1202 } else { 1203 /* CSD Erase Group Size uses write timeout */ 1204 unsigned int mult = (10 << card->csd.r2w_factor); 1205 unsigned int timeout_clks = card->csd.tacc_clks * mult; 1206 unsigned int timeout_us; 1207 1208 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ 1209 if (card->csd.tacc_ns < 1000000) 1210 timeout_us = (card->csd.tacc_ns * mult) / 1000; 1211 else 1212 timeout_us = (card->csd.tacc_ns / 1000) * mult; 1213 1214 /* 1215 * ios.clock is only a target. The real clock rate might be 1216 * less but not that much less, so fudge it by multiplying by 2. 1217 */ 1218 timeout_clks <<= 1; 1219 timeout_us += (timeout_clks * 1000) / 1220 (card->host->ios.clock / 1000); 1221 1222 erase_timeout = timeout_us / 1000; 1223 1224 /* 1225 * Theoretically, the calculation could underflow so round up 1226 * to 1ms in that case. 1227 */ 1228 if (!erase_timeout) 1229 erase_timeout = 1; 1230 } 1231 1232 /* Multiplier for secure operations */ 1233 if (arg & MMC_SECURE_ARGS) { 1234 if (arg == MMC_SECURE_ERASE_ARG) 1235 erase_timeout *= card->ext_csd.sec_erase_mult; 1236 else 1237 erase_timeout *= card->ext_csd.sec_trim_mult; 1238 } 1239 1240 erase_timeout *= qty; 1241 1242 /* 1243 * Ensure at least a 1 second timeout for SPI as per 1244 * 'mmc_set_data_timeout()' 1245 */ 1246 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1247 erase_timeout = 1000; 1248 1249 cmd->erase_timeout = erase_timeout; 1250 } 1251 1252 static void mmc_set_sd_erase_timeout(struct mmc_card *card, 1253 struct mmc_command *cmd, unsigned int arg, 1254 unsigned int qty) 1255 { 1256 if (card->ssr.erase_timeout) { 1257 /* Erase timeout specified in SD Status Register (SSR) */ 1258 cmd->erase_timeout = card->ssr.erase_timeout * qty + 1259 card->ssr.erase_offset; 1260 } else { 1261 /* 1262 * Erase timeout not specified in SD Status Register (SSR) so 1263 * use 250ms per write block. 1264 */ 1265 cmd->erase_timeout = 250 * qty; 1266 } 1267 1268 /* Must not be less than 1 second */ 1269 if (cmd->erase_timeout < 1000) 1270 cmd->erase_timeout = 1000; 1271 } 1272 1273 static void mmc_set_erase_timeout(struct mmc_card *card, 1274 struct mmc_command *cmd, unsigned int arg, 1275 unsigned int qty) 1276 { 1277 if (mmc_card_sd(card)) 1278 mmc_set_sd_erase_timeout(card, cmd, arg, qty); 1279 else 1280 mmc_set_mmc_erase_timeout(card, cmd, arg, qty); 1281 } 1282 1283 static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1284 unsigned int to, unsigned int arg) 1285 { 1286 struct mmc_command cmd; 1287 unsigned int qty = 0; 1288 int err; 1289 1290 /* 1291 * qty is used to calculate the erase timeout which depends on how many 1292 * erase groups (or allocation units in SD terminology) are affected. 1293 * We count erasing part of an erase group as one erase group. 1294 * For SD, the allocation units are always a power of 2. For MMC, the 1295 * erase group size is almost certainly also power of 2, but it does not 1296 * seem to insist on that in the JEDEC standard, so we fall back to 1297 * division in that case. SD may not specify an allocation unit size, 1298 * in which case the timeout is based on the number of write blocks. 1299 * 1300 * Note that the timeout for secure trim 2 will only be correct if the 1301 * number of erase groups specified is the same as the total of all 1302 * preceding secure trim 1 commands. Since the power may have been 1303 * lost since the secure trim 1 commands occurred, it is generally 1304 * impossible to calculate the secure trim 2 timeout correctly. 1305 */ 1306 if (card->erase_shift) 1307 qty += ((to >> card->erase_shift) - 1308 (from >> card->erase_shift)) + 1; 1309 else if (mmc_card_sd(card)) 1310 qty += to - from + 1; 1311 else 1312 qty += ((to / card->erase_size) - 1313 (from / card->erase_size)) + 1; 1314 1315 if (!mmc_card_blockaddr(card)) { 1316 from <<= 9; 1317 to <<= 9; 1318 } 1319 1320 memset(&cmd, 0, sizeof(struct mmc_command)); 1321 if (mmc_card_sd(card)) 1322 cmd.opcode = SD_ERASE_WR_BLK_START; 1323 else 1324 cmd.opcode = MMC_ERASE_GROUP_START; 1325 cmd.arg = from; 1326 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1327 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1328 if (err) { 1329 printk(KERN_ERR "mmc_erase: group start error %d, " 1330 "status %#x\n", err, cmd.resp[0]); 1331 err = -EINVAL; 1332 goto out; 1333 } 1334 1335 memset(&cmd, 0, sizeof(struct mmc_command)); 1336 if (mmc_card_sd(card)) 1337 cmd.opcode = SD_ERASE_WR_BLK_END; 1338 else 1339 cmd.opcode = MMC_ERASE_GROUP_END; 1340 cmd.arg = to; 1341 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1342 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1343 if (err) { 1344 printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n", 1345 err, cmd.resp[0]); 1346 err = -EINVAL; 1347 goto out; 1348 } 1349 1350 memset(&cmd, 0, sizeof(struct mmc_command)); 1351 cmd.opcode = MMC_ERASE; 1352 cmd.arg = arg; 1353 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1354 mmc_set_erase_timeout(card, &cmd, arg, qty); 1355 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1356 if (err) { 1357 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", 1358 err, cmd.resp[0]); 1359 err = -EIO; 1360 goto out; 1361 } 1362 1363 if (mmc_host_is_spi(card->host)) 1364 goto out; 1365 1366 do { 1367 memset(&cmd, 0, sizeof(struct mmc_command)); 1368 cmd.opcode = MMC_SEND_STATUS; 1369 cmd.arg = card->rca << 16; 1370 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1371 /* Do not retry else we can't see errors */ 1372 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1373 if (err || (cmd.resp[0] & 0xFDF92000)) { 1374 printk(KERN_ERR "error %d requesting status %#x\n", 1375 err, cmd.resp[0]); 1376 err = -EIO; 1377 goto out; 1378 } 1379 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1380 R1_CURRENT_STATE(cmd.resp[0]) == 7); 1381 out: 1382 return err; 1383 } 1384 1385 /** 1386 * mmc_erase - erase sectors. 1387 * @card: card to erase 1388 * @from: first sector to erase 1389 * @nr: number of sectors to erase 1390 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) 1391 * 1392 * Caller must claim host before calling this function. 1393 */ 1394 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, 1395 unsigned int arg) 1396 { 1397 unsigned int rem, to = from + nr; 1398 1399 if (!(card->host->caps & MMC_CAP_ERASE) || 1400 !(card->csd.cmdclass & CCC_ERASE)) 1401 return -EOPNOTSUPP; 1402 1403 if (!card->erase_size) 1404 return -EOPNOTSUPP; 1405 1406 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) 1407 return -EOPNOTSUPP; 1408 1409 if ((arg & MMC_SECURE_ARGS) && 1410 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) 1411 return -EOPNOTSUPP; 1412 1413 if ((arg & MMC_TRIM_ARGS) && 1414 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) 1415 return -EOPNOTSUPP; 1416 1417 if (arg == MMC_SECURE_ERASE_ARG) { 1418 if (from % card->erase_size || nr % card->erase_size) 1419 return -EINVAL; 1420 } 1421 1422 if (arg == MMC_ERASE_ARG) { 1423 rem = from % card->erase_size; 1424 if (rem) { 1425 rem = card->erase_size - rem; 1426 from += rem; 1427 if (nr > rem) 1428 nr -= rem; 1429 else 1430 return 0; 1431 } 1432 rem = nr % card->erase_size; 1433 if (rem) 1434 nr -= rem; 1435 } 1436 1437 if (nr == 0) 1438 return 0; 1439 1440 to = from + nr; 1441 1442 if (to <= from) 1443 return -EINVAL; 1444 1445 /* 'from' and 'to' are inclusive */ 1446 to -= 1; 1447 1448 return mmc_do_erase(card, from, to, arg); 1449 } 1450 EXPORT_SYMBOL(mmc_erase); 1451 1452 int mmc_can_erase(struct mmc_card *card) 1453 { 1454 if ((card->host->caps & MMC_CAP_ERASE) && 1455 (card->csd.cmdclass & CCC_ERASE) && card->erase_size) 1456 return 1; 1457 return 0; 1458 } 1459 EXPORT_SYMBOL(mmc_can_erase); 1460 1461 int mmc_can_trim(struct mmc_card *card) 1462 { 1463 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 1464 return 1; 1465 return 0; 1466 } 1467 EXPORT_SYMBOL(mmc_can_trim); 1468 1469 int mmc_can_secure_erase_trim(struct mmc_card *card) 1470 { 1471 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) 1472 return 1; 1473 return 0; 1474 } 1475 EXPORT_SYMBOL(mmc_can_secure_erase_trim); 1476 1477 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, 1478 unsigned int nr) 1479 { 1480 if (!card->erase_size) 1481 return 0; 1482 if (from % card->erase_size || nr % card->erase_size) 1483 return 0; 1484 return 1; 1485 } 1486 EXPORT_SYMBOL(mmc_erase_group_aligned); 1487 1488 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1489 { 1490 struct mmc_command cmd; 1491 1492 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 1493 return 0; 1494 1495 memset(&cmd, 0, sizeof(struct mmc_command)); 1496 cmd.opcode = MMC_SET_BLOCKLEN; 1497 cmd.arg = blocklen; 1498 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1499 return mmc_wait_for_cmd(card->host, &cmd, 5); 1500 } 1501 EXPORT_SYMBOL(mmc_set_blocklen); 1502 1503 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) 1504 { 1505 host->f_init = freq; 1506 1507 #ifdef CONFIG_MMC_DEBUG 1508 pr_info("%s: %s: trying to init card at %u Hz\n", 1509 mmc_hostname(host), __func__, host->f_init); 1510 #endif 1511 mmc_power_up(host); 1512 1513 /* 1514 * sdio_reset sends CMD52 to reset card. Since we do not know 1515 * if the card is being re-initialized, just send it. CMD52 1516 * should be ignored by SD/eMMC cards. 1517 */ 1518 sdio_reset(host); 1519 mmc_go_idle(host); 1520 1521 mmc_send_if_cond(host, host->ocr_avail); 1522 1523 /* Order's important: probe SDIO, then SD, then MMC */ 1524 if (!mmc_attach_sdio(host)) 1525 return 0; 1526 if (!mmc_attach_sd(host)) 1527 return 0; 1528 if (!mmc_attach_mmc(host)) 1529 return 0; 1530 1531 mmc_power_off(host); 1532 return -EIO; 1533 } 1534 1535 void mmc_rescan(struct work_struct *work) 1536 { 1537 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 1538 struct mmc_host *host = 1539 container_of(work, struct mmc_host, detect.work); 1540 int i; 1541 1542 if (host->rescan_disable) 1543 return; 1544 1545 mmc_bus_get(host); 1546 1547 /* 1548 * if there is a _removable_ card registered, check whether it is 1549 * still present 1550 */ 1551 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1552 && !(host->caps & MMC_CAP_NONREMOVABLE)) 1553 host->bus_ops->detect(host); 1554 1555 /* 1556 * Let mmc_bus_put() free the bus/bus_ops if we've found that 1557 * the card is no longer present. 1558 */ 1559 mmc_bus_put(host); 1560 mmc_bus_get(host); 1561 1562 /* if there still is a card present, stop here */ 1563 if (host->bus_ops != NULL) { 1564 mmc_bus_put(host); 1565 goto out; 1566 } 1567 1568 /* 1569 * Only we can add a new handler, so it's safe to 1570 * release the lock here. 1571 */ 1572 mmc_bus_put(host); 1573 1574 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 1575 goto out; 1576 1577 mmc_claim_host(host); 1578 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1579 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 1580 break; 1581 if (freqs[i] < host->f_min) 1582 break; 1583 } 1584 mmc_release_host(host); 1585 1586 out: 1587 if (host->caps & MMC_CAP_NEEDS_POLL) 1588 mmc_schedule_delayed_work(&host->detect, HZ); 1589 } 1590 1591 void mmc_start_host(struct mmc_host *host) 1592 { 1593 mmc_power_off(host); 1594 mmc_detect_change(host, 0); 1595 } 1596 1597 void mmc_stop_host(struct mmc_host *host) 1598 { 1599 #ifdef CONFIG_MMC_DEBUG 1600 unsigned long flags; 1601 spin_lock_irqsave(&host->lock, flags); 1602 host->removed = 1; 1603 spin_unlock_irqrestore(&host->lock, flags); 1604 #endif 1605 1606 if (host->caps & MMC_CAP_DISABLE) 1607 cancel_delayed_work(&host->disable); 1608 cancel_delayed_work_sync(&host->detect); 1609 mmc_flush_scheduled_work(); 1610 1611 /* clear pm flags now and let card drivers set them as needed */ 1612 host->pm_flags = 0; 1613 1614 mmc_bus_get(host); 1615 if (host->bus_ops && !host->bus_dead) { 1616 if (host->bus_ops->remove) 1617 host->bus_ops->remove(host); 1618 1619 mmc_claim_host(host); 1620 mmc_detach_bus(host); 1621 mmc_release_host(host); 1622 mmc_bus_put(host); 1623 return; 1624 } 1625 mmc_bus_put(host); 1626 1627 BUG_ON(host->card); 1628 1629 mmc_power_off(host); 1630 } 1631 1632 int mmc_power_save_host(struct mmc_host *host) 1633 { 1634 int ret = 0; 1635 1636 mmc_bus_get(host); 1637 1638 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1639 mmc_bus_put(host); 1640 return -EINVAL; 1641 } 1642 1643 if (host->bus_ops->power_save) 1644 ret = host->bus_ops->power_save(host); 1645 1646 mmc_bus_put(host); 1647 1648 mmc_power_off(host); 1649 1650 return ret; 1651 } 1652 EXPORT_SYMBOL(mmc_power_save_host); 1653 1654 int mmc_power_restore_host(struct mmc_host *host) 1655 { 1656 int ret; 1657 1658 mmc_bus_get(host); 1659 1660 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1661 mmc_bus_put(host); 1662 return -EINVAL; 1663 } 1664 1665 mmc_power_up(host); 1666 ret = host->bus_ops->power_restore(host); 1667 1668 mmc_bus_put(host); 1669 1670 return ret; 1671 } 1672 EXPORT_SYMBOL(mmc_power_restore_host); 1673 1674 int mmc_card_awake(struct mmc_host *host) 1675 { 1676 int err = -ENOSYS; 1677 1678 mmc_bus_get(host); 1679 1680 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 1681 err = host->bus_ops->awake(host); 1682 1683 mmc_bus_put(host); 1684 1685 return err; 1686 } 1687 EXPORT_SYMBOL(mmc_card_awake); 1688 1689 int mmc_card_sleep(struct mmc_host *host) 1690 { 1691 int err = -ENOSYS; 1692 1693 mmc_bus_get(host); 1694 1695 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 1696 err = host->bus_ops->sleep(host); 1697 1698 mmc_bus_put(host); 1699 1700 return err; 1701 } 1702 EXPORT_SYMBOL(mmc_card_sleep); 1703 1704 int mmc_card_can_sleep(struct mmc_host *host) 1705 { 1706 struct mmc_card *card = host->card; 1707 1708 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) 1709 return 1; 1710 return 0; 1711 } 1712 EXPORT_SYMBOL(mmc_card_can_sleep); 1713 1714 #ifdef CONFIG_PM 1715 1716 /** 1717 * mmc_suspend_host - suspend a host 1718 * @host: mmc host 1719 */ 1720 int mmc_suspend_host(struct mmc_host *host) 1721 { 1722 int err = 0; 1723 1724 if (host->caps & MMC_CAP_DISABLE) 1725 cancel_delayed_work(&host->disable); 1726 cancel_delayed_work(&host->detect); 1727 mmc_flush_scheduled_work(); 1728 1729 mmc_bus_get(host); 1730 if (host->bus_ops && !host->bus_dead) { 1731 if (host->bus_ops->suspend) 1732 err = host->bus_ops->suspend(host); 1733 if (err == -ENOSYS || !host->bus_ops->resume) { 1734 /* 1735 * We simply "remove" the card in this case. 1736 * It will be redetected on resume. 1737 */ 1738 if (host->bus_ops->remove) 1739 host->bus_ops->remove(host); 1740 mmc_claim_host(host); 1741 mmc_detach_bus(host); 1742 mmc_release_host(host); 1743 host->pm_flags = 0; 1744 err = 0; 1745 } 1746 } 1747 mmc_bus_put(host); 1748 1749 if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) 1750 mmc_power_off(host); 1751 1752 return err; 1753 } 1754 1755 EXPORT_SYMBOL(mmc_suspend_host); 1756 1757 /** 1758 * mmc_resume_host - resume a previously suspended host 1759 * @host: mmc host 1760 */ 1761 int mmc_resume_host(struct mmc_host *host) 1762 { 1763 int err = 0; 1764 1765 mmc_bus_get(host); 1766 if (host->bus_ops && !host->bus_dead) { 1767 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { 1768 mmc_power_up(host); 1769 mmc_select_voltage(host, host->ocr); 1770 /* 1771 * Tell runtime PM core we just powered up the card, 1772 * since it still believes the card is powered off. 1773 * Note that currently runtime PM is only enabled 1774 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD 1775 */ 1776 if (mmc_card_sdio(host->card) && 1777 (host->caps & MMC_CAP_POWER_OFF_CARD)) { 1778 pm_runtime_disable(&host->card->dev); 1779 pm_runtime_set_active(&host->card->dev); 1780 pm_runtime_enable(&host->card->dev); 1781 } 1782 } 1783 BUG_ON(!host->bus_ops->resume); 1784 err = host->bus_ops->resume(host); 1785 if (err) { 1786 printk(KERN_WARNING "%s: error %d during resume " 1787 "(card was removed?)\n", 1788 mmc_hostname(host), err); 1789 err = 0; 1790 } 1791 } 1792 mmc_bus_put(host); 1793 1794 return err; 1795 } 1796 EXPORT_SYMBOL(mmc_resume_host); 1797 1798 /* Do the card removal on suspend if card is assumed removeable 1799 * Do that in pm notifier while userspace isn't yet frozen, so we will be able 1800 to sync the card. 1801 */ 1802 int mmc_pm_notify(struct notifier_block *notify_block, 1803 unsigned long mode, void *unused) 1804 { 1805 struct mmc_host *host = container_of( 1806 notify_block, struct mmc_host, pm_notify); 1807 unsigned long flags; 1808 1809 1810 switch (mode) { 1811 case PM_HIBERNATION_PREPARE: 1812 case PM_SUSPEND_PREPARE: 1813 1814 spin_lock_irqsave(&host->lock, flags); 1815 host->rescan_disable = 1; 1816 spin_unlock_irqrestore(&host->lock, flags); 1817 cancel_delayed_work_sync(&host->detect); 1818 1819 if (!host->bus_ops || host->bus_ops->suspend) 1820 break; 1821 1822 mmc_claim_host(host); 1823 1824 if (host->bus_ops->remove) 1825 host->bus_ops->remove(host); 1826 1827 mmc_detach_bus(host); 1828 mmc_release_host(host); 1829 host->pm_flags = 0; 1830 break; 1831 1832 case PM_POST_SUSPEND: 1833 case PM_POST_HIBERNATION: 1834 case PM_POST_RESTORE: 1835 1836 spin_lock_irqsave(&host->lock, flags); 1837 host->rescan_disable = 0; 1838 spin_unlock_irqrestore(&host->lock, flags); 1839 mmc_detect_change(host, 0); 1840 1841 } 1842 1843 return 0; 1844 } 1845 #endif 1846 1847 static int __init mmc_init(void) 1848 { 1849 int ret; 1850 1851 workqueue = alloc_ordered_workqueue("kmmcd", 0); 1852 if (!workqueue) 1853 return -ENOMEM; 1854 1855 ret = mmc_register_bus(); 1856 if (ret) 1857 goto destroy_workqueue; 1858 1859 ret = mmc_register_host_class(); 1860 if (ret) 1861 goto unregister_bus; 1862 1863 ret = sdio_register_bus(); 1864 if (ret) 1865 goto unregister_host_class; 1866 1867 return 0; 1868 1869 unregister_host_class: 1870 mmc_unregister_host_class(); 1871 unregister_bus: 1872 mmc_unregister_bus(); 1873 destroy_workqueue: 1874 destroy_workqueue(workqueue); 1875 1876 return ret; 1877 } 1878 1879 static void __exit mmc_exit(void) 1880 { 1881 sdio_unregister_bus(); 1882 mmc_unregister_host_class(); 1883 mmc_unregister_bus(); 1884 destroy_workqueue(workqueue); 1885 } 1886 1887 subsys_initcall(mmc_init); 1888 module_exit(mmc_exit); 1889 1890 MODULE_LICENSE("GPL"); 1891