/*
 * linux/drivers/mmc/core/host.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright (C) 2007-2008 Pierre Ossman
 * Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);
	kfree(host);
}

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
}

static ssize_t clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clkgate_delay = value;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return count;
}

/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			 "this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					     clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		schedule_delayed_work(&host->clk_gate_work,
				      msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	/*
	 * Default clock gating delay is 0ms to avoid wasting power.
	 * This value can be tuned by writing into sysfs entry.
	 */
	host->clkgate_delay = 0;
	host->clk_gated = false;
	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	host->clkgate_delay_attr.show = clkgate_delay_show;
	host->clkgate_delay_attr.store = clkgate_delay_store;
	sysfs_attr_init(&host->clkgate_delay_attr.attr);
	host->clkgate_delay_attr.attr.name = "clkgate_delay";
	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
		       mmc_hostname(host));
}
#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}

#endif

void mmc_retune_enable(struct mmc_host *host)
{
	host->can_retune = 1;
	if (host->retune_period)
		mod_timer(&host->retune_timer,
			  jiffies + host->retune_period * HZ);
}

void mmc_retune_disable(struct mmc_host *host)
{
	host->can_retune = 0;
	del_timer_sync(&host->retune_timer);
	host->retune_now = 0;
	host->need_retune = 0;
}

void mmc_retune_timer_stop(struct mmc_host *host)
{
	del_timer_sync(&host->retune_timer);
}
EXPORT_SYMBOL(mmc_retune_timer_stop);

void mmc_retune_hold(struct mmc_host *host)
{
	if (!host->hold_retune)
		host->retune_now = 1;
	host->hold_retune += 1;
}

void mmc_retune_release(struct mmc_host *host)
{
	if (host->hold_retune)
		host->hold_retune -= 1;
	else
		WARN_ON(1);
}

int mmc_retune(struct mmc_host *host)
{
	int err;

	if (host->retune_now)
		host->retune_now = 0;
	else
		return 0;

	if (!host->need_retune || host->doing_retune || !host->card)
		return 0;

	host->need_retune = 0;

	host->doing_retune = 1;

	err = mmc_execute_tuning(host->card);

	host->doing_retune = 0;

	return err;
}

static void mmc_retune_timer(unsigned long data)
{
	struct mmc_host *host = (struct mmc_host *)data;

	mmc_retune_needed(host);
}

/**
 * mmc_of_parse() - parse host's device-tree node
 * @host: host whose node should be parsed.
 *
 * To keep the rest of the MMC subsystem unaware of whether DT has been
 * used to instantiate and configure this host instance or not, we
 * parse the properties and set respective generic mmc-host flags and
 * parameters.
 */
int mmc_of_parse(struct mmc_host *host)
{
	struct device_node *np;
	u32 bus_width;
	int len, ret;
	bool cd_cap_invert, cd_gpio_invert = false;
	bool ro_cap_invert, ro_gpio_invert = false;

	if (!host->parent || !host->parent->of_node)
		return 0;

	np = host->parent->of_node;

	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
	if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
		dev_dbg(host->parent,
			"\"bus-width\" property is missing, assuming 1 bit.\n");
		bus_width = 1;
	}

	switch (bus_width) {
	case 8:
		host->caps |= MMC_CAP_8_BIT_DATA;
		/* Hosts capable of 8-bit transfers can also do 4 bits */
	case 4:
		host->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 1:
		break;
	default:
		dev_err(host->parent,
			"Invalid \"bus-width\" value %u!\n", bus_width);
		return -EINVAL;
	}

	/* f_max is obtained from the optional "max-frequency" property */
	of_property_read_u32(np, "max-frequency", &host->f_max);

	/*
	 * Configure CD and WP pins. They are both by default active low to
	 * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
	 * mmc-gpio helpers are used to attach, configure and use them. If
	 * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
	 * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
	 * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
	 * is set. If the "non-removable" property is found, the
	 * MMC_CAP_NONREMOVABLE capability is set and no card-detection
	 * configuration is performed.
	 */

	/* Parse Card Detection */
	if (of_find_property(np, "non-removable", &len)) {
		host->caps |= MMC_CAP_NONREMOVABLE;
	} else {
		cd_cap_invert = of_property_read_bool(np, "cd-inverted");

		if (of_find_property(np, "broken-cd", &len))
			host->caps |= MMC_CAP_NEEDS_POLL;

		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
					   0, &cd_gpio_invert);
		if (!ret)
			dev_info(host->parent, "Got CD GPIO\n");
		else if (ret != -ENOENT)
			return ret;

		/*
		 * There are two ways to flag that the CD line is inverted:
		 * through the cd-inverted flag and by the GPIO line itself
		 * being inverted from the GPIO subsystem. This is a leftover
		 * from the times when the GPIO subsystem did not make it
		 * possible to flag a line as inverted.
		 *
		 * If the capability on the host AND the GPIO line are
		 * both inverted, the end result is that the CD line is
		 * not inverted.
		 */
		if (cd_cap_invert ^ cd_gpio_invert)
			host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	}

	/* Parse Write Protection */
	ro_cap_invert = of_property_read_bool(np, "wp-inverted");

	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
	if (!ret)
		dev_info(host->parent, "Got WP GPIO\n");
	else if (ret != -ENOENT)
		return ret;

	/* See the comment on CD inversion above */
	if (ro_cap_invert ^ ro_gpio_invert)
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	if (of_find_property(np, "cap-sd-highspeed", &len))
		host->caps |= MMC_CAP_SD_HIGHSPEED;
	if (of_find_property(np, "cap-mmc-highspeed", &len))
		host->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_find_property(np, "sd-uhs-sdr12", &len))
		host->caps |= MMC_CAP_UHS_SDR12;
	if (of_find_property(np, "sd-uhs-sdr25", &len))
		host->caps |= MMC_CAP_UHS_SDR25;
	if (of_find_property(np, "sd-uhs-sdr50", &len))
		host->caps |= MMC_CAP_UHS_SDR50;
	if (of_find_property(np, "sd-uhs-sdr104", &len))
		host->caps |= MMC_CAP_UHS_SDR104;
	if (of_find_property(np, "sd-uhs-ddr50", &len))
		host->caps |= MMC_CAP_UHS_DDR50;
	if (of_find_property(np, "cap-power-off-card", &len))
		host->caps |= MMC_CAP_POWER_OFF_CARD;
	if (of_find_property(np, "cap-sdio-irq", &len))
		host->caps |= MMC_CAP_SDIO_IRQ;
	if (of_find_property(np, "full-pwr-cycle", &len))
		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
	if (of_find_property(np, "keep-power-in-suspend", &len))
		host->pm_caps |= MMC_PM_KEEP_POWER;
	if (of_find_property(np, "enable-sdio-wakeup", &len))
		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
	if (of_find_property(np, "mmc-ddr-1_8v", &len))
		host->caps |= MMC_CAP_1_8V_DDR;
	if (of_find_property(np, "mmc-ddr-1_2v", &len))
		host->caps |= MMC_CAP_1_2V_DDR;
	if (of_find_property(np, "mmc-hs200-1_8v", &len))
		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
	if (of_find_property(np, "mmc-hs200-1_2v", &len))
		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
	if (of_find_property(np, "mmc-hs400-1_8v", &len))
		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
	if (of_find_property(np, "mmc-hs400-1_2v", &len))
		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;

	host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
	if (host->dsr_req && (host->dsr & ~0xffff)) {
		dev_err(host->parent,
			"device tree specified broken value for DSR: 0x%x, ignoring\n",
			host->dsr);
		host->dsr_req = 0;
	}

	return mmc_pwrseq_alloc(host);
}

EXPORT_SYMBOL(mmc_of_parse);

/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	/* scanning will be enabled when we're ready */
	host->rescan_disable = 1;
	idr_preload(GFP_KERNEL);
	spin_lock(&mmc_host_lock);
	err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
	if (err >= 0)
		host->index = err;
	spin_unlock(&mmc_host_lock);
	idr_preload_end();
	if (err < 0) {
		kfree(host);
		return NULL;
	}

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	if (mmc_gpio_alloc(host)) {
		put_device(&host->class_dev);
		return NULL;
	}

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif
	setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);

	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	mmc_pwrseq_free(host);
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);
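
/*
 * Illustrative usage sketch (not part of the upstream file): a host
 * controller driver typically pairs mmc_alloc_host()/mmc_add_host() in its
 * probe path and mmc_remove_host()/mmc_free_host() on removal. The names
 * my_probe(), my_ops and struct my_priv below are hypothetical placeholders
 * for the driver's own symbols; only the mmc_* calls are real API.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc;
 *		int ret;
 *
 *		mmc = mmc_alloc_host(sizeof(struct my_priv), &pdev->dev);
 *		if (!mmc)
 *			return -ENOMEM;
 *
 *		mmc->ops = &my_ops;
 *
 *		ret = mmc_of_parse(mmc);
 *		if (ret)
 *			goto err_free;
 *
 *		ret = mmc_add_host(mmc);
 *		if (ret)
 *			goto err_free;
 *
 *		platform_set_drvdata(pdev, mmc);
 *		return 0;
 *
 *	err_free:
 *		mmc_free_host(mmc);
 *		return ret;
 *	}
 *
 * On removal the driver calls mmc_remove_host() first and only then
 * mmc_free_host(), so that all cards are torn down before the host
 * structure's last reference is dropped.
 */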