/*
 * linux/drivers/mmc/core/host.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright (C) 2007-2008 Pierre Ossman
 * Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);
	kfree(host);
}

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
}

static ssize_t clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clkgate_delay = value;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return count;
}
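
/*
 * Illustrative note (not a binding requirement): the clkgate_delay
 * attribute created by mmc_host_clk_sysfs_init() below is attached to
 * the host's class device, so on a host registered as e.g. mmc0 the
 * gating delay can be tuned at run time with something like:
 *
 *	echo 50 > /sys/class/mmc_host/mmc0/clkgate_delay
 *
 * The value is parsed by clkgate_delay_store() above and is used as a
 * delay in milliseconds by mmc_host_clk_release() via msecs_to_jiffies().
 */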

/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			 "this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					     clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		schedule_delayed_work(&host->clk_gate_work,
				      msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
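
/*
 * Usage sketch (illustrative): callers that need the bus clock running
 * bracket their activity with a hold/release pair, so the clock is only
 * gated once the last user has dropped its reference and the delayed
 * gating work has expired:
 *
 *	mmc_host_clk_hold(host);	ungate the clock, take a reference
 *	... issue request or otherwise touch the card ...
 *	mmc_host_clk_release(host);	drop the reference; when it hits
 *					zero, gating is scheduled after
 *					clkgate_delay milliseconds
 */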

/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	/*
	 * Default clock gating delay is 0ms to avoid wasting power.
	 * This value can be tuned by writing into sysfs entry.
	 */
	host->clkgate_delay = 0;
	host->clk_gated = false;
	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	host->clkgate_delay_attr.show = clkgate_delay_show;
	host->clkgate_delay_attr.store = clkgate_delay_store;
	sysfs_attr_init(&host->clkgate_delay_attr.attr);
	host->clkgate_delay_attr.attr.name = "clkgate_delay";
	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
		       mmc_hostname(host));
}
#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}

#endif

/**
 * mmc_of_parse() - parse host's device-tree node
 * @host: host whose node should be parsed.
 *
 * To keep the rest of the MMC subsystem unaware of whether DT has been
 * used to instantiate and configure this host instance or not, we
 * parse the properties and set respective generic mmc-host flags and
 * parameters.
 */
int mmc_of_parse(struct mmc_host *host)
{
	struct device_node *np;
	u32 bus_width;
	int len, ret;
	bool cd_cap_invert, cd_gpio_invert = false;
	bool ro_cap_invert, ro_gpio_invert = false;

	if (!host->parent || !host->parent->of_node)
		return 0;

	np = host->parent->of_node;

	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
	if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
		dev_dbg(host->parent,
			"\"bus-width\" property is missing, assuming 1 bit.\n");
		bus_width = 1;
	}

	switch (bus_width) {
	case 8:
		host->caps |= MMC_CAP_8_BIT_DATA;
		/* Hosts capable of 8-bit transfers can also do 4 bits */
	case 4:
		host->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 1:
		break;
	default:
		dev_err(host->parent,
			"Invalid \"bus-width\" value %u!\n", bus_width);
		return -EINVAL;
	}

	/* f_max is obtained from the optional "max-frequency" property */
	of_property_read_u32(np, "max-frequency", &host->f_max);

	/*
	 * Configure CD and WP pins. They are both by default active low to
	 * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
	 * mmc-gpio helpers are used to attach, configure and use them. If
	 * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
	 * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
	 * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
	 * is set. If the "non-removable" property is found, the
	 * MMC_CAP_NONREMOVABLE capability is set and no card-detection
	 * configuration is performed.
	 */

	/* Parse Card Detection */
	if (of_find_property(np, "non-removable", &len)) {
		host->caps |= MMC_CAP_NONREMOVABLE;
	} else {
		cd_cap_invert = of_property_read_bool(np, "cd-inverted");

		if (of_find_property(np, "broken-cd", &len))
			host->caps |= MMC_CAP_NEEDS_POLL;

		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
					   0, &cd_gpio_invert);
		if (!ret)
			dev_info(host->parent, "Got CD GPIO\n");
		else if (ret != -ENOENT)
			return ret;

		/*
		 * There are two ways to flag that the CD line is inverted:
		 * through the cd-inverted flag and by the GPIO line itself
		 * being inverted from the GPIO subsystem. This is a leftover
		 * from the times when the GPIO subsystem did not make it
		 * possible to flag a line as inverted.
		 *
		 * If the capability on the host AND the GPIO line are
		 * both inverted, the end result is that the CD line is
		 * not inverted.
		 */
		if (cd_cap_invert ^ cd_gpio_invert)
			host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	}
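
	/*
	 * Illustration of the XOR rule above (CD shown; WP below follows
	 * the same scheme):
	 *
	 *	cd-inverted	GPIO line inverted	resulting caps2 flag
	 *	-----------	------------------	--------------------
	 *	no		no			none (CD active low)
	 *	yes		no			MMC_CAP2_CD_ACTIVE_HIGH
	 *	no		yes			MMC_CAP2_CD_ACTIVE_HIGH
	 *	yes		yes			none (CD active low)
	 */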

	/* Parse Write Protection */
	ro_cap_invert = of_property_read_bool(np, "wp-inverted");

	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
	if (!ret)
		dev_info(host->parent, "Got WP GPIO\n");
	else if (ret != -ENOENT)
		return ret;

	/* See the comment on CD inversion above */
	if (ro_cap_invert ^ ro_gpio_invert)
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	if (of_find_property(np, "cap-sd-highspeed", &len))
		host->caps |= MMC_CAP_SD_HIGHSPEED;
	if (of_find_property(np, "cap-mmc-highspeed", &len))
		host->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_find_property(np, "sd-uhs-sdr12", &len))
		host->caps |= MMC_CAP_UHS_SDR12;
	if (of_find_property(np, "sd-uhs-sdr25", &len))
		host->caps |= MMC_CAP_UHS_SDR25;
	if (of_find_property(np, "sd-uhs-sdr50", &len))
		host->caps |= MMC_CAP_UHS_SDR50;
	if (of_find_property(np, "sd-uhs-sdr104", &len))
		host->caps |= MMC_CAP_UHS_SDR104;
	if (of_find_property(np, "sd-uhs-ddr50", &len))
		host->caps |= MMC_CAP_UHS_DDR50;
	if (of_find_property(np, "cap-power-off-card", &len))
		host->caps |= MMC_CAP_POWER_OFF_CARD;
	if (of_find_property(np, "cap-sdio-irq", &len))
		host->caps |= MMC_CAP_SDIO_IRQ;
	if (of_find_property(np, "full-pwr-cycle", &len))
		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
	if (of_find_property(np, "keep-power-in-suspend", &len))
		host->pm_caps |= MMC_PM_KEEP_POWER;
	if (of_find_property(np, "enable-sdio-wakeup", &len))
		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
	if (of_find_property(np, "mmc-ddr-1_8v", &len))
		host->caps |= MMC_CAP_1_8V_DDR;
	if (of_find_property(np, "mmc-ddr-1_2v", &len))
		host->caps |= MMC_CAP_1_2V_DDR;
	if (of_find_property(np, "mmc-hs200-1_8v", &len))
		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
	if (of_find_property(np, "mmc-hs200-1_2v", &len))
		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
	if (of_find_property(np, "mmc-hs400-1_8v", &len))
		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
	if (of_find_property(np, "mmc-hs400-1_2v", &len))
		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;

	host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
	if (host->dsr_req && (host->dsr & ~0xffff)) {
		dev_err(host->parent,
			"device tree specified broken value for DSR: 0x%x, ignoring\n",
			host->dsr);
		host->dsr_req = 0;
	}

	return mmc_pwrseq_alloc(host);
}

EXPORT_SYMBOL(mmc_of_parse);
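
/*
 * Illustrative device-tree fragment for the properties parsed above.
 * Node name, GPIO phandles and values are examples only; "cd-gpios" and
 * "wp-gpios" are the properties looked up by mmc_gpiod_request_cd() and
 * mmc_gpiod_request_ro() through their "cd" / "wp" connection IDs. See
 * the MMC bindings under Documentation/devicetree/bindings/mmc/ for the
 * authoritative description.
 *
 *	mmc@12345678 {
 *		bus-width = <4>;
 *		max-frequency = <50000000>;
 *		cd-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
 *		wp-gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
 *		cap-sd-highspeed;
 *		keep-power-in-suspend;
 *	};
 */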

/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	/* scanning will be enabled when we're ready */
	host->rescan_disable = 1;
	idr_preload(GFP_KERNEL);
	spin_lock(&mmc_host_lock);
	err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
	if (err >= 0)
		host->index = err;
	spin_unlock(&mmc_host_lock);
	idr_preload_end();
	if (err < 0) {
		kfree(host);
		return NULL;
	}

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	if (mmc_gpio_alloc(host)) {
		put_device(&host->class_dev);
		return NULL;
	}

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);

	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	mmc_pwrseq_free(host);
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);
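
/*
 * Lifecycle sketch for host controller drivers using this API. This is an
 * illustration only, with hypothetical foo_* names; real drivers also set
 * up their controller hardware between these calls:
 *
 *	probe:
 *		host = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *		... fill in host->ops, host->caps, host->f_min, host->f_max ...
 *		mmc_of_parse(host);	(optional, on DT platforms)
 *		mmc_add_host(host);	(host must be ready to serve requests)
 *
 *	remove:
 *		mmc_remove_host(host);
 *		mmc_free_host(host);
 */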