/*
 *  linux/drivers/mmc/core/host.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright (C) 2007-2008 Pierre Ossman
 *  Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pagemap.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "core.h"
#include "host.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	kfree(host);
}

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

#ifdef CONFIG_MMC_CLKGATE

/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			 "this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mmc_claim_host(host);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_release_host(host);
}
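/*
 * Illustration only (not part of the original file): the gating described
 * above reaches a host driver solely through its set_ios() callback, with
 * ios->clock set to 0 to gate and back to the old frequency to ungate.
 * A minimal sketch of how a driver might react, assuming hypothetical
 * foo_host, foo_stop_clock() and foo_set_clock() helpers:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct foo_host *host = mmc_priv(mmc);
 *
 *		if (ios->clock == 0)
 *			foo_stop_clock(host);			// clock gated
 *		else
 *			foo_set_clock(host, ios->clock);	// clock restored
 *	}
 */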
/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					      clk_gate_work);

	mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_ungate - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_ungate(struct mmc_host *host)
{
	unsigned long flags;

	mmc_claim_host(host);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_release_host(host);
}

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 * mmc_host_clk_gate - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_gate(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		schedule_work(&host->clk_gate_work);
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	host->clk_gated = false;
	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_ungate(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

#endif
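/*
 * Usage note (illustrative, not part of the original file): with
 * CONFIG_MMC_CLKGATE enabled, the core is expected to bracket bus
 * activity with the helpers above, roughly:
 *
 *	mmc_host_clk_ungate(host);	// first user restores ios.clock
 *	... perform the request or card operation ...
 *	mmc_host_clk_gate(host);	// last user schedules the gating work
 *
 * The actual call sites live in the core request path, not in this file.
 */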
/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
		return NULL;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	spin_lock(&mmc_host_lock);
	err = idr_get_new(&mmc_host_idr, host, &host->index);
	spin_unlock(&mmc_host_lock);
	if (err)
		goto free;

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
	INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;

free:
	kfree(host);
	return NULL;
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

	err = device_add(&host->class_dev);
	if (err)
		return err;

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif

	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);
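/*
 * Illustration only (hypothetical driver, not part of the original file):
 * a typical host driver probe() pairs mmc_alloc_host() with mmc_add_host(),
 * using the "extra" bytes as its private state via mmc_priv(). The names
 * foo_probe, foo_host and foo_ops are placeholders:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc;
 *		struct foo_host *host;
 *
 *		mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *		if (!mmc)
 *			return -ENOMEM;
 *
 *		host = mmc_priv(mmc);		// points at the "extra" area
 *		mmc->ops = &foo_ops;		// driver-supplied mmc_host_ops
 *		mmc->f_min = 400000;
 *		mmc->f_max = 52000000;
 *		platform_set_drvdata(pdev, mmc);
 *
 *		return mmc_add_host(mmc);	// must be ready to serve requests
 *	}
 */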
/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);
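/*
 * Illustration only (hypothetical driver, not part of the original file):
 * teardown mirrors the probe sketch above. mmc_remove_host() must precede
 * mmc_free_host(), and the structure is actually released only when the
 * last reference to the class device is dropped:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		mmc_remove_host(mmc);	// stop host, unregister cards
 *		mmc_free_host(mmc);	// drop the host reference
 *		return 0;
 *	}
 */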