// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mmc/core/host.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright (C) 2007-2008 Pierre Ossman
 *  Copyright (C) 2010 Linus Walleij
 *
 *  MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/pm_wakeup.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "crypto.h"
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"
#include "sdio_ops.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

static DEFINE_IDA(mmc_host_ida);

#ifdef CONFIG_PM_SLEEP
static int mmc_host_class_prepare(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	/*
	 * It's safe to access the bus_ops pointer, as both userspace and the
	 * workqueue for detecting cards are frozen at this point.
	 */
	if (!host->bus_ops)
		return 0;

	/* Validate conditions for system suspend. */
	if (host->bus_ops->pre_suspend)
		return host->bus_ops->pre_suspend(host);

	return 0;
}

static void mmc_host_class_complete(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	_mmc_detect_change(host, 0, false);
}

static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
	.prepare = mmc_host_class_prepare,
	.complete = mmc_host_class_complete,
};

#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
#else
#define MMC_HOST_CLASS_DEV_PM_OPS NULL
#endif

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	wakeup_source_unregister(host->ws);
	if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
		ida_free(&mmc_host_ida, host->index);
	kfree(host);
}

static int mmc_host_classdev_shutdown(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);

	__mmc_stop_host(host);
	return 0;
}

static const struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
	.shutdown_pre	= mmc_host_classdev_shutdown,
	.pm		= MMC_HOST_CLASS_DEV_PM_OPS,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

/**
 * mmc_retune_enable() - enter a transfer mode that requires retuning
 * @host: host which should retune now
 */
void mmc_retune_enable(struct mmc_host *host)
{
	host->can_retune = 1;
	if (host->retune_period)
		mod_timer(&host->retune_timer,
			  jiffies + host->retune_period * HZ);
}

/*
 * Pause re-tuning for a small set of operations.  The pause begins after the
 * next command.
 */
void mmc_retune_pause(struct mmc_host *host)
{
	if (!host->retune_paused) {
		host->retune_paused = 1;
		mmc_retune_hold(host);
	}
}
EXPORT_SYMBOL(mmc_retune_pause);

void mmc_retune_unpause(struct mmc_host *host)
{
	if (host->retune_paused) {
		host->retune_paused = 0;
		mmc_retune_release(host);
	}
}
EXPORT_SYMBOL(mmc_retune_unpause);
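
/*
 * Usage sketch (illustrative, not part of this file): a caller that must run
 * a short sequence of commands without a re-tune slipping in between can
 * bracket it with the pause/unpause helpers. do_atomic_sequence() is a
 * made-up placeholder.
 *
 *	mmc_retune_pause(host);
 *	err = do_atomic_sequence(host);
 *	mmc_retune_unpause(host);
 */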

/**
 * mmc_retune_disable() - exit a transfer mode that requires retuning
 * @host: host which should not retune anymore
 *
 * It is not meant for temporarily preventing retuning!
 */
void mmc_retune_disable(struct mmc_host *host)
{
	mmc_retune_unpause(host);
	host->can_retune = 0;
	del_timer_sync(&host->retune_timer);
	mmc_retune_clear(host);
}

void mmc_retune_timer_stop(struct mmc_host *host)
{
	del_timer_sync(&host->retune_timer);
}
EXPORT_SYMBOL(mmc_retune_timer_stop);

void mmc_retune_hold(struct mmc_host *host)
{
	if (!host->hold_retune)
		host->retune_now = 1;
	host->hold_retune += 1;
}

void mmc_retune_release(struct mmc_host *host)
{
	if (host->hold_retune)
		host->hold_retune -= 1;
	else
		WARN_ON(1);
}
EXPORT_SYMBOL(mmc_retune_release);

int mmc_retune(struct mmc_host *host)
{
	bool return_to_hs400 = false;
	int err;

	if (host->retune_now)
		host->retune_now = 0;
	else
		return 0;

	if (!host->need_retune || host->doing_retune || !host->card)
		return 0;

	host->need_retune = 0;

	host->doing_retune = 1;

	if (host->ios.timing == MMC_TIMING_MMC_HS400) {
		err = mmc_hs400_to_hs200(host->card);
		if (err)
			goto out;

		return_to_hs400 = true;
	}

	err = mmc_execute_tuning(host->card);
	if (err)
		goto out;

	if (return_to_hs400)
		err = mmc_hs200_to_hs400(host->card);
out:
	host->doing_retune = 0;

	return err;
}

static void mmc_retune_timer(struct timer_list *t)
{
	struct mmc_host *host = from_timer(host, t, retune_timer);

	mmc_retune_needed(host);
}

static void mmc_of_parse_timing_phase(struct device *dev, const char *prop,
				      struct mmc_clk_phase *phase)
{
	int degrees[2] = {0};
	int rc;

	rc = device_property_read_u32_array(dev, prop, degrees, 2);
	phase->valid = !rc;
	if (phase->valid) {
		phase->in_deg = degrees[0];
		phase->out_deg = degrees[1];
	}
}

void
mmc_of_parse_clk_phase(struct device *dev, struct mmc_clk_phase_map *map)
{
	mmc_of_parse_timing_phase(dev, "clk-phase-legacy",
				  &map->phase[MMC_TIMING_LEGACY]);
	mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs",
				  &map->phase[MMC_TIMING_MMC_HS]);
	mmc_of_parse_timing_phase(dev, "clk-phase-sd-hs",
				  &map->phase[MMC_TIMING_SD_HS]);
	mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr12",
				  &map->phase[MMC_TIMING_UHS_SDR12]);
	mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr25",
				  &map->phase[MMC_TIMING_UHS_SDR25]);
	mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr50",
				  &map->phase[MMC_TIMING_UHS_SDR50]);
	mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr104",
				  &map->phase[MMC_TIMING_UHS_SDR104]);
	mmc_of_parse_timing_phase(dev, "clk-phase-uhs-ddr50",
				  &map->phase[MMC_TIMING_UHS_DDR50]);
	mmc_of_parse_timing_phase(dev, "clk-phase-mmc-ddr52",
				  &map->phase[MMC_TIMING_MMC_DDR52]);
	mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs200",
				  &map->phase[MMC_TIMING_MMC_HS200]);
	mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs400",
				  &map->phase[MMC_TIMING_MMC_HS400]);
}
EXPORT_SYMBOL(mmc_of_parse_clk_phase);
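
/*
 * Illustrative device-tree fragment (values invented for this comment): each
 * clk-phase-* property holds an <input output> pair of degrees, which a
 * controller driver can pick out of the parsed map when it switches to the
 * corresponding timing mode.
 *
 *	&mmc0 {
 *		clk-phase-mmc-hs200 = <90 180>;
 *		clk-phase-sd-hs = <63 72>;
 *	};
 */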

/**
 * mmc_of_parse() - parse host's device properties
 * @host: host whose properties should be parsed.
 *
 * To keep the rest of the MMC subsystem unaware of whether DT has been
 * used to instantiate and configure this host instance or not, we
 * parse the properties and set respective generic mmc-host flags and
 * parameters.
 */
int mmc_of_parse(struct mmc_host *host)
{
	struct device *dev = host->parent;
	u32 bus_width, drv_type, cd_debounce_delay_ms;
	int ret;

	if (!dev || !dev_fwnode(dev))
		return 0;

	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
	if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
		dev_dbg(host->parent,
			"\"bus-width\" property is missing, assuming 1 bit.\n");
		bus_width = 1;
	}

	switch (bus_width) {
	case 8:
		host->caps |= MMC_CAP_8_BIT_DATA;
		fallthrough;	/* Hosts capable of 8-bit can also do 4 bits */
	case 4:
		host->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 1:
		break;
	default:
		dev_err(host->parent,
			"Invalid \"bus-width\" value %u!\n", bus_width);
		return -EINVAL;
	}

	/* f_max is obtained from the optional "max-frequency" property */
	device_property_read_u32(dev, "max-frequency", &host->f_max);

	/*
	 * Configure CD and WP pins. They are both by default active low to
	 * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
	 * mmc-gpio helpers are used to attach, configure and use them. If
	 * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
	 * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
	 * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
	 * is set. If the "non-removable" property is found, the
	 * MMC_CAP_NONREMOVABLE capability is set and no card-detection
	 * configuration is performed.
	 */

	/* Parse Card Detection */

	if (device_property_read_bool(dev, "non-removable")) {
		host->caps |= MMC_CAP_NONREMOVABLE;
	} else {
		if (device_property_read_bool(dev, "cd-inverted"))
			host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;

		if (device_property_read_u32(dev, "cd-debounce-delay-ms",
					     &cd_debounce_delay_ms))
			cd_debounce_delay_ms = 200;

		if (device_property_read_bool(dev, "broken-cd"))
			host->caps |= MMC_CAP_NEEDS_POLL;

		ret = mmc_gpiod_request_cd(host, "cd", 0, false,
					   cd_debounce_delay_ms * 1000);
		if (!ret)
			dev_info(host->parent, "Got CD GPIO\n");
		else if (ret != -ENOENT && ret != -ENOSYS)
			return ret;
	}

	/* Parse Write Protection */

	if (device_property_read_bool(dev, "wp-inverted"))
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
	if (!ret)
		dev_info(host->parent, "Got WP GPIO\n");
	else if (ret != -ENOENT && ret != -ENOSYS)
		return ret;

	if (device_property_read_bool(dev, "disable-wp"))
		host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;

	if (device_property_read_bool(dev, "cap-sd-highspeed"))
		host->caps |= MMC_CAP_SD_HIGHSPEED;
	if (device_property_read_bool(dev, "cap-mmc-highspeed"))
		host->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (device_property_read_bool(dev, "sd-uhs-sdr12"))
		host->caps |= MMC_CAP_UHS_SDR12;
	if (device_property_read_bool(dev, "sd-uhs-sdr25"))
		host->caps |= MMC_CAP_UHS_SDR25;
	if (device_property_read_bool(dev, "sd-uhs-sdr50"))
		host->caps |= MMC_CAP_UHS_SDR50;
	if (device_property_read_bool(dev, "sd-uhs-sdr104"))
		host->caps |= MMC_CAP_UHS_SDR104;
	if (device_property_read_bool(dev, "sd-uhs-ddr50"))
		host->caps |= MMC_CAP_UHS_DDR50;
	if (device_property_read_bool(dev, "cap-power-off-card"))
		host->caps |= MMC_CAP_POWER_OFF_CARD;
	if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
		host->caps |= MMC_CAP_HW_RESET;
	if (device_property_read_bool(dev, "cap-sdio-irq"))
		host->caps |= MMC_CAP_SDIO_IRQ;
	if (device_property_read_bool(dev, "full-pwr-cycle"))
		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
	if (device_property_read_bool(dev, "full-pwr-cycle-in-suspend"))
		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND;
	if (device_property_read_bool(dev, "keep-power-in-suspend"))
		host->pm_caps |= MMC_PM_KEEP_POWER;
	if (device_property_read_bool(dev, "wakeup-source") ||
	    device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
	if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
		host->caps |= MMC_CAP_3_3V_DDR;
	if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
		host->caps |= MMC_CAP_1_8V_DDR;
	if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
		host->caps |= MMC_CAP_1_2V_DDR;
	if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
	if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
	if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
		host->caps2 |= MMC_CAP2_HS400_ES;
	if (device_property_read_bool(dev, "no-sdio"))
		host->caps2 |= MMC_CAP2_NO_SDIO;
	if (device_property_read_bool(dev, "no-sd"))
		host->caps2 |= MMC_CAP2_NO_SD;
	if (device_property_read_bool(dev, "no-mmc"))
		host->caps2 |= MMC_CAP2_NO_MMC;
	if (device_property_read_bool(dev, "no-mmc-hs400"))
		host->caps2 &= ~(MMC_CAP2_HS400_1_8V | MMC_CAP2_HS400_1_2V |
				 MMC_CAP2_HS400_ES);

	/* Must be after "non-removable" check */
	if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
		if (host->caps & MMC_CAP_NONREMOVABLE)
			host->fixed_drv_type = drv_type;
		else
			dev_err(host->parent,
				"can't use fixed driver type, media is removable\n");
	}

	host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
	if (host->dsr_req && (host->dsr & ~0xffff)) {
		dev_err(host->parent,
			"device tree specified broken value for DSR: 0x%x, ignoring\n",
			host->dsr);
		host->dsr_req = 0;
	}

	device_property_read_u32(dev, "post-power-on-delay-ms",
				 &host->ios.power_delay_ms);

	return mmc_pwrseq_alloc(host);
}

EXPORT_SYMBOL(mmc_of_parse);
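
/*
 * Illustrative device-tree fragment (invented for this comment): these are
 * the kind of generic properties that mmc_of_parse() translates into
 * MMC_CAP_* flags and host parameters above.
 *
 *	&mmc0 {
 *		bus-width = <4>;
 *		max-frequency = <100000000>;
 *		cd-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
 *		cap-sd-highspeed;
 *		sd-uhs-sdr104;
 *		no-sdio;
 *	};
 */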

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @host: host whose properties should be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Parse the "voltage-ranges" property, returning zero if it is not
 * found, negative errno if the voltage-range specification is invalid,
 * or one if the voltage-range is specified and successfully parsed.
 */
int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask)
{
	const char *prop = "voltage-ranges";
	struct device *dev = host->parent;
	u32 *voltage_ranges;
	int num_ranges, i;
	int ret;

	if (!device_property_present(dev, prop)) {
		dev_dbg(dev, "%s unspecified\n", prop);
		return 0;
	}

	ret = device_property_count_u32(dev, prop);
	if (ret < 0)
		return ret;

	num_ranges = ret / 2;
	if (!num_ranges) {
		dev_err(dev, "%s empty\n", prop);
		return -EINVAL;
	}

	voltage_ranges = kcalloc(2 * num_ranges, sizeof(*voltage_ranges), GFP_KERNEL);
	if (!voltage_ranges)
		return -ENOMEM;

	ret = device_property_read_u32_array(dev, prop, voltage_ranges, 2 * num_ranges);
	if (ret) {
		kfree(voltage_ranges);
		return ret;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(voltage_ranges[j + 0],
						   voltage_ranges[j + 1]);
		if (!ocr_mask) {
			dev_err(dev, "range #%d in %s is invalid\n", i, prop);
			kfree(voltage_ranges);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	kfree(voltage_ranges);

	return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
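
/*
 * Illustrative "voltage-ranges" property (values invented for this comment):
 * each pair is a <min max> range in millivolts, converted to an OCR bit mask
 * via mmc_vddrange_to_ocrmask().
 *
 *	voltage-ranges = <3300 3300>, <1800 1800>;
 *
 * A host driver would typically OR the returned mask into host->ocr_avail
 * when this function returns 1.
 */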

/**
 * mmc_first_nonreserved_index() - get the first index that is not reserved
 */
static int mmc_first_nonreserved_index(void)
{
	int max;

	max = of_alias_get_highest_id("mmc");
	if (max < 0)
		return 0;

	return max + 1;
}

/**
 *	mmc_alloc_host - initialise the per-host structure.
 *	@extra: sizeof private data structure
 *	@dev: pointer to host device model structure
 *
 *	Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int index;
	struct mmc_host *host;
	int alias_id, min_idx, max_idx;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	/* scanning will be enabled when we're ready */
	host->rescan_disable = 1;

	alias_id = of_alias_get_id(dev->of_node, "mmc");
	if (alias_id >= 0) {
		index = alias_id;
	} else {
		min_idx = mmc_first_nonreserved_index();
		max_idx = 0;
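		/*
		 * Note: with max_idx == 0, "max_idx - 1" below wraps to the
		 * largest possible id, so the allocation is effectively only
		 * bounded from below by min_idx.
		 */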

		index = ida_alloc_range(&mmc_host_ida, min_idx, max_idx - 1,
					GFP_KERNEL);
		if (index < 0) {
			kfree(host);
			return NULL;
		}
	}

	host->index = index;

	dev_set_name(&host->class_dev, "mmc%d", host->index);
	host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);
	device_enable_async_suspend(&host->class_dev);

	if (mmc_gpio_alloc(host)) {
		put_device(&host->class_dev);
		return NULL;
	}

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
	INIT_WORK(&host->sdio_irq_work, sdio_irq_work);
	timer_setup(&host->retune_timer, mmc_retune_timer, 0);

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_SIZE;

	host->max_req_size = PAGE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_SIZE / 512;

	host->fixed_drv_type = -EINVAL;
	host->ios.power_delay_ms = 10;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	return host;
}

EXPORT_SYMBOL(mmc_alloc_host);
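
/*
 * Sketch of how the "extra" argument is meant to be used (illustrative;
 * struct foo_host is invented): the private area is allocated right after
 * struct mmc_host and is reached with mmc_priv().
 *
 *	mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *	if (!mmc)
 *		return -ENOMEM;
 *	priv = mmc_priv(mmc);
 *
 * A host allocated this way is released with mmc_free_host(), or
 * automatically when allocated via devm_mmc_alloc_host() below.
 */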

static void devm_mmc_host_release(struct device *dev, void *res)
{
	mmc_free_host(*(struct mmc_host **)res);
}

struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra)
{
	struct mmc_host **dr, *host;

	dr = devres_alloc(devm_mmc_host_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	host = mmc_alloc_host(extra, dev);
	if (!host) {
		devres_free(dr);
		return NULL;
	}

	*dr = host;
	devres_add(dev, dr);

	return host;
}
EXPORT_SYMBOL(devm_mmc_alloc_host);

static int mmc_validate_host_caps(struct mmc_host *host)
{
	struct device *dev = host->parent;
	u32 caps = host->caps, caps2 = host->caps2;

	if (caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
		dev_warn(dev, "missing ->enable_sdio_irq() ops\n");
		return -EINVAL;
	}

	if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
	    !(caps & MMC_CAP_8_BIT_DATA) && !(caps2 & MMC_CAP2_NO_MMC)) {
		dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
		host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
	}

	return 0;
}

/**
 *	mmc_add_host - initialise host hardware
 *	@host: mmc host
 *
 *	Register the host with the driver model. The host must be
 *	prepared to start servicing requests before this function
 *	completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	err = mmc_validate_host_caps(host);
	if (err)
		return err;

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

	mmc_add_host_debugfs(host);

	mmc_start_host(host);
	return 0;
}

EXPORT_SYMBOL(mmc_add_host);
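
/*
 * Probe-time sketch (illustrative only; the foo_* names are invented): a
 * typical host controller driver allocates the host, parses the generic DT
 * properties, fills in its ops and limits, and only then registers it,
 * since requests may start as soon as mmc_add_host() returns.
 *
 *	static int foo_mmc_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc;
 *		int ret;
 *
 *		mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct foo_mmc));
 *		if (!mmc)
 *			return -ENOMEM;
 *
 *		ret = mmc_of_parse(mmc);
 *		if (ret)
 *			return ret;
 *
 *		mmc->ops = &foo_mmc_ops;
 *		mmc->max_segs = 64;
 *		return mmc_add_host(mmc);
 *	}
 */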

/**
 *	mmc_remove_host - remove host hardware
 *	@host: mmc host
 *
 *	Unregister and remove all cards associated with this host,
 *	and power down the MMC bus. No new requests will be issued
 *	after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	mmc_stop_host(host);

	mmc_remove_host_debugfs(host);

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 *	mmc_free_host - free the host structure
 *	@host: mmc host
 *
 *	Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	cancel_delayed_work_sync(&host->detect);
	mmc_pwrseq_free(host);
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);