xref: /linux/drivers/mmc/core/core.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 
26 #include <linux/mmc/card.h>
27 #include <linux/mmc/host.h>
28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/sd.h>
30 
31 #include "core.h"
32 #include "bus.h"
33 #include "host.h"
34 #include "sdio_bus.h"
35 
36 #include "mmc_ops.h"
37 #include "sd_ops.h"
38 #include "sdio_ops.h"
39 
/* Workqueue shared by all deferred MMC core work (card detect, lazy disable). */
static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 *
 * NOTE(review): declared int but registered with module_param(..., bool, ...);
 * the variable type and param type should agree -- confirm against core.h
 * before changing either.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
65 
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 * Returns the queue_delayed_work() result: non-zero if @work was newly
 * queued, zero if it was already pending.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}
74 
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 * Blocks until every item queued on the MMC workqueue has finished.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
82 
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/*
	 * In SPI mode an "illegal command" response can never succeed on
	 * retry, so cancel any remaining retries up front.
	 */
	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		/* Resubmit the same request directly to the host driver. */
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		/* Completion callback; typically wakes mmc_wait_for_req(). */
		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);
136 
/*
 * Internal function. Log the request, sanity-check its data segment
 * against the host's limits, link the cmd/data/stop structures together
 * and hand the request to the host driver.  The caller must hold the
 * host claim (see WARN_ON below).
 */
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	led_trigger_event(host->led, LED_FULL);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		/* The data transfer must fit within the host's limits. */
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		/* Scatterlist total must match the declared transfer size. */
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	host->ops->request(host, mrq);
}
194 
/* Completion callback for mmc_wait_for_req(): wake the waiting task. */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}
199 
200 /**
201  *	mmc_wait_for_req - start a request and wait for completion
202  *	@host: MMC host to start command
203  *	@mrq: MMC request to start
204  *
205  *	Start a new MMC custom command request for a host, and wait
206  *	for the command to complete. Does not attempt to parse the
207  *	response.
208  */
209 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
210 {
211 	DECLARE_COMPLETION_ONSTACK(complete);
212 
213 	mrq->done_data = &complete;
214 	mrq->done = mmc_wait_done;
215 
216 	mmc_start_request(host, mrq);
217 
218 	wait_for_completion(&complete);
219 }
220 
221 EXPORT_SYMBOL(mmc_wait_for_req);
222 
223 /**
224  *	mmc_wait_for_cmd - start a command and wait for completion
225  *	@host: MMC host to start command
226  *	@cmd: MMC command to start
227  *	@retries: maximum number of retries
228  *
229  *	Start a new MMC command for a host, and wait for the command
230  *	to complete.  Return any error that occurred while the command
231  *	was executing.  Do not attempt to parse the response.
232  */
233 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
234 {
235 	struct mmc_request mrq;
236 
237 	WARN_ON(!host->claimed);
238 
239 	memset(&mrq, 0, sizeof(struct mmc_request));
240 
241 	memset(cmd->resp, 0, sizeof(cmd->resp));
242 	cmd->retries = retries;
243 
244 	mrq.cmd = cmd;
245 	cmd->data = NULL;
246 
247 	mmc_wait_for_req(host, &mrq);
248 
249 	return cmd->error;
250 }
251 
252 EXPORT_SYMBOL(mmc_wait_for_cmd);
253 
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.  Results are stored in
 *	@data->timeout_ns and @data->timeout_clks.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		/* Convert both timeout components to a single us figure. */
		timeout_us = data->timeout_ns / 1000;
		timeout_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
334 
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer (currently unused)
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	return ((sz + 3) / 4) * 4;
}
360 EXPORT_SYMBOL(mmc_align_data_size);
361 
/**
 *	mmc_host_enable - enable a host.
 *	@host: mmc host to enable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	/* Guard against ->enable()/->disable() calling back into us. */
	if (host->en_dis_recurs)
		return 0;

	/* Only the outermost enable (count 0 -> 1) does any real work. */
	if (host->nesting_cnt++)
		return 0;

	/* Make sure a pending lazy disable does not race with this enable. */
	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);
403 
/*
 * Internal helper shared by mmc_host_disable() and the lazy-disable
 * paths.  Calls the driver's ->disable() hook; a positive return value
 * from the hook is a delay in ms after which disabling should be
 * retried via the MMC workqueue.
 */
static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			/* Driver asked us to retry after 'err' ms. */
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}
427 
428 /**
429  *	mmc_host_disable - disable a host.
430  *	@host: mmc host to disable
431  *
432  *	Hosts that support power saving can use the 'enable' and 'disable'
433  *	methods to exit and enter power saving states. For more information
434  *	see comments for struct mmc_host_ops.
435  */
436 int mmc_host_disable(struct mmc_host *host)
437 {
438 	int err;
439 
440 	if (!(host->caps & MMC_CAP_DISABLE))
441 		return 0;
442 
443 	if (host->en_dis_recurs)
444 		return 0;
445 
446 	if (--host->nesting_cnt)
447 		return 0;
448 
449 	if (!host->enabled)
450 		return 0;
451 
452 	err = mmc_host_do_disable(host, 0);
453 	return err;
454 }
455 EXPORT_SYMBOL(mmc_host_disable);
456 
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereference a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		/*
		 * Set the task state before testing the condition so a
		 * wake-up between the test and schedule() is not lost.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		/* Claims nest: the current claimer may claim again. */
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		/* We gave up; let the next waiter re-check the condition. */
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
501 
502 /**
503  *	mmc_try_claim_host - try exclusively to claim a host
504  *	@host: mmc host to claim
505  *
506  *	Returns %1 if the host is claimed, %0 otherwise.
507  */
508 int mmc_try_claim_host(struct mmc_host *host)
509 {
510 	int claimed_host = 0;
511 	unsigned long flags;
512 
513 	spin_lock_irqsave(&host->lock, flags);
514 	if (!host->claimed || host->claimer == current) {
515 		host->claimed = 1;
516 		host->claimer = current;
517 		host->claim_cnt += 1;
518 		claimed_host = 1;
519 	}
520 	spin_unlock_irqrestore(&host->lock, flags);
521 	return claimed_host;
522 }
523 EXPORT_SYMBOL(mmc_try_claim_host);
524 
525 static void mmc_do_release_host(struct mmc_host *host)
526 {
527 	unsigned long flags;
528 
529 	spin_lock_irqsave(&host->lock, flags);
530 	if (--host->claim_cnt) {
531 		/* Release for nested claim */
532 		spin_unlock_irqrestore(&host->lock, flags);
533 	} else {
534 		host->claimed = 0;
535 		host->claimer = NULL;
536 		spin_unlock_irqrestore(&host->lock, flags);
537 		wake_up(&host->wq);
538 	}
539 }
540 
/*
 * Workqueue handler for host->disable: retry disabling a host after the
 * delay its ->disable() hook requested (see mmc_host_do_disable()).
 */
void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}
552 
/**
 *	mmc_host_lazy_disable - lazily disable a host.
 *	@host: mmc host to disable
 *
 *	Like mmc_host_disable(), but if the host has a disable_delay the
 *	actual disable is deferred to the MMC workqueue rather than done
 *	immediately.
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	/* Guard against ->enable()/->disable() calling back into us. */
	if (host->en_dis_recurs)
		return 0;

	/* Only the outermost disable does any real work. */
	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		/* Defer the disable; mmc_host_deeper_disable() finishes it. */
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);
583 
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.  Also schedules a lazy power-saving
 *	disable of the host (see mmc_host_lazy_disable()).
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);
601 
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.  Callers update host->ios
 * first; this pushes the whole structure to the driver.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
618 
/*
 * Control chip select pin on a host.  @mode is one of the MMC_CS_*
 * values stored in host->ios.chip_select.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
627 
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".  Requesting less than host->f_min is a caller bug
 * (warned, not clamped); more than host->f_max is clamped silently.
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}
642 
/*
 * Change the bus mode (open drain/push-pull) of a host.
 * @mode is one of the MMC_BUSMODE_* values.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}
651 
/*
 * Change data bus width of a host.
 * @width is one of the MMC_BUS_WIDTH_* values.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
660 
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	/* OCR only describes 1.65V - 3.6V. */
	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	/* Nudge boundary values (multiples of 100) into the lower bit. */
	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
696 
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
734 
735 #ifdef CONFIG_REGULATOR
736 
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	/* OR in the OCR bit for every discrete voltage the supply offers. */
	for (i = 0; i < count; i++) {
		int		vdd_uV;
		int		vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
771 
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 * @supply: regulator to use
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;
	int			enabled;

	enabled = regulator_is_enabled(supply);
	if (enabled < 0)
		return enabled;

	if (vdd_bit) {
		int		tmp;
		int		voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			/* The 1.65-1.95V band is a single OCR bit. */
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			/* Above that, each OCR bit spans 100 mV. */
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !enabled)
			result = regulator_enable(supply);
	} else if (enabled) {
		/* vdd_bit == 0 means power off. */
		result = regulator_disable(supply);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
831 
832 #endif
833 
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage.  Returns the (possibly reduced) OCR mask, or 0
 * if the host supports none of the card's voltages.
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		/* Keep only the selected voltage bit and its neighbour. */
		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
860 
/*
 * Select timing parameters for host.
 * @timing is one of the MMC_TIMING_* values.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
869 
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	/* Identification mode needs a clock of at most 400 kHz. */
	if (host->f_min > 400000) {
		pr_warning("%s: Minimum clock frequency too high for "
				"identification mode\n", mmc_hostname(host));
		host->ios.clock = host->f_min;
	} else
		host->ios.clock = 400000;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}
926 
/*
 * Remove power from the MMC stack: stop the clock, drop Vdd and reset
 * the ios state back to its power-off defaults.
 */
static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}
940 
/*
 * Cleanup when the last reference to the bus operator is dropped.
 * Must only be called with bus_refs == 0 and the bus marked dead
 * (enforced by the BUG_ONs); caller holds host->lock.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
952 
/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	/* host->lock protects bus_refs. */
	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
964 
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	/* Last reference gone: detach the bus operator under the lock. */
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
979 
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.  The caller must hold the host claim; the new
 * handler starts with a single reference (dropped via mmc_bus_put()).
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	/* A handler must not already be attached. */
	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
1004 
/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.  Drops the
 * reference taken by mmc_attach_bus().
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	/* Mark dead first so __mmc_release_bus() can run on the last put. */
	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}
1028 
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	/* Detect events after mmc_stop_host() indicate a driver bug. */
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
1052 
/*
 * Initialise the card's erase parameters: erase_shift (log2 of the
 * erase size when it is a power of two) and pref_erase, the preferred
 * maximum number of sectors to erase in one operation.
 */
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		/* sz = card capacity in MiB (capacity is in 512B sectors). */
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		/* pref_erase below is in 512-byte sectors. */
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			/* Round pref_erase up to a multiple of erase_size. */
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
1101 
/*
 * Compute the erase timeout (in ms) for an MMC card and store it in
 * cmd->erase_timeout.  @arg is the erase argument (MMC_*_ARG) and @qty
 * the number of erase groups being erased.
 */
static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
				      struct mmc_command *cmd,
				      unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	cmd->erase_timeout = erase_timeout;
}
1163 
1164 static void mmc_set_sd_erase_timeout(struct mmc_card *card,
1165 				     struct mmc_command *cmd, unsigned int arg,
1166 				     unsigned int qty)
1167 {
1168 	if (card->ssr.erase_timeout) {
1169 		/* Erase timeout specified in SD Status Register (SSR) */
1170 		cmd->erase_timeout = card->ssr.erase_timeout * qty +
1171 				     card->ssr.erase_offset;
1172 	} else {
1173 		/*
1174 		 * Erase timeout not specified in SD Status Register (SSR) so
1175 		 * use 250ms per write block.
1176 		 */
1177 		cmd->erase_timeout = 250 * qty;
1178 	}
1179 
1180 	/* Must not be less than 1 second */
1181 	if (cmd->erase_timeout < 1000)
1182 		cmd->erase_timeout = 1000;
1183 }
1184 
/* Dispatch to the SD or MMC flavour of the erase timeout calculation. */
static void mmc_set_erase_timeout(struct mmc_card *card,
				  struct mmc_command *cmd, unsigned int arg,
				  unsigned int qty)
{
	if (!mmc_card_sd(card))
		mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
	else
		mmc_set_sd_erase_timeout(card, cmd, arg, qty);
}
1194 
/*
 * Issue the erase command sequence (group start, group end, erase) for
 * the inclusive range [from, to], then poll CMD13 until the card leaves
 * the busy/programming state.  Addresses are sectors, converted to bytes
 * for non-block-addressed cards.  Returns 0 or a negative errno.
 */
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd;
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	/* Byte-addressed cards take byte offsets, so convert from sectors */
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	/* Timeout depends on qty computed above and the erase variant */
	mmc_set_erase_timeout(card, &cmd, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	/* SPI hosts signal busy on the bus; no status polling needed */
	if (mmc_host_is_spi(card->host))
		goto out;

	/* Poll CMD13 until the card is ready and out of state 7 (prg) */
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
	return err;
}
1296 
1297 /**
1298  * mmc_erase - erase sectors.
1299  * @card: card to erase
1300  * @from: first sector to erase
1301  * @nr: number of sectors to erase
1302  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1303  *
1304  * Caller must claim host before calling this function.
1305  */
1306 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1307 	      unsigned int arg)
1308 {
1309 	unsigned int rem, to = from + nr;
1310 
1311 	if (!(card->host->caps & MMC_CAP_ERASE) ||
1312 	    !(card->csd.cmdclass & CCC_ERASE))
1313 		return -EOPNOTSUPP;
1314 
1315 	if (!card->erase_size)
1316 		return -EOPNOTSUPP;
1317 
1318 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1319 		return -EOPNOTSUPP;
1320 
1321 	if ((arg & MMC_SECURE_ARGS) &&
1322 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1323 		return -EOPNOTSUPP;
1324 
1325 	if ((arg & MMC_TRIM_ARGS) &&
1326 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1327 		return -EOPNOTSUPP;
1328 
1329 	if (arg == MMC_SECURE_ERASE_ARG) {
1330 		if (from % card->erase_size || nr % card->erase_size)
1331 			return -EINVAL;
1332 	}
1333 
1334 	if (arg == MMC_ERASE_ARG) {
1335 		rem = from % card->erase_size;
1336 		if (rem) {
1337 			rem = card->erase_size - rem;
1338 			from += rem;
1339 			if (nr > rem)
1340 				nr -= rem;
1341 			else
1342 				return 0;
1343 		}
1344 		rem = nr % card->erase_size;
1345 		if (rem)
1346 			nr -= rem;
1347 	}
1348 
1349 	if (nr == 0)
1350 		return 0;
1351 
1352 	to = from + nr;
1353 
1354 	if (to <= from)
1355 		return -EINVAL;
1356 
1357 	/* 'from' and 'to' are inclusive */
1358 	to -= 1;
1359 
1360 	return mmc_do_erase(card, from, to, arg);
1361 }
1362 EXPORT_SYMBOL(mmc_erase);
1363 
1364 int mmc_can_erase(struct mmc_card *card)
1365 {
1366 	if ((card->host->caps & MMC_CAP_ERASE) &&
1367 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1368 		return 1;
1369 	return 0;
1370 }
1371 EXPORT_SYMBOL(mmc_can_erase);
1372 
1373 int mmc_can_trim(struct mmc_card *card)
1374 {
1375 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1376 		return 1;
1377 	return 0;
1378 }
1379 EXPORT_SYMBOL(mmc_can_trim);
1380 
1381 int mmc_can_secure_erase_trim(struct mmc_card *card)
1382 {
1383 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1384 		return 1;
1385 	return 0;
1386 }
1387 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1388 
1389 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1390 			    unsigned int nr)
1391 {
1392 	if (!card->erase_size)
1393 		return 0;
1394 	if (from % card->erase_size || nr % card->erase_size)
1395 		return 0;
1396 	return 1;
1397 }
1398 EXPORT_SYMBOL(mmc_erase_group_aligned);
1399 
/*
 * Deferred card-detect work: check whether a registered card is still
 * present, and if no card is bound, probe for a newly inserted one in
 * the order SDIO, SD, then MMC.  Reschedules itself when the host needs
 * polling (MMC_CAP_NEEDS_POLL).
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;

	/* Rescans are disabled around suspend (see mmc_pm_notify()) */
	spin_lock_irqsave(&host->lock, flags);

	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);


	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	mmc_bus_put(host);


	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	/* Honour the host's card-detect callback, if it has one */
	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	/* Power up and reset the card to a known idle state */
	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr)) {
			mmc_claim_host(host);
			/* try SDMEM (but not MMC) even if SDIO is broken */
			if (mmc_send_app_op_cond(host, 0, &ocr))
				goto out_fail;

			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
		}
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		goto out;
	}

out_fail:
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
1499 
/*
 * Start a host: force power off to a known state, then kick off card
 * detection immediately.
 */
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
1505 
/*
 * Stop a host: cancel pending work, remove any bound card through its
 * bus handler, detach the bus and power the host down.
 */
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/* Stop deferred work before tearing anything down */
	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		/* Detach under a claimed host so nothing races with us */
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	/* No bus handler bound, so there must be no card either */
	BUG_ON(host->card);

	mmc_power_off(host);
}
1540 
1541 void mmc_power_save_host(struct mmc_host *host)
1542 {
1543 	mmc_bus_get(host);
1544 
1545 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1546 		mmc_bus_put(host);
1547 		return;
1548 	}
1549 
1550 	if (host->bus_ops->power_save)
1551 		host->bus_ops->power_save(host);
1552 
1553 	mmc_bus_put(host);
1554 
1555 	mmc_power_off(host);
1556 }
1557 EXPORT_SYMBOL(mmc_power_save_host);
1558 
1559 void mmc_power_restore_host(struct mmc_host *host)
1560 {
1561 	mmc_bus_get(host);
1562 
1563 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1564 		mmc_bus_put(host);
1565 		return;
1566 	}
1567 
1568 	mmc_power_up(host);
1569 	host->bus_ops->power_restore(host);
1570 
1571 	mmc_bus_put(host);
1572 }
1573 EXPORT_SYMBOL(mmc_power_restore_host);
1574 
1575 int mmc_card_awake(struct mmc_host *host)
1576 {
1577 	int err = -ENOSYS;
1578 
1579 	mmc_bus_get(host);
1580 
1581 	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
1582 		err = host->bus_ops->awake(host);
1583 
1584 	mmc_bus_put(host);
1585 
1586 	return err;
1587 }
1588 EXPORT_SYMBOL(mmc_card_awake);
1589 
1590 int mmc_card_sleep(struct mmc_host *host)
1591 {
1592 	int err = -ENOSYS;
1593 
1594 	mmc_bus_get(host);
1595 
1596 	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
1597 		err = host->bus_ops->sleep(host);
1598 
1599 	mmc_bus_put(host);
1600 
1601 	return err;
1602 }
1603 EXPORT_SYMBOL(mmc_card_sleep);
1604 
1605 int mmc_card_can_sleep(struct mmc_host *host)
1606 {
1607 	struct mmc_card *card = host->card;
1608 
1609 	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
1610 		return 1;
1611 	return 0;
1612 }
1613 EXPORT_SYMBOL(mmc_card_can_sleep);
1614 
1615 #ifdef CONFIG_PM
1616 
1617 /**
1618  *	mmc_suspend_host - suspend a host
1619  *	@host: mmc host
1620  */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	/* Quiesce deferred work before suspending the card */
	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* suspend is optional; a handler without it just stays put */
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
	}
	mmc_bus_put(host);

	/* Keep the card powered only if a driver asked for it */
	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);
1644 
1645 /**
1646  *	mmc_resume_host - resume a previously suspended host
1647  *	@host: mmc host
1648  */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Re-power unless the card was kept powered over suspend */
		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			/* Swallow the error; treat it like a card removal */
			err = 0;
		}
	}
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
1673 
/*
 * Do the card removal on suspend if the card is assumed removable.  Do
 * that in the PM notifier, while userspace isn't yet frozen, so we will
 * still be able to sync the card.
 */
/*
 * PM notifier: on suspend entry, disable rescans and force-remove any
 * card whose bus handler cannot suspend; on resume, re-enable rescans
 * and trigger a fresh detect.  Always returns 0 (notifier "done").
 */
int mmc_pm_notify(struct notifier_block *notify_block,
					unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;


	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		/* Block mmc_rescan() and wait out any run in flight */
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		/* Only remove the card if a handler exists but cannot suspend */
		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:

		/* Re-enable rescans and look for the card again */
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
1720 #endif
1721 
1722 static int __init mmc_init(void)
1723 {
1724 	int ret;
1725 
1726 	workqueue = create_singlethread_workqueue("kmmcd");
1727 	if (!workqueue)
1728 		return -ENOMEM;
1729 
1730 	ret = mmc_register_bus();
1731 	if (ret)
1732 		goto destroy_workqueue;
1733 
1734 	ret = mmc_register_host_class();
1735 	if (ret)
1736 		goto unregister_bus;
1737 
1738 	ret = sdio_register_bus();
1739 	if (ret)
1740 		goto unregister_host_class;
1741 
1742 	return 0;
1743 
1744 unregister_host_class:
1745 	mmc_unregister_host_class();
1746 unregister_bus:
1747 	mmc_unregister_bus();
1748 destroy_workqueue:
1749 	destroy_workqueue(workqueue);
1750 
1751 	return ret;
1752 }
1753 
/* Module teardown: unregister everything in reverse order of mmc_init(). */
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}
1761 
/* Initialise at subsys level so host drivers can register during boot */
subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");
1766