xref: /linux/drivers/mmc/core/core.c (revision 079c9534a96da9a85a2a2f9715851050fbfbf749)
1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/suspend.h>
27 #include <linux/fault-inject.h>
28 #include <linux/random.h>
29 
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/sd.h>
34 
35 #include "core.h"
36 #include "bus.h"
37 #include "host.h"
38 #include "sdio_bus.h"
39 
40 #include "mmc_ops.h"
41 #include "sd_ops.h"
42 #include "sdio_ops.h"
43 
44 static struct workqueue_struct *workqueue;
45 
46 /*
47  * Enabling software CRCs on the data blocks can be a significant (30%)
48  * performance cost, and for other reasons may not always be desired.
49  * So we allow it to be disabled.
50  */
51 bool use_spi_crc = 1;
52 module_param(use_spi_crc, bool, 0);
53 
54 /*
55  * We normally treat cards as removed during suspend if they are not
56  * known to be on a non-removable bus, to avoid the risk of writing
57  * back data to a different card after resume.  Allow this to be
58  * overridden if necessary.
59  */
60 #ifdef CONFIG_MMC_UNSAFE_RESUME
61 bool mmc_assume_removable;
62 #else
63 bool mmc_assume_removable = 1;
64 #endif
65 EXPORT_SYMBOL(mmc_assume_removable);
66 module_param_named(removable, mmc_assume_removable, bool, 0644);
67 MODULE_PARM_DESC(
68 	removable,
69 	"MMC/SD cards are removable and may be removed during suspend");
70 
71 /*
72  * Internal function. Schedule delayed work in the MMC work queue.
73  */
74 static int mmc_schedule_delayed_work(struct delayed_work *work,
75 				     unsigned long delay)
76 {
77 	return queue_delayed_work(workqueue, work, delay);
78 }
79 
80 /*
81  * Internal function. Flush all scheduled work from the MMC work queue.
82  */
83 static void mmc_flush_scheduled_work(void)
84 {
85 	flush_workqueue(workqueue);
86 }
87 
88 #ifdef CONFIG_FAIL_MMC_REQUEST
89 
90 /*
91  * Internal function. Inject random data errors.
92  * If mmc_data is NULL no errors are injected.
93  */
94 static void mmc_should_fail_request(struct mmc_host *host,
95 				    struct mmc_request *mrq)
96 {
97 	struct mmc_command *cmd = mrq->cmd;
98 	struct mmc_data *data = mrq->data;
99 	static const int data_errors[] = {
100 		-ETIMEDOUT,
101 		-EILSEQ,
102 		-EIO,
103 	};
104 
105 	if (!data)
106 		return;
107 
108 	if (cmd->error || data->error ||
109 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
110 		return;
111 
112 	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
113 	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
114 }
115 
116 #else /* CONFIG_FAIL_MMC_REQUEST */
117 
118 static inline void mmc_should_fail_request(struct mmc_host *host,
119 					   struct mmc_request *mrq)
120 {
121 }
122 
123 #endif /* CONFIG_FAIL_MMC_REQUEST */
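
/*
 * Example: with CONFIG_FAIL_MMC_REQUEST enabled, injection is driven
 * from userspace through the generic fault-injection debugfs
 * attributes exposed per host (see
 * Documentation/fault-injection/fault-injection.txt), e.g.:
 *
 *	echo 10 > /sys/kernel/debug/mmc0/fail_mmc_request/probability
 *	echo -1 > /sys/kernel/debug/mmc0/fail_mmc_request/times
 */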
124 
125 /**
126  *	mmc_request_done - finish processing an MMC request
127  *	@host: MMC host which completed request
128  *	@mrq: MMC request which completed
129  *
130  *	MMC drivers should call this function when they have completed
131  *	their processing of a request.
132  */
133 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
134 {
135 	struct mmc_command *cmd = mrq->cmd;
136 	int err = cmd->error;
137 
138 	if (err && cmd->retries && mmc_host_is_spi(host)) {
139 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
140 			cmd->retries = 0;
141 	}
142 
143 	if (err && cmd->retries && !mmc_card_removed(host->card)) {
144 		/*
145 		 * Request starter must handle retries - see
146 		 * mmc_wait_for_req_done().
147 		 */
148 		if (mrq->done)
149 			mrq->done(mrq);
150 	} else {
151 		mmc_should_fail_request(host, mrq);
152 
153 		led_trigger_event(host->led, LED_OFF);
154 
155 		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
156 			mmc_hostname(host), cmd->opcode, err,
157 			cmd->resp[0], cmd->resp[1],
158 			cmd->resp[2], cmd->resp[3]);
159 
160 		if (mrq->data) {
161 			pr_debug("%s:     %d bytes transferred: %d\n",
162 				mmc_hostname(host),
163 				mrq->data->bytes_xfered, mrq->data->error);
164 		}
165 
166 		if (mrq->stop) {
167 			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
168 				mmc_hostname(host), mrq->stop->opcode,
169 				mrq->stop->error,
170 				mrq->stop->resp[0], mrq->stop->resp[1],
171 				mrq->stop->resp[2], mrq->stop->resp[3]);
172 		}
173 
174 		if (mrq->done)
175 			mrq->done(mrq);
176 
177 		mmc_host_clk_release(host);
178 	}
179 }
180 
181 EXPORT_SYMBOL(mmc_request_done);
182 
183 static void
184 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
185 {
186 #ifdef CONFIG_MMC_DEBUG
187 	unsigned int i, sz;
188 	struct scatterlist *sg;
189 #endif
190 
191 	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
192 		 mmc_hostname(host), mrq->cmd->opcode,
193 		 mrq->cmd->arg, mrq->cmd->flags);
194 
195 	if (mrq->data) {
196 		pr_debug("%s:     blksz %d blocks %d flags %08x "
197 			"tsac %d ms nsac %d\n",
198 			mmc_hostname(host), mrq->data->blksz,
199 			mrq->data->blocks, mrq->data->flags,
200 			mrq->data->timeout_ns / 1000000,
201 			mrq->data->timeout_clks);
202 	}
203 
204 	if (mrq->stop) {
205 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
206 			 mmc_hostname(host), mrq->stop->opcode,
207 			 mrq->stop->arg, mrq->stop->flags);
208 	}
209 
210 	WARN_ON(!host->claimed);
211 
212 	mrq->cmd->error = 0;
213 	mrq->cmd->mrq = mrq;
214 	if (mrq->data) {
215 		BUG_ON(mrq->data->blksz > host->max_blk_size);
216 		BUG_ON(mrq->data->blocks > host->max_blk_count);
217 		BUG_ON(mrq->data->blocks * mrq->data->blksz >
218 			host->max_req_size);
219 
220 #ifdef CONFIG_MMC_DEBUG
221 		sz = 0;
222 		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
223 			sz += sg->length;
224 		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
225 #endif
226 
227 		mrq->cmd->data = mrq->data;
228 		mrq->data->error = 0;
229 		mrq->data->mrq = mrq;
230 		if (mrq->stop) {
231 			mrq->data->stop = mrq->stop;
232 			mrq->stop->error = 0;
233 			mrq->stop->mrq = mrq;
234 		}
235 	}
236 	mmc_host_clk_hold(host);
237 	led_trigger_event(host->led, LED_FULL);
238 	host->ops->request(host, mrq);
239 }
240 
241 static void mmc_wait_done(struct mmc_request *mrq)
242 {
243 	complete(&mrq->completion);
244 }
245 
246 static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
247 {
248 	init_completion(&mrq->completion);
249 	mrq->done = mmc_wait_done;
250 	if (mmc_card_removed(host->card)) {
251 		mrq->cmd->error = -ENOMEDIUM;
252 		complete(&mrq->completion);
253 		return;
254 	}
255 	mmc_start_request(host, mrq);
256 }
257 
258 static void mmc_wait_for_req_done(struct mmc_host *host,
259 				  struct mmc_request *mrq)
260 {
261 	struct mmc_command *cmd;
262 
263 	while (1) {
264 		wait_for_completion(&mrq->completion);
265 
266 		cmd = mrq->cmd;
267 		if (!cmd->error || !cmd->retries ||
268 		    mmc_card_removed(host->card))
269 			break;
270 
271 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
272 			 mmc_hostname(host), cmd->opcode, cmd->error);
273 		cmd->retries--;
274 		cmd->error = 0;
275 		host->ops->request(host, mrq);
276 	}
277 }
278 
279 /**
280  *	mmc_pre_req - Prepare for a new request
281  *	@host: MMC host to prepare command
282  *	@mrq: MMC request to prepare for
283  *	@is_first_req: true if there is no previously started request
284  *                     that may run in parallel to this call, otherwise false
285  *
286  *	mmc_pre_req() is called prior to mmc_start_req() to let the
287  *	host prepare for the new request. Preparation of a request may be
288  *	performed while another request is running on the host.
289  */
290 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
291 		 bool is_first_req)
292 {
293 	if (host->ops->pre_req)
294 		host->ops->pre_req(host, mrq, is_first_req);
295 }
296 
297 /**
298  *	mmc_post_req - Post process a completed request
299  *	@host: MMC host to post process command
300  *	@mrq: MMC request to post process for
301  *	@err: Error, if non zero, clean up any resources made in pre_req
302  *
303  *	Let the host post process a completed request. Post processing of
304  *	a request may be performed while another request is running.
305  */
306 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
307 			 int err)
308 {
309 	if (host->ops->post_req)
310 		host->ops->post_req(host, mrq, err);
311 }
312 
313 /**
314  *	mmc_start_req - start a non-blocking request
315  *	@host: MMC host to start command
316  *	@areq: async request to start
317  *	@error: out parameter returns 0 for success, otherwise non zero
318  *
319  *	Start a new MMC custom command request for a host.
320  *	If there is an ongoing async request, wait for that request to
321  *	complete, then start the new one and return.
322  *	Does not wait for the new request to complete.
323  *
324  *      Returns the completed request, or NULL if none completed.
325  *	Wait for an ongoing request (previously started) to complete and
326  *	return the completed request. If there is no ongoing request, NULL
327  *	is returned without waiting. NULL is not an error condition.
328  */
329 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
330 				    struct mmc_async_req *areq, int *error)
331 {
332 	int err = 0;
333 	struct mmc_async_req *data = host->areq;
334 
335 	/* Prepare a new request */
336 	if (areq)
337 		mmc_pre_req(host, areq->mrq, !host->areq);
338 
339 	if (host->areq) {
340 		mmc_wait_for_req_done(host, host->areq->mrq);
341 		err = host->areq->err_check(host->card, host->areq);
342 		if (err) {
343 			/* post process the completed failed request */
344 			mmc_post_req(host, host->areq->mrq, 0);
345 			if (areq)
346 				/*
347 				 * Cancel the new prepared request, because
348 				 * it can't run until the failed
349 				 * request has been properly handled.
350 				 */
351 				mmc_post_req(host, areq->mrq, -EINVAL);
352 
353 			host->areq = NULL;
354 			goto out;
355 		}
356 	}
357 
358 	if (areq)
359 		__mmc_start_req(host, areq->mrq);
360 
361 	if (host->areq)
362 		mmc_post_req(host, host->areq->mrq, 0);
363 
364 	host->areq = areq;
365  out:
366 	if (error)
367 		*error = err;
368 	return data;
369 }
370 EXPORT_SYMBOL(mmc_start_req);
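
/*
 * Example: a sketch of the double-buffered pattern a caller such as
 * the block driver uses.  'next_areq' is the request the caller has
 * prepared and 'handle_done' is a hypothetical helper; each call
 * starts the new request and hands back the one that just completed:
 *
 *	struct mmc_async_req *done;
 *	int err;
 *
 *	done = mmc_start_req(card->host, next_areq, &err);
 *	if (done)
 *		handle_done(done, err);
 */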
371 
372 /**
373  *	mmc_wait_for_req - start a request and wait for completion
374  *	@host: MMC host to start command
375  *	@mrq: MMC request to start
376  *
377  *	Start a new MMC custom command request for a host, and wait
378  *	for the command to complete. Does not attempt to parse the
379  *	response.
380  */
381 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
382 {
383 	__mmc_start_req(host, mrq);
384 	mmc_wait_for_req_done(host, mrq);
385 }
386 EXPORT_SYMBOL(mmc_wait_for_req);
387 
388 /**
389  *	mmc_interrupt_hpi - Issue a High Priority Interrupt
390  *	@card: the MMC card associated with the HPI transfer
391  *
392  *	Issues a High Priority Interrupt, and polls the card status
393  *	until it is out of the prg-state.
394  */
395 int mmc_interrupt_hpi(struct mmc_card *card)
396 {
397 	int err;
398 	u32 status;
399 
400 	BUG_ON(!card);
401 
402 	if (!card->ext_csd.hpi_en) {
403 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
404 		return 1;
405 	}
406 
407 	mmc_claim_host(card->host);
408 	err = mmc_send_status(card, &status);
409 	if (err) {
410 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
411 		goto out;
412 	}
413 
414 	/*
415 	 * If the card status is in PRG-state, we can send the HPI command.
416 	 */
417 	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
418 		do {
419 			/*
420 			 * We don't know when the HPI command will finish
421 			 * processing, so we need to resend HPI until out
422 			 * of prg-state, and keep checking the card status
423 			 * with SEND_STATUS.  If a timeout error occurs when
424 			 * sending the HPI command, we are already out of
425 			 * prg-state.
426 			 */
427 			err = mmc_send_hpi_cmd(card, &status);
428 			if (err)
429 				pr_debug("%s: abort HPI (%d error)\n",
430 					 mmc_hostname(card->host), err);
431 
432 			err = mmc_send_status(card, &status);
433 			if (err)
434 				break;
435 		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
436 	} else
437 		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
438 
439 out:
440 	mmc_release_host(card->host);
441 	return err;
442 }
443 EXPORT_SYMBOL(mmc_interrupt_hpi);
444 
445 /**
446  *	mmc_wait_for_cmd - start a command and wait for completion
447  *	@host: MMC host to start command
448  *	@cmd: MMC command to start
449  *	@retries: maximum number of retries
450  *
451  *	Start a new MMC command for a host, and wait for the command
452  *	to complete.  Return any error that occurred while the command
453  *	was executing.  Do not attempt to parse the response.
454  */
455 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
456 {
457 	struct mmc_request mrq = {NULL};
458 
459 	WARN_ON(!host->claimed);
460 
461 	memset(cmd->resp, 0, sizeof(cmd->resp));
462 	cmd->retries = retries;
463 
464 	mrq.cmd = cmd;
465 	cmd->data = NULL;
466 
467 	mmc_wait_for_req(host, &mrq);
468 
469 	return cmd->error;
470 }
471 
472 EXPORT_SYMBOL(mmc_wait_for_cmd);
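
/*
 * Example: a minimal SEND_STATUS (CMD13) issued through this helper,
 * assuming the caller has already claimed the host (compare the
 * polling loop in mmc_do_erase() below):
 *
 *	struct mmc_command cmd = {0};
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 */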
473 
474 /**
475  *	mmc_set_data_timeout - set the timeout for a data command
476  *	@data: data phase for command
477  *	@card: the MMC card associated with the data transfer
478  *
479  *	Computes the data timeout parameters according to the
480  *	correct algorithm given the card type.
481  */
482 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
483 {
484 	unsigned int mult;
485 
486 	/*
487 	 * SDIO cards only define an upper 1 s limit on access.
488 	 */
489 	if (mmc_card_sdio(card)) {
490 		data->timeout_ns = 1000000000;
491 		data->timeout_clks = 0;
492 		return;
493 	}
494 
495 	/*
496 	 * SD cards use a 100 multiplier rather than 10
497 	 */
498 	mult = mmc_card_sd(card) ? 100 : 10;
499 
500 	/*
501 	 * Scale up the multiplier (and therefore the timeout) by
502 	 * the r2w factor for writes.
503 	 */
504 	if (data->flags & MMC_DATA_WRITE)
505 		mult <<= card->csd.r2w_factor;
506 
507 	data->timeout_ns = card->csd.tacc_ns * mult;
508 	data->timeout_clks = card->csd.tacc_clks * mult;
509 
510 	/*
511 	 * SD cards also have an upper limit on the timeout.
512 	 */
513 	if (mmc_card_sd(card)) {
514 		unsigned int timeout_us, limit_us;
515 
516 		timeout_us = data->timeout_ns / 1000;
517 		if (mmc_host_clk_rate(card->host))
518 			timeout_us += data->timeout_clks * 1000 /
519 				(mmc_host_clk_rate(card->host) / 1000);
520 
521 		if (data->flags & MMC_DATA_WRITE)
522 			/*
523 			 * The limit is really 250 ms, but that is
524 			 * insufficient for some crappy cards.
525 			 */
526 			limit_us = 300000;
527 		else
528 			limit_us = 100000;
529 
530 		/*
531 		 * SDHC cards always use these fixed values.
532 		 */
533 		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
534 			data->timeout_ns = limit_us * 1000;
535 			data->timeout_clks = 0;
536 		}
537 	}
538 
539 	/*
540 	 * Some cards require longer data read timeout than indicated in CSD.
541 	 * Address this by setting the read timeout to a "reasonably high"
542 	 * value. For the cards tested, 300ms has proven enough. If necessary,
543 	 * this value can be increased if other problematic cards require this.
544 	 */
545 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
546 		data->timeout_ns = 300000000;
547 		data->timeout_clks = 0;
548 	}
549 
550 	/*
551 	 * Some cards need very high timeouts if driven in SPI mode.
552 	 * The worst observed timeout was 900ms after writing a
553 	 * continuous stream of data until the internal logic
554 	 * overflowed.
555 	 */
556 	if (mmc_host_is_spi(card->host)) {
557 		if (data->flags & MMC_DATA_WRITE) {
558 			if (data->timeout_ns < 1000000000)
559 				data->timeout_ns = 1000000000;	/* 1s */
560 		} else {
561 			if (data->timeout_ns < 100000000)
562 				data->timeout_ns =  100000000;	/* 100ms */
563 		}
564 	}
565 }
566 EXPORT_SYMBOL(mmc_set_data_timeout);
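
/*
 * Worked example (hypothetical values): an SD card with tacc_ns =
 * 1000000 (1 ms) and r2w_factor = 2 doing a write gets mult =
 * 100 << 2 = 400, so timeout_ns = 400 ms.  That exceeds the 300 ms
 * write limit above, so the timeout is clamped to 300 ms and
 * timeout_clks is set to 0.
 */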
567 
568 /**
569  *	mmc_align_data_size - pads a transfer size to a more optimal value
570  *	@card: the MMC card associated with the data transfer
571  *	@sz: original transfer size
572  *
573  *	Pads the original data size with a number of extra bytes in
574  *	order to avoid controller bugs and/or performance hits
575  *	(e.g. some controllers revert to PIO for certain sizes).
576  *
577  *	Returns the improved size, which might be unmodified.
578  *
579  *	Note that this function is only relevant when issuing a
580  *	single scatter gather entry.
581  */
582 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
583 {
584 	/*
585 	 * FIXME: We don't have a system for the controller to tell
586 	 * the core about its problems yet, so for now we just 32-bit
587 	 * align the size.
588 	 */
589 	sz = ((sz + 3) / 4) * 4;
590 
591 	return sz;
592 }
593 EXPORT_SYMBOL(mmc_align_data_size);
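
/*
 * For example, with the current 32-bit alignment rule a 13-byte
 * transfer is padded to ((13 + 3) / 4) * 4 = 16 bytes.
 */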
594 
595 /**
596  *	mmc_host_enable - enable a host.
597  *	@host: mmc host to enable
598  *
599  *	Hosts that support power saving can use the 'enable' and 'disable'
600  *	methods to exit and enter power saving states. For more information
601  *	see comments for struct mmc_host_ops.
602  */
603 int mmc_host_enable(struct mmc_host *host)
604 {
605 	if (!(host->caps & MMC_CAP_DISABLE))
606 		return 0;
607 
608 	if (host->en_dis_recurs)
609 		return 0;
610 
611 	if (host->nesting_cnt++)
612 		return 0;
613 
614 	cancel_delayed_work_sync(&host->disable);
615 
616 	if (host->enabled)
617 		return 0;
618 
619 	if (host->ops->enable) {
620 		int err;
621 
622 		host->en_dis_recurs = 1;
623 		err = host->ops->enable(host);
624 		host->en_dis_recurs = 0;
625 
626 		if (err) {
627 			pr_debug("%s: enable error %d\n",
628 				 mmc_hostname(host), err);
629 			return err;
630 		}
631 	}
632 	host->enabled = 1;
633 	return 0;
634 }
635 EXPORT_SYMBOL(mmc_host_enable);
636 
637 static int mmc_host_do_disable(struct mmc_host *host, int lazy)
638 {
639 	if (host->ops->disable) {
640 		int err;
641 
642 		host->en_dis_recurs = 1;
643 		err = host->ops->disable(host, lazy);
644 		host->en_dis_recurs = 0;
645 
646 		if (err < 0) {
647 			pr_debug("%s: disable error %d\n",
648 				 mmc_hostname(host), err);
649 			return err;
650 		}
651 		if (err > 0) {
652 			unsigned long delay = msecs_to_jiffies(err);
653 
654 			mmc_schedule_delayed_work(&host->disable, delay);
655 		}
656 	}
657 	host->enabled = 0;
658 	return 0;
659 }
660 
661 /**
662  *	mmc_host_disable - disable a host.
663  *	@host: mmc host to disable
664  *
665  *	Hosts that support power saving can use the 'enable' and 'disable'
666  *	methods to exit and enter power saving states. For more information
667  *	see comments for struct mmc_host_ops.
668  */
669 int mmc_host_disable(struct mmc_host *host)
670 {
671 	int err;
672 
673 	if (!(host->caps & MMC_CAP_DISABLE))
674 		return 0;
675 
676 	if (host->en_dis_recurs)
677 		return 0;
678 
679 	if (--host->nesting_cnt)
680 		return 0;
681 
682 	if (!host->enabled)
683 		return 0;
684 
685 	err = mmc_host_do_disable(host, 0);
686 	return err;
687 }
688 EXPORT_SYMBOL(mmc_host_disable);
689 
690 /**
691  *	__mmc_claim_host - exclusively claim a host
692  *	@host: mmc host to claim
693  *	@abort: whether or not the operation should be aborted
694  *
695  *	Claim a host for a set of operations.  If @abort is non-null and
696  *	dereferences to a non-zero value then this will return prematurely with
697  *	that non-zero value without acquiring the lock.  Returns zero
698  *	with the lock held otherwise.
699  */
700 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
701 {
702 	DECLARE_WAITQUEUE(wait, current);
703 	unsigned long flags;
704 	int stop;
705 
706 	might_sleep();
707 
708 	add_wait_queue(&host->wq, &wait);
709 	spin_lock_irqsave(&host->lock, flags);
710 	while (1) {
711 		set_current_state(TASK_UNINTERRUPTIBLE);
712 		stop = abort ? atomic_read(abort) : 0;
713 		if (stop || !host->claimed || host->claimer == current)
714 			break;
715 		spin_unlock_irqrestore(&host->lock, flags);
716 		schedule();
717 		spin_lock_irqsave(&host->lock, flags);
718 	}
719 	set_current_state(TASK_RUNNING);
720 	if (!stop) {
721 		host->claimed = 1;
722 		host->claimer = current;
723 		host->claim_cnt += 1;
724 	} else
725 		wake_up(&host->wq);
726 	spin_unlock_irqrestore(&host->lock, flags);
727 	remove_wait_queue(&host->wq, &wait);
728 	if (!stop)
729 		mmc_host_enable(host);
730 	return stop;
731 }
732 
733 EXPORT_SYMBOL(__mmc_claim_host);
734 
735 /**
736  *	mmc_try_claim_host - try exclusively to claim a host
737  *	@host: mmc host to claim
738  *
739  *	Returns %1 if the host is claimed, %0 otherwise.
740  */
741 int mmc_try_claim_host(struct mmc_host *host)
742 {
743 	int claimed_host = 0;
744 	unsigned long flags;
745 
746 	spin_lock_irqsave(&host->lock, flags);
747 	if (!host->claimed || host->claimer == current) {
748 		host->claimed = 1;
749 		host->claimer = current;
750 		host->claim_cnt += 1;
751 		claimed_host = 1;
752 	}
753 	spin_unlock_irqrestore(&host->lock, flags);
754 	return claimed_host;
755 }
756 EXPORT_SYMBOL(mmc_try_claim_host);
757 
758 /**
759  *	mmc_do_release_host - release a claimed host
760  *	@host: mmc host to release
761  *
762  *	If you successfully claimed a host, this function will
763  *	release it again.
764  */
765 void mmc_do_release_host(struct mmc_host *host)
766 {
767 	unsigned long flags;
768 
769 	spin_lock_irqsave(&host->lock, flags);
770 	if (--host->claim_cnt) {
771 		/* Release for nested claim */
772 		spin_unlock_irqrestore(&host->lock, flags);
773 	} else {
774 		host->claimed = 0;
775 		host->claimer = NULL;
776 		spin_unlock_irqrestore(&host->lock, flags);
777 		wake_up(&host->wq);
778 	}
779 }
780 EXPORT_SYMBOL(mmc_do_release_host);
781 
782 void mmc_host_deeper_disable(struct work_struct *work)
783 {
784 	struct mmc_host *host =
785 		container_of(work, struct mmc_host, disable.work);
786 
787 	/* If the host is claimed then we do not want to disable it anymore */
788 	if (!mmc_try_claim_host(host))
789 		return;
790 	mmc_host_do_disable(host, 1);
791 	mmc_do_release_host(host);
792 }
793 
794 /**
795  *	mmc_host_lazy_disable - lazily disable a host.
796  *	@host: mmc host to disable
797  *
798  *	Hosts that support power saving can use the 'enable' and 'disable'
799  *	methods to exit and enter power saving states. For more information
800  *	see comments for struct mmc_host_ops.
801  */
802 int mmc_host_lazy_disable(struct mmc_host *host)
803 {
804 	if (!(host->caps & MMC_CAP_DISABLE))
805 		return 0;
806 
807 	if (host->en_dis_recurs)
808 		return 0;
809 
810 	if (--host->nesting_cnt)
811 		return 0;
812 
813 	if (!host->enabled)
814 		return 0;
815 
816 	if (host->disable_delay) {
817 		mmc_schedule_delayed_work(&host->disable,
818 				msecs_to_jiffies(host->disable_delay));
819 		return 0;
820 	} else
821 		return mmc_host_do_disable(host, 1);
822 }
823 EXPORT_SYMBOL(mmc_host_lazy_disable);
824 
825 /**
826  *	mmc_release_host - release a host
827  *	@host: mmc host to release
828  *
829  *	Release a MMC host, allowing others to claim the host
830  *	for their operations.
831  */
832 void mmc_release_host(struct mmc_host *host)
833 {
834 	WARN_ON(!host->claimed);
835 
836 	mmc_host_lazy_disable(host);
837 
838 	mmc_do_release_host(host);
839 }
840 
841 EXPORT_SYMBOL(mmc_release_host);
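
/*
 * Example: the usual claim/release bracket around card access (see
 * mmc_interrupt_hpi() above for an in-tree instance); mmc_claim_host()
 * is the inline wrapper that calls __mmc_claim_host(host, NULL):
 *
 *	u32 status;
 *	int err;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 */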
842 
843 /*
844  * Internal function that does the actual ios call to the host driver,
845  * optionally printing some debug output.
846  */
847 static inline void mmc_set_ios(struct mmc_host *host)
848 {
849 	struct mmc_ios *ios = &host->ios;
850 
851 	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
852 		"width %u timing %u\n",
853 		 mmc_hostname(host), ios->clock, ios->bus_mode,
854 		 ios->power_mode, ios->chip_select, ios->vdd,
855 		 ios->bus_width, ios->timing);
856 
857 	if (ios->clock > 0)
858 		mmc_set_ungated(host);
859 	host->ops->set_ios(host, ios);
860 }
861 
862 /*
863  * Control chip select pin on a host.
864  */
865 void mmc_set_chip_select(struct mmc_host *host, int mode)
866 {
867 	mmc_host_clk_hold(host);
868 	host->ios.chip_select = mode;
869 	mmc_set_ios(host);
870 	mmc_host_clk_release(host);
871 }
872 
873 /*
874  * Sets the host clock to the highest possible frequency that
875  * is below "hz".
876  */
877 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
878 {
879 	WARN_ON(hz < host->f_min);
880 
881 	if (hz > host->f_max)
882 		hz = host->f_max;
883 
884 	host->ios.clock = hz;
885 	mmc_set_ios(host);
886 }
887 
888 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
889 {
890 	mmc_host_clk_hold(host);
891 	__mmc_set_clock(host, hz);
892 	mmc_host_clk_release(host);
893 }
894 
895 #ifdef CONFIG_MMC_CLKGATE
896 /*
897  * This gates the clock by setting it to 0 Hz.
898  */
899 void mmc_gate_clock(struct mmc_host *host)
900 {
901 	unsigned long flags;
902 
903 	spin_lock_irqsave(&host->clk_lock, flags);
904 	host->clk_old = host->ios.clock;
905 	host->ios.clock = 0;
906 	host->clk_gated = true;
907 	spin_unlock_irqrestore(&host->clk_lock, flags);
908 	mmc_set_ios(host);
909 }
910 
911 /*
912  * This restores the clock from gating by using the cached
913  * clock value.
914  */
915 void mmc_ungate_clock(struct mmc_host *host)
916 {
917 	/*
918 	 * We should previously have gated the clock, so the clock shall
919 	 * be 0 here! The clock may however be 0 during initialization,
920 	 * when some request operations are performed before setting
921 	 * the frequency. When ungate is requested in that situation
922 	 * we just ignore the call.
923 	 */
924 	if (host->clk_old) {
925 		BUG_ON(host->ios.clock);
926 		/* This call will also set host->clk_gated to false */
927 		__mmc_set_clock(host, host->clk_old);
928 	}
929 }
930 
931 void mmc_set_ungated(struct mmc_host *host)
932 {
933 	unsigned long flags;
934 
935 	/*
936 	 * We've been given a new frequency while the clock is gated,
937 	 * so make sure we regard this as ungating it.
938 	 */
939 	spin_lock_irqsave(&host->clk_lock, flags);
940 	host->clk_gated = false;
941 	spin_unlock_irqrestore(&host->clk_lock, flags);
942 }
943 
944 #else
945 void mmc_set_ungated(struct mmc_host *host)
946 {
947 }
948 #endif
949 
950 /*
951  * Change the bus mode (open drain/push-pull) of a host.
952  */
953 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
954 {
955 	mmc_host_clk_hold(host);
956 	host->ios.bus_mode = mode;
957 	mmc_set_ios(host);
958 	mmc_host_clk_release(host);
959 }
960 
961 /*
962  * Change data bus width of a host.
963  */
964 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
965 {
966 	mmc_host_clk_hold(host);
967 	host->ios.bus_width = width;
968 	mmc_set_ios(host);
969 	mmc_host_clk_release(host);
970 }
971 
972 /**
973  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
974  * @vdd:	voltage (mV)
975  * @low_bits:	prefer low bits in boundary cases
976  *
977  * This function returns the OCR bit number according to the provided @vdd
978  * value. If conversion is not possible, a negative errno value is returned.
979  *
980  * Depending on the @low_bits flag the function prefers low or high OCR bits
981  * on boundary voltages. For example,
982  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
983  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
984  *
985  * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
986  */
987 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
988 {
989 	const int max_bit = ilog2(MMC_VDD_35_36);
990 	int bit;
991 
992 	if (vdd < 1650 || vdd > 3600)
993 		return -EINVAL;
994 
995 	if (vdd >= 1650 && vdd <= 1950)
996 		return ilog2(MMC_VDD_165_195);
997 
998 	if (low_bits)
999 		vdd -= 1;
1000 
1001 	/* Base 2000 mV, step 100 mV, bit's base 8. */
1002 	bit = (vdd - 2000) / 100 + 8;
1003 	if (bit > max_bit)
1004 		return max_bit;
1005 	return bit;
1006 }
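
/*
 * Worked example: with @vdd = 3300 and @low_bits = true, vdd is first
 * decremented to 3299, so bit = (3299 - 2000) / 100 + 8 = 20, i.e.
 * ilog2(MMC_VDD_32_33).  With @low_bits = false, bit =
 * (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34).
 */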
1007 
1008 /**
1009  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1010  * @vdd_min:	minimum voltage value (mV)
1011  * @vdd_max:	maximum voltage value (mV)
1012  *
1013  * This function returns the OCR mask bits according to the provided @vdd_min
1014  * and @vdd_max values. If conversion is not possible the function returns 0.
1015  *
1016  * Notes wrt boundary cases:
1017  * This function sets the OCR bits for all boundary voltages, for example
1018  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1019  * MMC_VDD_34_35 mask.
1020  */
1021 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1022 {
1023 	u32 mask = 0;
1024 
1025 	if (vdd_max < vdd_min)
1026 		return 0;
1027 
1028 	/* Prefer high bits for the boundary vdd_max values. */
1029 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1030 	if (vdd_max < 0)
1031 		return 0;
1032 
1033 	/* Prefer low bits for the boundary vdd_min values. */
1034 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1035 	if (vdd_min < 0)
1036 		return 0;
1037 
1038 	/* Fill the mask, from max bit to min bit. */
1039 	while (vdd_max >= vdd_min)
1040 		mask |= 1 << vdd_max--;
1041 
1042 	return mask;
1043 }
1044 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1045 
1046 #ifdef CONFIG_REGULATOR
1047 
1048 /**
1049  * mmc_regulator_get_ocrmask - return mask of supported voltages
1050  * @supply: regulator to use
1051  *
1052  * This returns either a negative errno, or a mask of voltages that
1053  * can be provided to MMC/SD/SDIO devices using the specified voltage
1054  * regulator.  This would normally be called before registering the
1055  * MMC host adapter.
1056  */
1057 int mmc_regulator_get_ocrmask(struct regulator *supply)
1058 {
1059 	int			result = 0;
1060 	int			count;
1061 	int			i;
1062 
1063 	count = regulator_count_voltages(supply);
1064 	if (count < 0)
1065 		return count;
1066 
1067 	for (i = 0; i < count; i++) {
1068 		int		vdd_uV;
1069 		int		vdd_mV;
1070 
1071 		vdd_uV = regulator_list_voltage(supply, i);
1072 		if (vdd_uV <= 0)
1073 			continue;
1074 
1075 		vdd_mV = vdd_uV / 1000;
1076 		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1077 	}
1078 
1079 	return result;
1080 }
1081 EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
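
/*
 * Example: a host driver would typically feed the result straight into
 * its ocr_avail mask at probe time (a sketch; 'supply' was obtained
 * with regulator_get()):
 *
 *	int mask = mmc_regulator_get_ocrmask(supply);
 *
 *	if (mask > 0)
 *		mmc->ocr_avail = mask;
 */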
1082 
1083 /**
1084  * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1085  * @mmc: the host to regulate
1086  * @supply: regulator to use
1087  * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1088  *
1089  * Returns zero on success, else negative errno.
1090  *
1091  * MMC host drivers may use this to enable or disable a regulator using
1092  * a particular supply voltage.  This would normally be called from the
1093  * set_ios() method.
1094  */
1095 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1096 			struct regulator *supply,
1097 			unsigned short vdd_bit)
1098 {
1099 	int			result = 0;
1100 	int			min_uV, max_uV;
1101 
1102 	if (vdd_bit) {
1103 		int		tmp;
1104 		int		voltage;
1105 
1106 		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
1107 		 * bits this regulator doesn't quite support ... don't
1108 		 * be too picky, most cards and regulators are OK with
1109 		 * a 0.1V range goof (it's a small error percentage).
1110 		 */
1111 		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1112 		if (tmp == 0) {
1113 			min_uV = 1650 * 1000;
1114 			max_uV = 1950 * 1000;
1115 		} else {
1116 			min_uV = 1900 * 1000 + tmp * 100 * 1000;
1117 			max_uV = min_uV + 100 * 1000;
1118 		}
1119 
1120 		/* avoid needless changes to this voltage; the regulator
1121 		 * might not allow this operation
1122 		 */
1123 		voltage = regulator_get_voltage(supply);
1124 		if (voltage < 0)
1125 			result = voltage;
1126 		else if (voltage < min_uV || voltage > max_uV)
1127 			result = regulator_set_voltage(supply, min_uV, max_uV);
1128 		else
1129 			result = 0;
1130 
1131 		if (result == 0 && !mmc->regulator_enabled) {
1132 			result = regulator_enable(supply);
1133 			if (!result)
1134 				mmc->regulator_enabled = true;
1135 		}
1136 	} else if (mmc->regulator_enabled) {
1137 		result = regulator_disable(supply);
1138 		if (result == 0)
1139 			mmc->regulator_enabled = false;
1140 	}
1141 
1142 	if (result)
1143 		dev_err(mmc_dev(mmc),
1144 			"could not set regulator OCR (%d)\n", result);
1145 	return result;
1146 }
1147 EXPORT_SYMBOL(mmc_regulator_set_ocr);
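
/*
 * Example: from a host driver's set_ios() handler (a sketch; 'vcc' is
 * a hypothetical regulator the driver obtained with regulator_get()):
 *
 *	mmc_regulator_set_ocr(mmc, vcc, ios->vdd);
 */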
1148 
1149 #endif /* CONFIG_REGULATOR */
1150 
1151 /*
1152  * Mask off any voltages we don't support and select
1153  * the lowest voltage
1154  */
1155 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1156 {
1157 	int bit;
1158 
1159 	ocr &= host->ocr_avail;
1160 
1161 	bit = ffs(ocr);
1162 	if (bit) {
1163 		bit -= 1;
1164 
1165 		ocr &= 3 << bit;
1166 
1167 		mmc_host_clk_hold(host);
1168 		host->ios.vdd = bit;
1169 		mmc_set_ios(host);
1170 		mmc_host_clk_release(host);
1171 	} else {
1172 		pr_warning("%s: host doesn't support card's voltages\n",
1173 				mmc_hostname(host));
1174 		ocr = 0;
1175 	}
1176 
1177 	return ocr;
1178 }
1179 
1180 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
1181 {
1182 	struct mmc_command cmd = {0};
1183 	int err = 0;
1184 
1185 	BUG_ON(!host);
1186 
1187 	/*
1188 	 * Send CMD11 only if the request is to switch the card to
1189 	 * 1.8V signalling.
1190 	 */
1191 	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
1192 		cmd.opcode = SD_SWITCH_VOLTAGE;
1193 		cmd.arg = 0;
1194 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1195 
1196 		err = mmc_wait_for_cmd(host, &cmd, 0);
1197 		if (err)
1198 			return err;
1199 
1200 		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1201 			return -EIO;
1202 	}
1203 
1204 	host->ios.signal_voltage = signal_voltage;
1205 
1206 	if (host->ops->start_signal_voltage_switch)
1207 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1208 
1209 	return err;
1210 }
1211 
1212 /*
1213  * Select timing parameters for host.
1214  */
1215 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1216 {
1217 	mmc_host_clk_hold(host);
1218 	host->ios.timing = timing;
1219 	mmc_set_ios(host);
1220 	mmc_host_clk_release(host);
1221 }
1222 
1223 /*
1224  * Select appropriate driver type for host.
1225  */
1226 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1227 {
1228 	mmc_host_clk_hold(host);
1229 	host->ios.drv_type = drv_type;
1230 	mmc_set_ios(host);
1231 	mmc_host_clk_release(host);
1232 }
1233 
1234 static void mmc_poweroff_notify(struct mmc_host *host)
1235 {
1236 	struct mmc_card *card;
1237 	unsigned int timeout;
1238 	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1239 	int err = 0;
1240 
1241 	card = host->card;
1242 
1243 	/*
1244 	 * Send power notify command only if card
1245 	 * is mmc and notify state is powered ON
1246 	 */
1247 	if (card && mmc_card_mmc(card) &&
1248 	    (card->poweroff_notify_state == MMC_POWERED_ON)) {
1249 
1250 		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1251 			notify_type = EXT_CSD_POWER_OFF_SHORT;
1252 			timeout = card->ext_csd.generic_cmd6_time;
1253 			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1254 		} else {
1255 			notify_type = EXT_CSD_POWER_OFF_LONG;
1256 			timeout = card->ext_csd.power_off_longtime;
1257 			card->poweroff_notify_state = MMC_POWEROFF_LONG;
1258 		}
1259 
1260 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1261 				 EXT_CSD_POWER_OFF_NOTIFICATION,
1262 				 notify_type, timeout);
1263 
1264 		if (err && err != -EBADMSG)
1265 			pr_err("Device failed to respond within %d poweroff "
1266 			       "time. Forcefully powering down the device\n",
1267 			       timeout);
1268 
1269 		/* Set the card state to no notification after the poweroff */
1270 		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1271 	}
1272 }
1273 
1274 /*
1275  * Apply power to the MMC stack.  This is a two-stage process.
1276  * First, we enable power to the card without the clock running.
1277  * We then wait a bit for the power to stabilise.  Finally,
1278  * enable the bus drivers and clock to the card.
1279  *
1280  * We must _NOT_ enable the clock prior to power stabilising.
1281  *
1282  * If a host does all the power sequencing itself, ignore the
1283  * initial MMC_POWER_UP stage.
1284  */
1285 static void mmc_power_up(struct mmc_host *host)
1286 {
1287 	int bit;
1288 
1289 	mmc_host_clk_hold(host);
1290 
1291 	/* If ocr is set, we use it */
1292 	if (host->ocr)
1293 		bit = ffs(host->ocr) - 1;
1294 	else
1295 		bit = fls(host->ocr_avail) - 1;
1296 
1297 	host->ios.vdd = bit;
1298 	if (mmc_host_is_spi(host))
1299 		host->ios.chip_select = MMC_CS_HIGH;
1300 	else
1301 		host->ios.chip_select = MMC_CS_DONTCARE;
1302 	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1303 	host->ios.power_mode = MMC_POWER_UP;
1304 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1305 	host->ios.timing = MMC_TIMING_LEGACY;
1306 	mmc_set_ios(host);
1307 
1308 	/*
1309 	 * This delay should be sufficient to allow the power supply
1310 	 * to reach the minimum voltage.
1311 	 */
1312 	mmc_delay(10);
1313 
1314 	host->ios.clock = host->f_init;
1315 
1316 	host->ios.power_mode = MMC_POWER_ON;
1317 	mmc_set_ios(host);
1318 
1319 	/*
1320 	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1321 	 * time required to reach a stable voltage.
1322 	 */
1323 	mmc_delay(10);
1324 
1325 	mmc_host_clk_release(host);
1326 }
1327 
1328 void mmc_power_off(struct mmc_host *host)
1329 {
1330 	mmc_host_clk_hold(host);
1331 
1332 	host->ios.clock = 0;
1333 	host->ios.vdd = 0;
1334 
1335 	mmc_poweroff_notify(host);
1336 
1337 	/*
1338 	 * Reset ocr mask to be the highest possible voltage supported for
1339 	 * this mmc host. This value will be used at next power up.
1340 	 */
1341 	host->ocr = 1 << (fls(host->ocr_avail) - 1);
1342 
1343 	if (!mmc_host_is_spi(host)) {
1344 		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1345 		host->ios.chip_select = MMC_CS_DONTCARE;
1346 	}
1347 	host->ios.power_mode = MMC_POWER_OFF;
1348 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1349 	host->ios.timing = MMC_TIMING_LEGACY;
1350 	mmc_set_ios(host);
1351 
1352 	/*
1353 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1354 	 * XO-1.5, require a short delay after poweroff before the card
1355 	 * can be successfully turned on again.
1356 	 */
1357 	mmc_delay(1);
1358 
1359 	mmc_host_clk_release(host);
1360 }
1361 
1362 /*
1363  * Cleanup when the last reference to the bus operator is dropped.
1364  */
1365 static void __mmc_release_bus(struct mmc_host *host)
1366 {
1367 	BUG_ON(!host);
1368 	BUG_ON(host->bus_refs);
1369 	BUG_ON(!host->bus_dead);
1370 
1371 	host->bus_ops = NULL;
1372 }
1373 
1374 /*
1375  * Increase reference count of bus operator
1376  */
1377 static inline void mmc_bus_get(struct mmc_host *host)
1378 {
1379 	unsigned long flags;
1380 
1381 	spin_lock_irqsave(&host->lock, flags);
1382 	host->bus_refs++;
1383 	spin_unlock_irqrestore(&host->lock, flags);
1384 }
1385 
1386 /*
1387  * Decrease reference count of bus operator and free it if
1388  * it is the last reference.
1389  */
1390 static inline void mmc_bus_put(struct mmc_host *host)
1391 {
1392 	unsigned long flags;
1393 
1394 	spin_lock_irqsave(&host->lock, flags);
1395 	host->bus_refs--;
1396 	if ((host->bus_refs == 0) && host->bus_ops)
1397 		__mmc_release_bus(host);
1398 	spin_unlock_irqrestore(&host->lock, flags);
1399 }
1400 
1401 /*
1402  * Assign a mmc bus handler to a host. Only one bus handler may control a
1403  * host at any given time.
1404  */
1405 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1406 {
1407 	unsigned long flags;
1408 
1409 	BUG_ON(!host);
1410 	BUG_ON(!ops);
1411 
1412 	WARN_ON(!host->claimed);
1413 
1414 	spin_lock_irqsave(&host->lock, flags);
1415 
1416 	BUG_ON(host->bus_ops);
1417 	BUG_ON(host->bus_refs);
1418 
1419 	host->bus_ops = ops;
1420 	host->bus_refs = 1;
1421 	host->bus_dead = 0;
1422 
1423 	spin_unlock_irqrestore(&host->lock, flags);
1424 }
1425 
1426 /*
1427  * Remove the current bus handler from a host.
1428  */
1429 void mmc_detach_bus(struct mmc_host *host)
1430 {
1431 	unsigned long flags;
1432 
1433 	BUG_ON(!host);
1434 
1435 	WARN_ON(!host->claimed);
1436 	WARN_ON(!host->bus_ops);
1437 
1438 	spin_lock_irqsave(&host->lock, flags);
1439 
1440 	host->bus_dead = 1;
1441 
1442 	spin_unlock_irqrestore(&host->lock, flags);
1443 
1444 	mmc_bus_put(host);
1445 }
1446 
1447 /**
1448  *	mmc_detect_change - process change of state on a MMC socket
1449  *	@host: host which changed state.
1450  *	@delay: optional delay to wait before detection (jiffies)
1451  *
1452  *	MMC drivers should call this when they detect a card has been
1453  *	inserted or removed. The MMC layer will confirm that any
1454  *	present card is still functional, and initialize any newly
1455  *	inserted card.
1456  */
1457 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1458 {
1459 #ifdef CONFIG_MMC_DEBUG
1460 	unsigned long flags;
1461 	spin_lock_irqsave(&host->lock, flags);
1462 	WARN_ON(host->removed);
1463 	spin_unlock_irqrestore(&host->lock, flags);
1464 #endif
1465 	host->detect_change = 1;
1466 	mmc_schedule_delayed_work(&host->detect, delay);
1467 }
1468 
1469 EXPORT_SYMBOL(mmc_detect_change);
1470 
1471 void mmc_init_erase(struct mmc_card *card)
1472 {
1473 	unsigned int sz;
1474 
1475 	if (is_power_of_2(card->erase_size))
1476 		card->erase_shift = ffs(card->erase_size) - 1;
1477 	else
1478 		card->erase_shift = 0;
1479 
1480 	/*
1481 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1482 	 * card.  That is not desirable because it can take a long time
1483 	 * (minutes) potentially delaying more important I/O, and also the
1484  * timeout calculations become increasingly over-estimated.
1485 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1486 	 * to that size and alignment.
1487 	 *
1488 	 * For SD cards that define Allocation Unit size, limit erases to one
1489 	 * Allocation Unit at a time.  For MMC cards that define High Capacity
1490 	 * Erase Size, whether it is switched on or not, limit to that size.
1491 	 * Otherwise just have a stab at a good value.  For modern cards it
1492 	 * will end up being 4MiB.  Note that if the value is too small, it
1493 	 * can end up taking longer to erase.
1494 	 */
1495 	if (mmc_card_sd(card) && card->ssr.au) {
1496 		card->pref_erase = card->ssr.au;
1497 		card->erase_shift = ffs(card->ssr.au) - 1;
1498 	} else if (card->ext_csd.hc_erase_size) {
1499 		card->pref_erase = card->ext_csd.hc_erase_size;
1500 	} else {
1501 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1502 		if (sz < 128)
1503 			card->pref_erase = 512 * 1024 / 512;
1504 		else if (sz < 512)
1505 			card->pref_erase = 1024 * 1024 / 512;
1506 		else if (sz < 1024)
1507 			card->pref_erase = 2 * 1024 * 1024 / 512;
1508 		else
1509 			card->pref_erase = 4 * 1024 * 1024 / 512;
1510 		if (card->pref_erase < card->erase_size)
1511 			card->pref_erase = card->erase_size;
1512 		else {
1513 			sz = card->pref_erase % card->erase_size;
1514 			if (sz)
1515 				card->pref_erase += card->erase_size - sz;
1516 		}
1517 	}
1518 }
1519 
1520 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1521 				          unsigned int arg, unsigned int qty)
1522 {
1523 	unsigned int erase_timeout;
1524 
1525 	if (card->ext_csd.erase_group_def & 1) {
1526 		/* High Capacity Erase Group Size uses HC timeouts */
1527 		if (arg == MMC_TRIM_ARG)
1528 			erase_timeout = card->ext_csd.trim_timeout;
1529 		else
1530 			erase_timeout = card->ext_csd.hc_erase_timeout;
1531 	} else {
1532 		/* CSD Erase Group Size uses write timeout */
1533 		unsigned int mult = (10 << card->csd.r2w_factor);
1534 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
1535 		unsigned int timeout_us;
1536 
1537 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1538 		if (card->csd.tacc_ns < 1000000)
1539 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
1540 		else
1541 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
1542 
1543 		/*
1544 		 * ios.clock is only a target.  The real clock rate might be
1545 		 * less but not that much less, so fudge it by multiplying by 2.
1546 		 */
1547 		timeout_clks <<= 1;
1548 		timeout_us += (timeout_clks * 1000) /
1549 			      (mmc_host_clk_rate(card->host) / 1000);
1550 
1551 		erase_timeout = timeout_us / 1000;
1552 
1553 		/*
1554 		 * Theoretically, the calculation could underflow so round up
1555 		 * to 1ms in that case.
1556 		 */
1557 		if (!erase_timeout)
1558 			erase_timeout = 1;
1559 	}
1560 
1561 	/* Multiplier for secure operations */
1562 	if (arg & MMC_SECURE_ARGS) {
1563 		if (arg == MMC_SECURE_ERASE_ARG)
1564 			erase_timeout *= card->ext_csd.sec_erase_mult;
1565 		else
1566 			erase_timeout *= card->ext_csd.sec_trim_mult;
1567 	}
1568 
1569 	erase_timeout *= qty;
1570 
1571 	/*
1572 	 * Ensure at least a 1 second timeout for SPI as per
1573 	 * 'mmc_set_data_timeout()'
1574 	 */
1575 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1576 		erase_timeout = 1000;
1577 
1578 	return erase_timeout;
1579 }
1580 
1581 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1582 					 unsigned int arg,
1583 					 unsigned int qty)
1584 {
1585 	unsigned int erase_timeout;
1586 
1587 	if (card->ssr.erase_timeout) {
1588 		/* Erase timeout specified in SD Status Register (SSR) */
1589 		erase_timeout = card->ssr.erase_timeout * qty +
1590 				card->ssr.erase_offset;
1591 	} else {
1592 		/*
1593 		 * Erase timeout not specified in SD Status Register (SSR) so
1594 		 * use 250ms per write block.
1595 		 */
1596 		erase_timeout = 250 * qty;
1597 	}
1598 
1599 	/* Must not be less than 1 second */
1600 	if (erase_timeout < 1000)
1601 		erase_timeout = 1000;
1602 
1603 	return erase_timeout;
1604 }
1605 
1606 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1607 				      unsigned int arg,
1608 				      unsigned int qty)
1609 {
1610 	if (mmc_card_sd(card))
1611 		return mmc_sd_erase_timeout(card, arg, qty);
1612 	else
1613 		return mmc_mmc_erase_timeout(card, arg, qty);
1614 }
1615 
1616 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1617 			unsigned int to, unsigned int arg)
1618 {
1619 	struct mmc_command cmd = {0};
1620 	unsigned int qty = 0;
1621 	int err;
1622 
1623 	/*
1624 	 * qty is used to calculate the erase timeout which depends on how many
1625 	 * erase groups (or allocation units in SD terminology) are affected.
1626 	 * We count erasing part of an erase group as one erase group.
1627 	 * For SD, the allocation units are always a power of 2.  For MMC, the
1628  * erase group size is almost certainly also a power of 2, but the JEDEC
1629  * standard does not seem to insist on that, so we fall back to
1630 	 * division in that case.  SD may not specify an allocation unit size,
1631 	 * in which case the timeout is based on the number of write blocks.
1632 	 *
1633 	 * Note that the timeout for secure trim 2 will only be correct if the
1634 	 * number of erase groups specified is the same as the total of all
1635 	 * preceding secure trim 1 commands.  Since the power may have been
1636 	 * lost since the secure trim 1 commands occurred, it is generally
1637 	 * impossible to calculate the secure trim 2 timeout correctly.
1638 	 */
1639 	if (card->erase_shift)
1640 		qty += ((to >> card->erase_shift) -
1641 			(from >> card->erase_shift)) + 1;
1642 	else if (mmc_card_sd(card))
1643 		qty += to - from + 1;
1644 	else
1645 		qty += ((to / card->erase_size) -
1646 			(from / card->erase_size)) + 1;
1647 
1648 	if (!mmc_card_blockaddr(card)) {
1649 		from <<= 9;
1650 		to <<= 9;
1651 	}
1652 
1653 	if (mmc_card_sd(card))
1654 		cmd.opcode = SD_ERASE_WR_BLK_START;
1655 	else
1656 		cmd.opcode = MMC_ERASE_GROUP_START;
1657 	cmd.arg = from;
1658 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1659 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1660 	if (err) {
1661 		pr_err("mmc_erase: group start error %d, "
1662 		       "status %#x\n", err, cmd.resp[0]);
1663 		err = -EIO;
1664 		goto out;
1665 	}
1666 
1667 	memset(&cmd, 0, sizeof(struct mmc_command));
1668 	if (mmc_card_sd(card))
1669 		cmd.opcode = SD_ERASE_WR_BLK_END;
1670 	else
1671 		cmd.opcode = MMC_ERASE_GROUP_END;
1672 	cmd.arg = to;
1673 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1674 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1675 	if (err) {
1676 		pr_err("mmc_erase: group end error %d, status %#x\n",
1677 		       err, cmd.resp[0]);
1678 		err = -EIO;
1679 		goto out;
1680 	}
1681 
1682 	memset(&cmd, 0, sizeof(struct mmc_command));
1683 	cmd.opcode = MMC_ERASE;
1684 	cmd.arg = arg;
1685 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1686 	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1687 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1688 	if (err) {
1689 		pr_err("mmc_erase: erase error %d, status %#x\n",
1690 		       err, cmd.resp[0]);
1691 		err = -EIO;
1692 		goto out;
1693 	}
1694 
1695 	if (mmc_host_is_spi(card->host))
1696 		goto out;
1697 
1698 	do {
1699 		memset(&cmd, 0, sizeof(struct mmc_command));
1700 		cmd.opcode = MMC_SEND_STATUS;
1701 		cmd.arg = card->rca << 16;
1702 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1703 		/* Do not retry else we can't see errors */
1704 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
1705 		if (err || (cmd.resp[0] & 0xFDF92000)) {
1706 			pr_err("error %d requesting status %#x\n",
1707 				err, cmd.resp[0]);
1708 			err = -EIO;
1709 			goto out;
1710 		}
1711 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1712 		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1713 out:
1714 	return err;
1715 }
1716 
1717 /**
1718  * mmc_erase - erase sectors.
1719  * @card: card to erase
1720  * @from: first sector to erase
1721  * @nr: number of sectors to erase
1722  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1723  *
1724  * Caller must claim host before calling this function.
1725  */
1726 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1727 	      unsigned int arg)
1728 {
1729 	unsigned int rem, to = from + nr;
1730 
1731 	if (!(card->host->caps & MMC_CAP_ERASE) ||
1732 	    !(card->csd.cmdclass & CCC_ERASE))
1733 		return -EOPNOTSUPP;
1734 
1735 	if (!card->erase_size)
1736 		return -EOPNOTSUPP;
1737 
1738 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1739 		return -EOPNOTSUPP;
1740 
1741 	if ((arg & MMC_SECURE_ARGS) &&
1742 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1743 		return -EOPNOTSUPP;
1744 
1745 	if ((arg & MMC_TRIM_ARGS) &&
1746 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1747 		return -EOPNOTSUPP;
1748 
1749 	if (arg == MMC_SECURE_ERASE_ARG) {
1750 		if (from % card->erase_size || nr % card->erase_size)
1751 			return -EINVAL;
1752 	}
1753 
1754 	if (arg == MMC_ERASE_ARG) {
1755 		rem = from % card->erase_size;
1756 		if (rem) {
1757 			rem = card->erase_size - rem;
1758 			from += rem;
1759 			if (nr > rem)
1760 				nr -= rem;
1761 			else
1762 				return 0;
1763 		}
1764 		rem = nr % card->erase_size;
1765 		if (rem)
1766 			nr -= rem;
1767 	}
1768 
1769 	if (nr == 0)
1770 		return 0;
1771 
1772 	to = from + nr;
1773 
1774 	if (to <= from)
1775 		return -EINVAL;
1776 
1777 	/* 'from' and 'to' are inclusive */
1778 	to -= 1;
1779 
1780 	return mmc_do_erase(card, from, to, arg);
1781 }
1782 EXPORT_SYMBOL(mmc_erase);
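
/*
 * Example: erasing the first preferred-erase-size chunk of a card (a
 * sketch; pref_erase is a multiple of erase_size, so the range is
 * aligned, and the host must be claimed as noted above):
 *
 *	int err;
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_can_erase(card))
 *		err = mmc_erase(card, 0, card->pref_erase, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */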
1783 
1784 int mmc_can_erase(struct mmc_card *card)
1785 {
1786 	if ((card->host->caps & MMC_CAP_ERASE) &&
1787 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1788 		return 1;
1789 	return 0;
1790 }
1791 EXPORT_SYMBOL(mmc_can_erase);
1792 
1793 int mmc_can_trim(struct mmc_card *card)
1794 {
1795 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1796 		return 1;
1797 	if (mmc_can_discard(card))
1798 		return 1;
1799 	return 0;
1800 }
1801 EXPORT_SYMBOL(mmc_can_trim);
1802 
1803 int mmc_can_discard(struct mmc_card *card)
1804 {
1805 	/*
1806 	 * As there's no way to detect the discard support bit at v4.5,
1807 	 * use the s/w feature support field.
1808 	 */
1809 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1810 		return 1;
1811 	return 0;
1812 }
1813 EXPORT_SYMBOL(mmc_can_discard);
1814 
1815 int mmc_can_sanitize(struct mmc_card *card)
1816 {
1817 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1818 		return 1;
1819 	return 0;
1820 }
1821 EXPORT_SYMBOL(mmc_can_sanitize);
1822 
1823 int mmc_can_secure_erase_trim(struct mmc_card *card)
1824 {
1825 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1826 		return 1;
1827 	return 0;
1828 }
1829 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1830 
1831 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1832 			    unsigned int nr)
1833 {
1834 	if (!card->erase_size)
1835 		return 0;
1836 	if (from % card->erase_size || nr % card->erase_size)
1837 		return 0;
1838 	return 1;
1839 }
1840 EXPORT_SYMBOL(mmc_erase_group_aligned);
1841 
1842 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1843 					    unsigned int arg)
1844 {
1845 	struct mmc_host *host = card->host;
1846 	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1847 	unsigned int last_timeout = 0;
1848 
1849 	if (card->erase_shift)
1850 		max_qty = UINT_MAX >> card->erase_shift;
1851 	else if (mmc_card_sd(card))
1852 		max_qty = UINT_MAX;
1853 	else
1854 		max_qty = UINT_MAX / card->erase_size;
1855 
1856 	/* Find the largest qty with an OK timeout */
1857 	do {
1858 		y = 0;
1859 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1860 			timeout = mmc_erase_timeout(card, arg, qty + x);
1861 			if (timeout > host->max_discard_to)
1862 				break;
1863 			if (timeout < last_timeout)
1864 				break;
1865 			last_timeout = timeout;
1866 			y = x;
1867 		}
1868 		qty += y;
1869 	} while (y);
1870 
1871 	if (!qty)
1872 		return 0;
1873 
1874 	if (qty == 1)
1875 		return 1;
1876 
1877 	/* Convert qty to sectors */
1878 	if (card->erase_shift)
1879 		max_discard = --qty << card->erase_shift;
1880 	else if (mmc_card_sd(card))
1881 		max_discard = qty;
1882 	else
1883 		max_discard = --qty * card->erase_size;
1884 
1885 	return max_discard;
1886 }
1887 
1888 unsigned int mmc_calc_max_discard(struct mmc_card *card)
1889 {
1890 	struct mmc_host *host = card->host;
1891 	unsigned int max_discard, max_trim;
1892 
1893 	if (!host->max_discard_to)
1894 		return UINT_MAX;
1895 
1896 	/*
1897 	 * Without erase_group_def set, MMC erase timeout depends on clock
1898 	 * frequency, which can change.  In that case, the best choice is
1899 	 * just the preferred erase size.
1900 	 */
1901 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1902 		return card->pref_erase;
1903 
1904 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1905 	if (mmc_can_trim(card)) {
1906 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1907 		if (max_trim < max_discard)
1908 			max_discard = max_trim;
1909 	} else if (max_discard < card->erase_size) {
1910 		max_discard = 0;
1911 	}
1912 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1913 		 mmc_hostname(host), max_discard, host->max_discard_to);
1914 	return max_discard;
1915 }
1916 EXPORT_SYMBOL(mmc_calc_max_discard);
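
/*
 * Example: the block driver uses this value to cap the discard limit
 * it advertises to the block layer (a sketch; 'q' is the card's
 * request queue):
 *
 *	blk_queue_max_discard_sectors(q, mmc_calc_max_discard(card));
 */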
1917 
1918 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1919 {
1920 	struct mmc_command cmd = {0};
1921 
1922 	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1923 		return 0;
1924 
1925 	cmd.opcode = MMC_SET_BLOCKLEN;
1926 	cmd.arg = blocklen;
1927 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1928 	return mmc_wait_for_cmd(card->host, &cmd, 5);
1929 }
1930 EXPORT_SYMBOL(mmc_set_blocklen);
1931 
1932 static void mmc_hw_reset_for_init(struct mmc_host *host)
1933 {
1934 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1935 		return;
1936 	mmc_host_clk_hold(host);
1937 	host->ops->hw_reset(host);
1938 	mmc_host_clk_release(host);
1939 }
1940 
1941 int mmc_can_reset(struct mmc_card *card)
1942 {
1943 	u8 rst_n_function;
1944 
1945 	if (!mmc_card_mmc(card))
1946 		return 0;
1947 	rst_n_function = card->ext_csd.rst_n_function;
1948 	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
1949 		return 0;
1950 	return 1;
1951 }
1952 EXPORT_SYMBOL(mmc_can_reset);
1953 
1954 static int mmc_do_hw_reset(struct mmc_host *host, int check)
1955 {
1956 	struct mmc_card *card = host->card;
1957 
1958 	if (!host->bus_ops->power_restore)
1959 		return -EOPNOTSUPP;
1960 
1961 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1962 		return -EOPNOTSUPP;
1963 
1964 	if (!card)
1965 		return -EINVAL;
1966 
1967 	if (!mmc_can_reset(card))
1968 		return -EOPNOTSUPP;
1969 
1970 	mmc_host_clk_hold(host);
1971 	mmc_set_clock(host, host->f_init);
1972 
1973 	host->ops->hw_reset(host);
1974 
1975 	/* If the reset has happened, then a status command will fail */
1976 	if (check) {
1977 		struct mmc_command cmd = {0};
1978 		int err;
1979 
1980 		cmd.opcode = MMC_SEND_STATUS;
1981 		if (!mmc_host_is_spi(card->host))
1982 			cmd.arg = card->rca << 16;
1983 		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1984 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
1985 		if (!err) {
1986 			mmc_host_clk_release(host);
1987 			return -ENOSYS;
1988 		}
1989 	}
1990 
1991 	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
1992 	if (mmc_host_is_spi(host)) {
1993 		host->ios.chip_select = MMC_CS_HIGH;
1994 		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1995 	} else {
1996 		host->ios.chip_select = MMC_CS_DONTCARE;
1997 		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1998 	}
1999 	host->ios.bus_width = MMC_BUS_WIDTH_1;
2000 	host->ios.timing = MMC_TIMING_LEGACY;
2001 	mmc_set_ios(host);
2002 
2003 	mmc_host_clk_release(host);
2004 
2005 	return host->bus_ops->power_restore(host);
2006 }
2007 
2008 int mmc_hw_reset(struct mmc_host *host)
2009 {
2010 	return mmc_do_hw_reset(host, 0);
2011 }
2012 EXPORT_SYMBOL(mmc_hw_reset);
2013 
2014 int mmc_hw_reset_check(struct mmc_host *host)
2015 {
2016 	return mmc_do_hw_reset(host, 1);
2017 }
2018 EXPORT_SYMBOL(mmc_hw_reset_check);
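
/*
 * The two wrappers above differ only in verification: mmc_hw_reset()
 * trusts the reset pulse, while mmc_hw_reset_check() sends SEND_STATUS
 * afterwards and returns -ENOSYS if the card still answers, meaning
 * the reset line is evidently not functional.  Illustrative caller
 * (sketch):
 *
 *	err = mmc_hw_reset_check(host);
 *	if (err == -ENOSYS)
 *		pr_warning("%s: reset line not functional\n",
 *			   mmc_hostname(host));
 */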
2019 
2020 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2021 {
2022 	host->f_init = freq;
2023 
2024 #ifdef CONFIG_MMC_DEBUG
2025 	pr_info("%s: %s: trying to init card at %u Hz\n",
2026 		mmc_hostname(host), __func__, host->f_init);
2027 #endif
2028 	mmc_power_up(host);
2029 
2030 	/*
2031 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2032 	 * do a hardware reset if possible.
2033 	 */
2034 	mmc_hw_reset_for_init(host);
2035 
2036 	/*
2037 	 * sdio_reset sends CMD52 to reset the card.  Since we do not know
2038 	 * if the card is being re-initialized, just send it.  CMD52
2039 	 * should be ignored by SD/eMMC cards.
2040 	 */
2041 	sdio_reset(host);
2042 	mmc_go_idle(host);
2043 
2044 	mmc_send_if_cond(host, host->ocr_avail);
2045 
2046 	/* Order's important: probe SDIO, then SD, then MMC */
2047 	if (!mmc_attach_sdio(host))
2048 		return 0;
2049 	if (!mmc_attach_sd(host))
2050 		return 0;
2051 	if (!mmc_attach_mmc(host))
2052 		return 0;
2053 
2054 	mmc_power_off(host);
2055 	return -EIO;
2056 }
2057 
2058 int _mmc_detect_card_removed(struct mmc_host *host)
2059 {
2060 	int ret;
2061 
2062 	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
2063 		return 0;
2064 
2065 	if (!host->card || mmc_card_removed(host->card))
2066 		return 1;
2067 
2068 	ret = host->bus_ops->alive(host);
2069 	if (ret) {
2070 		mmc_card_set_removed(host->card);
2071 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2072 	}
2073 
2074 	return ret;
2075 }
2076 
2077 int mmc_detect_card_removed(struct mmc_host *host)
2078 {
2079 	struct mmc_card *card = host->card;
2080 
2081 	WARN_ON(!host->claimed);
2082 	/*
2083 	 * The card will be considered unchanged unless we have been asked to
2084 	 * detect a change or host requires polling to provide card detection.
2085 	 * detect a change or the host requires polling to provide card detection.
2086 	if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2087 		return mmc_card_removed(card);
2088 
2089 	host->detect_change = 0;
2090 
2091 	return _mmc_detect_card_removed(host);
2092 }
2093 EXPORT_SYMBOL(mmc_detect_card_removed);
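
/*
 * Illustrative use (sketch): since this must run with the host
 * claimed (note the WARN_ON above), an I/O error path would look
 * roughly like:
 *
 *	mmc_claim_host(card->host);
 *	gone = mmc_detect_card_removed(card->host);
 *	mmc_release_host(card->host);
 *	if (gone)
 *		return -ENOMEDIUM;	(give up instead of retrying)
 */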
2094 
2095 void mmc_rescan(struct work_struct *work)
2096 {
2097 	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
2098 	struct mmc_host *host =
2099 		container_of(work, struct mmc_host, detect.work);
2100 	int i;
2101 
2102 	if (host->rescan_disable)
2103 		return;
2104 
2105 	mmc_bus_get(host);
2106 
2107 	/*
2108 	 * if there is a _removable_ card registered, check whether it is
2109 	 * still present
2110 	 */
2111 	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead &&
2112 	    !(host->caps & MMC_CAP_NONREMOVABLE))
2113 		host->bus_ops->detect(host);
2114 
2115 	host->detect_change = 0;
2116 
2117 	/*
2118 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2119 	 * the card is no longer present.
2120 	 */
2121 	mmc_bus_put(host);
2122 	mmc_bus_get(host);
2123 
2124 	/* if there is still a card present, stop here */
2125 	if (host->bus_ops != NULL) {
2126 		mmc_bus_put(host);
2127 		goto out;
2128 	}
2129 
2130 	/*
2131 	 * Only we can add a new handler, so it's safe to
2132 	 * release the lock here.
2133 	 */
2134 	mmc_bus_put(host);
2135 
2136 	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
2137 		goto out;
2138 
2139 	mmc_claim_host(host);
2140 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2141 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2142 			break;
2143 		if (freqs[i] <= host->f_min)
2144 			break;
2145 	}
2146 	mmc_release_host(host);
2147 
2148  out:
2149 	if (host->caps & MMC_CAP_NEEDS_POLL)
2150 		mmc_schedule_delayed_work(&host->detect, HZ);
2151 }
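
/*
 * Worked example of the frequency ladder above, with a hypothetical
 * host->f_min of 250 kHz: the loop tries 400 kHz, then 300 kHz, then
 * max(200 kHz, f_min) = 250 kHz; because freqs[2] <= f_min it then
 * stops rather than retrying rates the host cannot produce anyway.
 */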
2152 
2153 void mmc_start_host(struct mmc_host *host)
2154 {
2155 	mmc_power_off(host);
2156 	mmc_detect_change(host, 0);
2157 }
2158 
2159 void mmc_stop_host(struct mmc_host *host)
2160 {
2161 #ifdef CONFIG_MMC_DEBUG
2162 	unsigned long flags;
2163 	spin_lock_irqsave(&host->lock, flags);
2164 	host->removed = 1;
2165 	spin_unlock_irqrestore(&host->lock, flags);
2166 #endif
2167 
2168 	if (host->caps & MMC_CAP_DISABLE)
2169 		cancel_delayed_work(&host->disable);
2170 	cancel_delayed_work_sync(&host->detect);
2171 	mmc_flush_scheduled_work();
2172 
2173 	/* clear pm flags now and let card drivers set them as needed */
2174 	host->pm_flags = 0;
2175 
2176 	mmc_bus_get(host);
2177 	if (host->bus_ops && !host->bus_dead) {
2178 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2179 		if (host->bus_ops->remove)
2180 			host->bus_ops->remove(host);
2181 
2182 		mmc_claim_host(host);
2183 		mmc_detach_bus(host);
2184 		mmc_power_off(host);
2185 		mmc_release_host(host);
2186 		mmc_bus_put(host);
2187 		return;
2188 	}
2189 	mmc_bus_put(host);
2190 
2191 	BUG_ON(host->card);
2192 
2193 	mmc_power_off(host);
2194 }
2195 
2196 int mmc_power_save_host(struct mmc_host *host)
2197 {
2198 	int ret = 0;
2199 
2200 #ifdef CONFIG_MMC_DEBUG
2201 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2202 #endif
2203 
2204 	mmc_bus_get(host);
2205 
2206 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2207 		mmc_bus_put(host);
2208 		return -EINVAL;
2209 	}
2210 
2211 	if (host->bus_ops->power_save)
2212 		ret = host->bus_ops->power_save(host);
2213 
2214 	mmc_bus_put(host);
2215 
2216 	mmc_power_off(host);
2217 
2218 	return ret;
2219 }
2220 EXPORT_SYMBOL(mmc_power_save_host);
2221 
2222 int mmc_power_restore_host(struct mmc_host *host)
2223 {
2224 	int ret;
2225 
2226 #ifdef CONFIG_MMC_DEBUG
2227 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2228 #endif
2229 
2230 	mmc_bus_get(host);
2231 
2232 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2233 		mmc_bus_put(host);
2234 		return -EINVAL;
2235 	}
2236 
2237 	mmc_power_up(host);
2238 	ret = host->bus_ops->power_restore(host);
2239 
2240 	mmc_bus_put(host);
2241 
2242 	return ret;
2243 }
2244 EXPORT_SYMBOL(mmc_power_restore_host);
2245 
2246 int mmc_card_awake(struct mmc_host *host)
2247 {
2248 	int err = -ENOSYS;
2249 
2250 	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2251 		return 0;
2252 
2253 	mmc_bus_get(host);
2254 
2255 	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
2256 		err = host->bus_ops->awake(host);
2257 
2258 	mmc_bus_put(host);
2259 
2260 	return err;
2261 }
2262 EXPORT_SYMBOL(mmc_card_awake);
2263 
2264 int mmc_card_sleep(struct mmc_host *host)
2265 {
2266 	int err = -ENOSYS;
2267 
2268 	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2269 		return 0;
2270 
2271 	mmc_bus_get(host);
2272 
2273 	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
2274 		err = host->bus_ops->sleep(host);
2275 
2276 	mmc_bus_put(host);
2277 
2278 	return err;
2279 }
2280 EXPORT_SYMBOL(mmc_card_sleep);
2281 
2282 int mmc_card_can_sleep(struct mmc_host *host)
2283 {
2284 	struct mmc_card *card = host->card;
2285 
2286 	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
2287 		return 1;
2288 	return 0;
2289 }
2290 EXPORT_SYMBOL(mmc_card_can_sleep);
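
/*
 * Note: the sleep/awake pair above only does anything for cards whose
 * bus_ops implement it, and mmc_card_can_sleep() shows when that is
 * worthwhile: MMC (eMMC) cards with EXT_CSD revision 3 or later.
 * Hosts that set MMC_CAP2_NO_SLEEP_CMD skip the command entirely;
 * both helpers return 0 early in that case.
 */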
2291 
2292 /*
2293  * Flush the cache to the non-volatile storage.
2294  */
2295 int mmc_flush_cache(struct mmc_card *card)
2296 {
2297 	struct mmc_host *host = card->host;
2298 	int err = 0;
2299 
2300 	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
2301 		return err;
2302 
2303 	if (mmc_card_mmc(card) &&
2304 			(card->ext_csd.cache_size > 0) &&
2305 			(card->ext_csd.cache_ctrl & 1)) {
2306 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2307 				EXT_CSD_FLUSH_CACHE, 1, 0);
2308 		if (err)
2309 			pr_err("%s: cache flush error %d\n",
2310 					mmc_hostname(card->host), err);
2311 	}
2312 
2313 	return err;
2314 }
2315 EXPORT_SYMBOL(mmc_flush_cache);
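
/*
 * Usage note: the suspend path below relies on the same mechanism
 * indirectly: mmc_suspend_host() calls mmc_cache_ctrl(host, 0), and
 * turning the cache off obliges the card to flush it.  A caller that
 * wants the data durable without disabling the cache can invoke
 * mmc_flush_cache(card) directly, e.g. from a shutdown path.
 */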
2316 
2317 /*
2318  * Turn the cache ON/OFF.
2319  * Turning the cache OFF shall trigger flushing of the data
2320  * to the non-volatile storage.
2321  */
2322 int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2323 {
2324 	struct mmc_card *card = host->card;
2325 	unsigned int timeout;
2326 	int err = 0;
2327 
2328 	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
2329 			mmc_card_is_removable(host))
2330 		return err;
2331 
2332 	if (card && mmc_card_mmc(card) &&
2333 			(card->ext_csd.cache_size > 0)) {
2334 		enable = !!enable;
2335 
2336 		if (card->ext_csd.cache_ctrl ^ enable) {
2337 			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
2338 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2339 					EXT_CSD_CACHE_CTRL, enable, timeout);
2340 			if (err)
2341 				pr_err("%s: cache %s error %d\n",
2342 						mmc_hostname(card->host),
2343 						enable ? "on" : "off",
2344 						err);
2345 			else
2346 				card->ext_csd.cache_ctrl = enable;
2347 		}
2348 	}
2349 
2350 	return err;
2351 }
2352 EXPORT_SYMBOL(mmc_cache_ctrl);
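
/*
 * Note: the 'cache_ctrl ^ enable' test above makes the helper
 * idempotent: the SWITCH command is only issued when the requested
 * state differs from the last value written, so e.g. a second
 * mmc_cache_ctrl(host, 1) in a row is a no-op.
 */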
2353 
2354 #ifdef CONFIG_PM
2355 
2356 /**
2357  *	mmc_suspend_host - suspend a host
2358  *	@host: mmc host
2359  */
2360 int mmc_suspend_host(struct mmc_host *host)
2361 {
2362 	int err = 0;
2363 
2364 	if (host->caps & MMC_CAP_DISABLE)
2365 		cancel_delayed_work(&host->disable);
2366 	cancel_delayed_work(&host->detect);
2367 	mmc_flush_scheduled_work();
2368 	if (mmc_try_claim_host(host)) {
2369 		err = mmc_cache_ctrl(host, 0);
2370 		mmc_do_release_host(host);
2371 	} else {
2372 		err = -EBUSY;
2373 	}
2374 
2375 	if (err)
2376 		goto out;
2377 
2378 	mmc_bus_get(host);
2379 	if (host->bus_ops && !host->bus_dead) {
2380 
2381 		/*
2382 		 * A long response time is not acceptable for device drivers
2383 		 * when doing suspend.  Prevent mmc_claim_host() in the suspend
2384 		 * sequence from potentially waiting "forever" by trying to
2385 		 * pre-claim the host here.
2386 		 */
2387 		if (mmc_try_claim_host(host)) {
2388 			if (host->bus_ops->suspend) {
2389 				/*
2390 				 * For eMMC 4.5 devices, send the poweroff
2391 				 * notification before sleep: in the sleep state
2392 				 * they respond only to the RESET and AWAKE commands.
2393 				 */
2394 				mmc_poweroff_notify(host);
2395 				err = host->bus_ops->suspend(host);
2396 			}
2397 			mmc_do_release_host(host);
2398 
2399 			if (err == -ENOSYS || !host->bus_ops->resume) {
2400 				/*
2401 				 * We simply "remove" the card in this case.
2402 				 * It will be redetected on resume.  (Calling
2403 				 * bus_ops->remove() with a claimed host can
2404 				 * deadlock.)
2405 				 */
2406 				if (host->bus_ops->remove)
2407 					host->bus_ops->remove(host);
2408 				mmc_claim_host(host);
2409 				mmc_detach_bus(host);
2410 				mmc_power_off(host);
2411 				mmc_release_host(host);
2412 				host->pm_flags = 0;
2413 				err = 0;
2414 			}
2415 		} else {
2416 			err = -EBUSY;
2417 		}
2418 	}
2419 	mmc_bus_put(host);
2420 
2421 	if (!err && !mmc_card_keep_power(host))
2422 		mmc_power_off(host);
2423 
2424 out:
2425 	return err;
2426 }
2428 EXPORT_SYMBOL(mmc_suspend_host);
2429 
2430 /**
2431  *	mmc_resume_host - resume a previously suspended host
2432  *	@host: mmc host
2433  */
2434 int mmc_resume_host(struct mmc_host *host)
2435 {
2436 	int err = 0;
2437 
2438 	mmc_bus_get(host);
2439 	if (host->bus_ops && !host->bus_dead) {
2440 		if (!mmc_card_keep_power(host)) {
2441 			mmc_power_up(host);
2442 			mmc_select_voltage(host, host->ocr);
2443 			/*
2444 			 * Tell runtime PM core we just powered up the card,
2445 			 * since it still believes the card is powered off.
2446 			 * Note that currently runtime PM is only enabled
2447 			 * for SDIO cards on hosts with MMC_CAP_POWER_OFF_CARD set.
2448 			 */
2449 			if (mmc_card_sdio(host->card) &&
2450 			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
2451 				pm_runtime_disable(&host->card->dev);
2452 				pm_runtime_set_active(&host->card->dev);
2453 				pm_runtime_enable(&host->card->dev);
2454 			}
2455 		}
2456 		BUG_ON(!host->bus_ops->resume);
2457 		err = host->bus_ops->resume(host);
2458 		if (err) {
2459 			pr_warning("%s: error %d during resume (card was removed?)\n",
2460 				   mmc_hostname(host), err);
2462 			err = 0;
2463 		}
2464 	}
2465 	host->pm_flags &= ~MMC_PM_KEEP_POWER;
2466 	mmc_bus_put(host);
2467 
2468 	return err;
2469 }
2470 EXPORT_SYMBOL(mmc_resume_host);
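
/*
 * Illustrative callers (sketch, hypothetical driver): host controller
 * drivers typically wire this pair into their own PM callbacks,
 * roughly:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *		return mmc_suspend_host(mmc);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *		return mmc_resume_host(mmc);
 *	}
 */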
2471 
2472 /* Do the card removal on suspend if the card is assumed removable.
2473  * Do it in the PM notifier while userspace isn't yet frozen, so we
2474  * will still be able to sync the card.
2475  */
2476 int mmc_pm_notify(struct notifier_block *notify_block,
2477 					unsigned long mode, void *unused)
2478 {
2479 	struct mmc_host *host = container_of(
2480 		notify_block, struct mmc_host, pm_notify);
2481 	unsigned long flags;
2482 
2484 	switch (mode) {
2485 	case PM_HIBERNATION_PREPARE:
2486 	case PM_SUSPEND_PREPARE:
2487 
2488 		spin_lock_irqsave(&host->lock, flags);
2489 		host->rescan_disable = 1;
2490 		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2491 		spin_unlock_irqrestore(&host->lock, flags);
2492 		cancel_delayed_work_sync(&host->detect);
2493 
2494 		if (!host->bus_ops || host->bus_ops->suspend)
2495 			break;
2496 
2497 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2498 		if (host->bus_ops->remove)
2499 			host->bus_ops->remove(host);
2500 
2501 		mmc_claim_host(host);
2502 		mmc_detach_bus(host);
2503 		mmc_power_off(host);
2504 		mmc_release_host(host);
2505 		host->pm_flags = 0;
2506 		break;
2507 
2508 	case PM_POST_SUSPEND:
2509 	case PM_POST_HIBERNATION:
2510 	case PM_POST_RESTORE:
2511 
2512 		spin_lock_irqsave(&host->lock, flags);
2513 		host->rescan_disable = 0;
2514 		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
2515 		spin_unlock_irqrestore(&host->lock, flags);
2516 		mmc_detect_change(host, 0);
2517 
2518 	}
2519 
2520 	return 0;
2521 }
2522 #endif
2523 
2524 static int __init mmc_init(void)
2525 {
2526 	int ret;
2527 
2528 	workqueue = alloc_ordered_workqueue("kmmcd", 0);
2529 	if (!workqueue)
2530 		return -ENOMEM;
2531 
2532 	ret = mmc_register_bus();
2533 	if (ret)
2534 		goto destroy_workqueue;
2535 
2536 	ret = mmc_register_host_class();
2537 	if (ret)
2538 		goto unregister_bus;
2539 
2540 	ret = sdio_register_bus();
2541 	if (ret)
2542 		goto unregister_host_class;
2543 
2544 	return 0;
2545 
2546 unregister_host_class:
2547 	mmc_unregister_host_class();
2548 unregister_bus:
2549 	mmc_unregister_bus();
2550 destroy_workqueue:
2551 	destroy_workqueue(workqueue);
2552 
2553 	return ret;
2554 }
2555 
2556 static void __exit mmc_exit(void)
2557 {
2558 	sdio_unregister_bus();
2559 	mmc_unregister_host_class();
2560 	mmc_unregister_bus();
2561 	destroy_workqueue(workqueue);
2562 }
2563 
2564 subsys_initcall(mmc_init);
2565 module_exit(mmc_exit);
2566 
2567 MODULE_LICENSE("GPL");
2568