1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/suspend.h>
27 #include <linux/fault-inject.h>
28 #include <linux/random.h>
29 
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/sd.h>
34 
35 #include "core.h"
36 #include "bus.h"
37 #include "host.h"
38 #include "sdio_bus.h"
39 
40 #include "mmc_ops.h"
41 #include "sd_ops.h"
42 #include "sdio_ops.h"
43 
44 static struct workqueue_struct *workqueue;
45 
46 /*
47  * Enabling software CRCs on the data blocks can be a significant (30%)
48  * performance cost, and for other reasons may not always be desired.
49  * So we allow it to be disabled.
50  */
51 bool use_spi_crc = 1;
52 module_param(use_spi_crc, bool, 0);
53 
54 /*
55  * We normally treat cards as removed during suspend if they are not
56  * known to be on a non-removable bus, to avoid the risk of writing
57  * back data to a different card after resume.  Allow this to be
58  * overridden if necessary.
59  */
60 #ifdef CONFIG_MMC_UNSAFE_RESUME
61 bool mmc_assume_removable;
62 #else
63 bool mmc_assume_removable = 1;
64 #endif
65 EXPORT_SYMBOL(mmc_assume_removable);
66 module_param_named(removable, mmc_assume_removable, bool, 0644);
67 MODULE_PARM_DESC(
68 	removable,
69 	"MMC/SD cards are removable and may be removed during suspend");
70 
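/*
 * For example, with the core built as a module the default can be
 * overridden at load time, or on the kernel command line when built
 * in, with something like "mmc_core.removable=0".  The 0644 mode
 * above also exposes it at /sys/module/mmc_core/parameters/removable.
 * (Illustrative usage; the parameter itself is defined just above.)
 */
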
71 /*
72  * Internal function. Schedule delayed work in the MMC work queue.
73  */
74 static int mmc_schedule_delayed_work(struct delayed_work *work,
75 				     unsigned long delay)
76 {
77 	return queue_delayed_work(workqueue, work, delay);
78 }
79 
80 /*
81  * Internal function. Flush all scheduled work from the MMC work queue.
82  */
83 static void mmc_flush_scheduled_work(void)
84 {
85 	flush_workqueue(workqueue);
86 }
87 
88 #ifdef CONFIG_FAIL_MMC_REQUEST
89 
90 /*
91  * Internal function. Inject random data errors.
92  * If mmc_data is NULL, no errors are injected.
93  */
94 static void mmc_should_fail_request(struct mmc_host *host,
95 				    struct mmc_request *mrq)
96 {
97 	struct mmc_command *cmd = mrq->cmd;
98 	struct mmc_data *data = mrq->data;
99 	static const int data_errors[] = {
100 		-ETIMEDOUT,
101 		-EILSEQ,
102 		-EIO,
103 	};
104 
105 	if (!data)
106 		return;
107 
108 	if (cmd->error || data->error ||
109 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
110 		return;
111 
112 	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
113 	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
114 }
115 
116 #else /* CONFIG_FAIL_MMC_REQUEST */
117 
118 static inline void mmc_should_fail_request(struct mmc_host *host,
119 					   struct mmc_request *mrq)
120 {
121 }
122 
123 #endif /* CONFIG_FAIL_MMC_REQUEST */
124 
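/*
 * An illustrative sketch of driving the injection above from
 * userspace via the common fault-injection debugfs attributes
 * (paths assume debugfs is mounted at /sys/kernel/debug and a host
 * named mmc0; see Documentation/fault-injection):
 *
 *	echo 10 > /sys/kernel/debug/mmc0/fail_mmc_request/probability
 *	echo -1 > /sys/kernel/debug/mmc0/fail_mmc_request/times
 *
 * after which roughly one in ten data requests on mmc0 completes
 * with one of the errors from data_errors[] above.
 */
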
125 /**
126  *	mmc_request_done - finish processing an MMC request
127  *	@host: MMC host which completed request
128  *	@mrq: MMC request which completed
129  *
130  *	MMC drivers should call this function when they have completed
131  *	their processing of a request.
132  */
133 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
134 {
135 	struct mmc_command *cmd = mrq->cmd;
136 	int err = cmd->error;
137 
138 	if (err && cmd->retries && mmc_host_is_spi(host)) {
139 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
140 			cmd->retries = 0;
141 	}
142 
143 	if (err && cmd->retries) {
144 		/*
145 		 * Request starter must handle retries - see
146 		 * mmc_wait_for_req_done().
147 		 */
148 		if (mrq->done)
149 			mrq->done(mrq);
150 	} else {
151 		mmc_should_fail_request(host, mrq);
152 
153 		led_trigger_event(host->led, LED_OFF);
154 
155 		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
156 			mmc_hostname(host), cmd->opcode, err,
157 			cmd->resp[0], cmd->resp[1],
158 			cmd->resp[2], cmd->resp[3]);
159 
160 		if (mrq->data) {
161 			pr_debug("%s:     %d bytes transferred: %d\n",
162 				mmc_hostname(host),
163 				mrq->data->bytes_xfered, mrq->data->error);
164 		}
165 
166 		if (mrq->stop) {
167 			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
168 				mmc_hostname(host), mrq->stop->opcode,
169 				mrq->stop->error,
170 				mrq->stop->resp[0], mrq->stop->resp[1],
171 				mrq->stop->resp[2], mrq->stop->resp[3]);
172 		}
173 
174 		if (mrq->done)
175 			mrq->done(mrq);
176 
177 		mmc_host_clk_release(host);
178 	}
179 }
180 
181 EXPORT_SYMBOL(mmc_request_done);
182 
183 static void
184 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
185 {
186 #ifdef CONFIG_MMC_DEBUG
187 	unsigned int i, sz;
188 	struct scatterlist *sg;
189 #endif
190 
191 	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
192 		 mmc_hostname(host), mrq->cmd->opcode,
193 		 mrq->cmd->arg, mrq->cmd->flags);
194 
195 	if (mrq->data) {
196 		pr_debug("%s:     blksz %d blocks %d flags %08x "
197 			"tsac %d ms nsac %d\n",
198 			mmc_hostname(host), mrq->data->blksz,
199 			mrq->data->blocks, mrq->data->flags,
200 			mrq->data->timeout_ns / 1000000,
201 			mrq->data->timeout_clks);
202 	}
203 
204 	if (mrq->stop) {
205 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
206 			 mmc_hostname(host), mrq->stop->opcode,
207 			 mrq->stop->arg, mrq->stop->flags);
208 	}
209 
210 	WARN_ON(!host->claimed);
211 
212 	mrq->cmd->error = 0;
213 	mrq->cmd->mrq = mrq;
214 	if (mrq->data) {
215 		BUG_ON(mrq->data->blksz > host->max_blk_size);
216 		BUG_ON(mrq->data->blocks > host->max_blk_count);
217 		BUG_ON(mrq->data->blocks * mrq->data->blksz >
218 			host->max_req_size);
219 
220 #ifdef CONFIG_MMC_DEBUG
221 		sz = 0;
222 		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
223 			sz += sg->length;
224 		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
225 #endif
226 
227 		mrq->cmd->data = mrq->data;
228 		mrq->data->error = 0;
229 		mrq->data->mrq = mrq;
230 		if (mrq->stop) {
231 			mrq->data->stop = mrq->stop;
232 			mrq->stop->error = 0;
233 			mrq->stop->mrq = mrq;
234 		}
235 	}
236 	mmc_host_clk_hold(host);
237 	led_trigger_event(host->led, LED_FULL);
238 	host->ops->request(host, mrq);
239 }
240 
241 static void mmc_wait_done(struct mmc_request *mrq)
242 {
243 	complete(&mrq->completion);
244 }
245 
246 static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
247 {
248 	init_completion(&mrq->completion);
249 	mrq->done = mmc_wait_done;
250 	mmc_start_request(host, mrq);
251 }
252 
253 static void mmc_wait_for_req_done(struct mmc_host *host,
254 				  struct mmc_request *mrq)
255 {
256 	struct mmc_command *cmd;
257 
258 	while (1) {
259 		wait_for_completion(&mrq->completion);
260 
261 		cmd = mrq->cmd;
262 		if (!cmd->error || !cmd->retries)
263 			break;
264 
265 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
266 			 mmc_hostname(host), cmd->opcode, cmd->error);
267 		cmd->retries--;
268 		cmd->error = 0;
269 		host->ops->request(host, mrq);
270 	}
271 }
272 
273 /**
274  *	mmc_pre_req - Prepare for a new request
275  *	@host: MMC host to prepare command
276  *	@mrq: MMC request to prepare for
277  *	@is_first_req: true if there is no previously started request
278  *                     that may run in parallel to this call, otherwise false
279  *
280  *	mmc_pre_req() is called prior to mmc_start_req() to let the
281  *	host prepare for the new request. Preparation of a request may be
282  *	performed while another request is running on the host.
283  */
284 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
285 		 bool is_first_req)
286 {
287 	if (host->ops->pre_req)
288 		host->ops->pre_req(host, mrq, is_first_req);
289 }
290 
291 /**
292  *	mmc_post_req - Post process a completed request
293  *	@host: MMC host to post process command
294  *	@mrq: MMC request to post process for
295  *	@err: error; if non-zero, clean up any resources allocated in pre_req
296  *
297  *	Let the host post process a completed request. Post processing of
298  *	a request may be performed while another request is running.
299  */
300 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
301 			 int err)
302 {
303 	if (host->ops->post_req)
304 		host->ops->post_req(host, mrq, err);
305 }
306 
307 /**
308  *	mmc_start_req - start a non-blocking request
309  *	@host: MMC host to start command
310  *	@areq: async request to start
311  *	@error: out parameter; returns 0 for success, otherwise non-zero
312  *
313  *	Start a new MMC custom command request for a host.
314  *	If there is an ongoing async request, wait for completion
315  *	of that request, then start the new one and return.
316  *	Does not wait for the new request to complete.
317  *
318  *	Returns the completed request, or NULL if none has completed.
319  *	Wait for an ongoing request (previously started) to complete and
320  *	return the completed request. If there is no ongoing request, NULL
321  *	is returned without waiting. NULL is not an error condition.
322  */
323 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
324 				    struct mmc_async_req *areq, int *error)
325 {
326 	int err = 0;
327 	struct mmc_async_req *data = host->areq;
328 
329 	/* Prepare a new request */
330 	if (areq)
331 		mmc_pre_req(host, areq->mrq, !host->areq);
332 
333 	if (host->areq) {
334 		mmc_wait_for_req_done(host, host->areq->mrq);
335 		err = host->areq->err_check(host->card, host->areq);
336 		if (err) {
337 			/* post process the completed failed request */
338 			mmc_post_req(host, host->areq->mrq, 0);
339 			if (areq)
340 				/*
341 				 * Cancel the new prepared request, because
342 				 * it can't run until the failed
343 				 * request has been properly handled.
344 				 */
345 				mmc_post_req(host, areq->mrq, -EINVAL);
346 
347 			host->areq = NULL;
348 			goto out;
349 		}
350 	}
351 
352 	if (areq)
353 		__mmc_start_req(host, areq->mrq);
354 
355 	if (host->areq)
356 		mmc_post_req(host, host->areq->mrq, 0);
357 
358 	host->areq = areq;
359  out:
360 	if (error)
361 		*error = err;
362 	return data;
363 }
364 EXPORT_SYMBOL(mmc_start_req);
365 
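/*
 * An illustrative sketch of the pipelining pattern this enables
 * (caller-side names are hypothetical): a driver keeps the next
 * request prepared while the previous one is in flight, e.g.
 *
 *	struct mmc_async_req *done;
 *	int err;
 *
 *	done = mmc_start_req(host, next_areq, &err);
 *	if (err)
 *		... 'done' is the previous request, which failed ...
 *	else if (done)
 *		... post-process the completed previous request ...
 *
 * Passing a NULL areq just waits for and returns the outstanding
 * request, draining the pipeline.
 */
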
366 /**
367  *	mmc_wait_for_req - start a request and wait for completion
368  *	@host: MMC host to start command
369  *	@mrq: MMC request to start
370  *
371  *	Start a new MMC custom command request for a host, and wait
372  *	for the command to complete. Does not attempt to parse the
373  *	response.
374  */
375 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
376 {
377 	__mmc_start_req(host, mrq);
378 	mmc_wait_for_req_done(host, mrq);
379 }
380 EXPORT_SYMBOL(mmc_wait_for_req);
381 
382 /**
383  *	mmc_interrupt_hpi - issue a High Priority Interrupt
384  *	@card: the MMC card associated with the HPI transfer
385  *
386  *	Issue a High Priority Interrupt, and poll the card status
387  *	until it is out of the prg-state.
388  */
389 int mmc_interrupt_hpi(struct mmc_card *card)
390 {
391 	int err;
392 	u32 status;
393 
394 	BUG_ON(!card);
395 
396 	if (!card->ext_csd.hpi_en) {
397 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
398 		return 1;
399 	}
400 
401 	mmc_claim_host(card->host);
402 	err = mmc_send_status(card, &status);
403 	if (err) {
404 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
405 		goto out;
406 	}
407 
408 	/*
409 	 * If the card status is in PRG-state, we can send the HPI command.
410 	 */
411 	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
412 		do {
413 			/*
414 			 * We don't know when the HPI command will finish
415 			 * processing, so we need to resend HPI until out
416 			 * of prg-state, and keep checking the card status
417 			 * with SEND_STATUS.  If a timeout error occurs when
418 			 * sending the HPI command, we are already out of
419 			 * prg-state.
420 			 */
421 			err = mmc_send_hpi_cmd(card, &status);
422 			if (err)
423 				pr_debug("%s: abort HPI (%d error)\n",
424 					 mmc_hostname(card->host), err);
425 
426 			err = mmc_send_status(card, &status);
427 			if (err)
428 				break;
429 		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
430 	} else
431 		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
432 
433 out:
434 	mmc_release_host(card->host);
435 	return err;
436 }
437 EXPORT_SYMBOL(mmc_interrupt_hpi);
438 
439 /**
440  *	mmc_wait_for_cmd - start a command and wait for completion
441  *	@host: MMC host to start command
442  *	@cmd: MMC command to start
443  *	@retries: maximum number of retries
444  *
445  *	Start a new MMC command for a host, and wait for the command
446  *	to complete.  Return any error that occurred while the command
447  *	was executing.  Do not attempt to parse the response.
448  */
449 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
450 {
451 	struct mmc_request mrq = {NULL};
452 
453 	WARN_ON(!host->claimed);
454 
455 	memset(cmd->resp, 0, sizeof(cmd->resp));
456 	cmd->retries = retries;
457 
458 	mrq.cmd = cmd;
459 	cmd->data = NULL;
460 
461 	mmc_wait_for_req(host, &mrq);
462 
463 	return cmd->error;
464 }
465 
466 EXPORT_SYMBOL(mmc_wait_for_cmd);
467 
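/*
 * A minimal usage sketch (illustrative; the host must already be
 * claimed, as the WARN_ON above checks):
 *
 *	struct mmc_command cmd = {0};
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *
 * On success the raw response is left in cmd.resp[] for the caller
 * to parse; compare the status poll loop in mmc_do_erase() below.
 */
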
468 /**
469  *	mmc_set_data_timeout - set the timeout for a data command
470  *	@data: data phase for command
471  *	@card: the MMC card associated with the data transfer
472  *
473  *	Computes the data timeout parameters according to the
474  *	correct algorithm given the card type.
475  */
476 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
477 {
478 	unsigned int mult;
479 
480 	/*
481 	 * SDIO cards only define an upper 1 s limit on access.
482 	 */
483 	if (mmc_card_sdio(card)) {
484 		data->timeout_ns = 1000000000;
485 		data->timeout_clks = 0;
486 		return;
487 	}
488 
489 	/*
490 	 * SD cards use a 100 multiplier rather than 10
491 	 */
492 	mult = mmc_card_sd(card) ? 100 : 10;
493 
494 	/*
495 	 * Scale up the multiplier (and therefore the timeout) by
496 	 * the r2w factor for writes.
497 	 */
498 	if (data->flags & MMC_DATA_WRITE)
499 		mult <<= card->csd.r2w_factor;
500 
501 	data->timeout_ns = card->csd.tacc_ns * mult;
502 	data->timeout_clks = card->csd.tacc_clks * mult;
503 
504 	/*
505 	 * SD cards also have an upper limit on the timeout.
506 	 */
507 	if (mmc_card_sd(card)) {
508 		unsigned int timeout_us, limit_us;
509 
510 		timeout_us = data->timeout_ns / 1000;
511 		if (mmc_host_clk_rate(card->host))
512 			timeout_us += data->timeout_clks * 1000 /
513 				(mmc_host_clk_rate(card->host) / 1000);
514 
515 		if (data->flags & MMC_DATA_WRITE)
516 			/*
517 			 * The limit is really 250 ms, but that is
518 			 * insufficient for some crappy cards.
519 			 */
520 			limit_us = 300000;
521 		else
522 			limit_us = 100000;
523 
524 		/*
525 		 * SDHC cards always use these fixed values.
526 		 */
527 		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
528 			data->timeout_ns = limit_us * 1000;
529 			data->timeout_clks = 0;
530 		}
531 	}
532 
533 	/*
534 	 * Some cards require longer data read timeout than indicated in CSD.
535 	 * Address this by setting the read timeout to a "reasonably high"
536 	 * value. For the cards tested, 300ms has proven enough. If necessary,
537 	 * this value can be increased for other problematic cards.
538 	 */
539 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
540 		data->timeout_ns = 300000000;
541 		data->timeout_clks = 0;
542 	}
543 
544 	/*
545 	 * Some cards need very high timeouts if driven in SPI mode.
546 	 * The worst observed timeout was 900ms after writing a
547 	 * continuous stream of data until the internal logic
548 	 * overflowed.
549 	 */
550 	if (mmc_host_is_spi(card->host)) {
551 		if (data->flags & MMC_DATA_WRITE) {
552 			if (data->timeout_ns < 1000000000)
553 				data->timeout_ns = 1000000000;	/* 1s */
554 		} else {
555 			if (data->timeout_ns < 100000000)
556 				data->timeout_ns =  100000000;	/* 100ms */
557 		}
558 	}
559 }
560 EXPORT_SYMBOL(mmc_set_data_timeout);
561 
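/*
 * A worked example of the CSD-based path above (illustrative
 * numbers): an SD card with tacc_ns = 1000000 (1 ms) and
 * r2w_factor = 4 gives mult = 100 << 4 = 1600 for a write, so
 * timeout_ns = 1.6 s.  That exceeds the 300 ms write limit_us, so
 * the timeout is clamped to 300 ms with timeout_clks = 0.
 */
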
562 /**
563  *	mmc_align_data_size - pads a transfer size to a more optimal value
564  *	@card: the MMC card associated with the data transfer
565  *	@sz: original transfer size
566  *
567  *	Pads the original data size with a number of extra bytes in
568  *	order to avoid controller bugs and/or performance hits
569  *	(e.g. some controllers revert to PIO for certain sizes).
570  *
571  *	Returns the improved size, which might be unmodified.
572  *
573  *	Note that this function is only relevant when issuing a
574  *	single scatter gather entry.
575  */
576 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
577 {
578 	/*
579 	 * FIXME: We don't have a system for the controller to tell
580 	 * the core about its problems yet, so for now we just 32-bit
581 	 * align the size.
582 	 */
583 	sz = ((sz + 3) / 4) * 4;
584 
585 	return sz;
586 }
587 EXPORT_SYMBOL(mmc_align_data_size);
588 
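/*
 * For example, sz = 13 becomes ((13 + 3) / 4) * 4 = 16 bytes, while
 * an already 32-bit-aligned sz = 16 is returned unmodified.
 */
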
589 /**
590  *	mmc_host_enable - enable a host.
591  *	@host: mmc host to enable
592  *
593  *	Hosts that support power saving can use the 'enable' and 'disable'
594  *	methods to exit and enter power saving states. For more information
595  *	see comments for struct mmc_host_ops.
596  */
597 int mmc_host_enable(struct mmc_host *host)
598 {
599 	if (!(host->caps & MMC_CAP_DISABLE))
600 		return 0;
601 
602 	if (host->en_dis_recurs)
603 		return 0;
604 
605 	if (host->nesting_cnt++)
606 		return 0;
607 
608 	cancel_delayed_work_sync(&host->disable);
609 
610 	if (host->enabled)
611 		return 0;
612 
613 	if (host->ops->enable) {
614 		int err;
615 
616 		host->en_dis_recurs = 1;
617 		err = host->ops->enable(host);
618 		host->en_dis_recurs = 0;
619 
620 		if (err) {
621 			pr_debug("%s: enable error %d\n",
622 				 mmc_hostname(host), err);
623 			return err;
624 		}
625 	}
626 	host->enabled = 1;
627 	return 0;
628 }
629 EXPORT_SYMBOL(mmc_host_enable);
630 
631 static int mmc_host_do_disable(struct mmc_host *host, int lazy)
632 {
633 	if (host->ops->disable) {
634 		int err;
635 
636 		host->en_dis_recurs = 1;
637 		err = host->ops->disable(host, lazy);
638 		host->en_dis_recurs = 0;
639 
640 		if (err < 0) {
641 			pr_debug("%s: disable error %d\n",
642 				 mmc_hostname(host), err);
643 			return err;
644 		}
645 		if (err > 0) {
646 			unsigned long delay = msecs_to_jiffies(err);
647 
648 			mmc_schedule_delayed_work(&host->disable, delay);
649 		}
650 	}
651 	host->enabled = 0;
652 	return 0;
653 }
654 
655 /**
656  *	mmc_host_disable - disable a host.
657  *	@host: mmc host to disable
658  *
659  *	Hosts that support power saving can use the 'enable' and 'disable'
660  *	methods to exit and enter power saving states. For more information
661  *	see comments for struct mmc_host_ops.
662  */
663 int mmc_host_disable(struct mmc_host *host)
664 {
665 	int err;
666 
667 	if (!(host->caps & MMC_CAP_DISABLE))
668 		return 0;
669 
670 	if (host->en_dis_recurs)
671 		return 0;
672 
673 	if (--host->nesting_cnt)
674 		return 0;
675 
676 	if (!host->enabled)
677 		return 0;
678 
679 	err = mmc_host_do_disable(host, 0);
680 	return err;
681 }
682 EXPORT_SYMBOL(mmc_host_disable);
683 
684 /**
685  *	__mmc_claim_host - exclusively claim a host
686  *	@host: mmc host to claim
687  *	@abort: whether or not the operation should be aborted
688  *
689  *	Claim a host for a set of operations.  If @abort is non-null and
690  *	dereferences to a non-zero value then this will return prematurely with
691  *	that non-zero value without acquiring the lock.  Returns zero
692  *	with the lock held otherwise.
693  */
694 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
695 {
696 	DECLARE_WAITQUEUE(wait, current);
697 	unsigned long flags;
698 	int stop;
699 
700 	might_sleep();
701 
702 	add_wait_queue(&host->wq, &wait);
703 	spin_lock_irqsave(&host->lock, flags);
704 	while (1) {
705 		set_current_state(TASK_UNINTERRUPTIBLE);
706 		stop = abort ? atomic_read(abort) : 0;
707 		if (stop || !host->claimed || host->claimer == current)
708 			break;
709 		spin_unlock_irqrestore(&host->lock, flags);
710 		schedule();
711 		spin_lock_irqsave(&host->lock, flags);
712 	}
713 	set_current_state(TASK_RUNNING);
714 	if (!stop) {
715 		host->claimed = 1;
716 		host->claimer = current;
717 		host->claim_cnt += 1;
718 	} else
719 		wake_up(&host->wq);
720 	spin_unlock_irqrestore(&host->lock, flags);
721 	remove_wait_queue(&host->wq, &wait);
722 	if (!stop)
723 		mmc_host_enable(host);
724 	return stop;
725 }
726 
727 EXPORT_SYMBOL(__mmc_claim_host);
728 
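/*
 * Callers normally use the mmc_claim_host() wrapper (which passes a
 * NULL @abort) and bracket their work with mmc_release_host().  An
 * illustrative sketch:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 *
 * Claims by the same task nest; see the host->claim_cnt handling
 * here and in mmc_do_release_host() below.
 */
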
729 /**
730  *	mmc_try_claim_host - try exclusively to claim a host
731  *	@host: mmc host to claim
732  *
733  *	Returns %1 if the host is claimed, %0 otherwise.
734  */
735 int mmc_try_claim_host(struct mmc_host *host)
736 {
737 	int claimed_host = 0;
738 	unsigned long flags;
739 
740 	spin_lock_irqsave(&host->lock, flags);
741 	if (!host->claimed || host->claimer == current) {
742 		host->claimed = 1;
743 		host->claimer = current;
744 		host->claim_cnt += 1;
745 		claimed_host = 1;
746 	}
747 	spin_unlock_irqrestore(&host->lock, flags);
748 	return claimed_host;
749 }
750 EXPORT_SYMBOL(mmc_try_claim_host);
751 
752 /**
753  *	mmc_do_release_host - release a claimed host
754  *	@host: mmc host to release
755  *
756  *	If you successfully claimed a host, this function will
757  *	release it again.
758  */
759 void mmc_do_release_host(struct mmc_host *host)
760 {
761 	unsigned long flags;
762 
763 	spin_lock_irqsave(&host->lock, flags);
764 	if (--host->claim_cnt) {
765 		/* Release for nested claim */
766 		spin_unlock_irqrestore(&host->lock, flags);
767 	} else {
768 		host->claimed = 0;
769 		host->claimer = NULL;
770 		spin_unlock_irqrestore(&host->lock, flags);
771 		wake_up(&host->wq);
772 	}
773 }
774 EXPORT_SYMBOL(mmc_do_release_host);
775 
776 void mmc_host_deeper_disable(struct work_struct *work)
777 {
778 	struct mmc_host *host =
779 		container_of(work, struct mmc_host, disable.work);
780 
781 	/* If the host is claimed then we do not want to disable it anymore */
782 	if (!mmc_try_claim_host(host))
783 		return;
784 	mmc_host_do_disable(host, 1);
785 	mmc_do_release_host(host);
786 }
787 
788 /**
789  *	mmc_host_lazy_disable - lazily disable a host.
790  *	@host: mmc host to disable
791  *
792  *	Hosts that support power saving can use the 'enable' and 'disable'
793  *	methods to exit and enter power saving states. For more information
794  *	see comments for struct mmc_host_ops.
795  */
796 int mmc_host_lazy_disable(struct mmc_host *host)
797 {
798 	if (!(host->caps & MMC_CAP_DISABLE))
799 		return 0;
800 
801 	if (host->en_dis_recurs)
802 		return 0;
803 
804 	if (--host->nesting_cnt)
805 		return 0;
806 
807 	if (!host->enabled)
808 		return 0;
809 
810 	if (host->disable_delay) {
811 		mmc_schedule_delayed_work(&host->disable,
812 				msecs_to_jiffies(host->disable_delay));
813 		return 0;
814 	} else
815 		return mmc_host_do_disable(host, 1);
816 }
817 EXPORT_SYMBOL(mmc_host_lazy_disable);
818 
819 /**
820  *	mmc_release_host - release a host
821  *	@host: mmc host to release
822  *
823  *	Release an MMC host, allowing others to claim the host
824  *	for their operations.
825  */
826 void mmc_release_host(struct mmc_host *host)
827 {
828 	WARN_ON(!host->claimed);
829 
830 	mmc_host_lazy_disable(host);
831 
832 	mmc_do_release_host(host);
833 }
834 
835 EXPORT_SYMBOL(mmc_release_host);
836 
837 /*
838  * Internal function that does the actual ios call to the host driver,
839  * optionally printing some debug output.
840  */
841 static inline void mmc_set_ios(struct mmc_host *host)
842 {
843 	struct mmc_ios *ios = &host->ios;
844 
845 	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
846 		"width %u timing %u\n",
847 		 mmc_hostname(host), ios->clock, ios->bus_mode,
848 		 ios->power_mode, ios->chip_select, ios->vdd,
849 		 ios->bus_width, ios->timing);
850 
851 	if (ios->clock > 0)
852 		mmc_set_ungated(host);
853 	host->ops->set_ios(host, ios);
854 }
855 
856 /*
857  * Control chip select pin on a host.
858  */
859 void mmc_set_chip_select(struct mmc_host *host, int mode)
860 {
861 	mmc_host_clk_hold(host);
862 	host->ios.chip_select = mode;
863 	mmc_set_ios(host);
864 	mmc_host_clk_release(host);
865 }
866 
867 /*
868  * Sets the host clock to the highest possible frequency that
869  * is at or below "hz".
870  */
871 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
872 {
873 	WARN_ON(hz < host->f_min);
874 
875 	if (hz > host->f_max)
876 		hz = host->f_max;
877 
878 	host->ios.clock = hz;
879 	mmc_set_ios(host);
880 }
881 
882 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
883 {
884 	mmc_host_clk_hold(host);
885 	__mmc_set_clock(host, hz);
886 	mmc_host_clk_release(host);
887 }
888 
889 #ifdef CONFIG_MMC_CLKGATE
890 /*
891  * This gates the clock by setting it to 0 Hz.
892  */
893 void mmc_gate_clock(struct mmc_host *host)
894 {
895 	unsigned long flags;
896 
897 	spin_lock_irqsave(&host->clk_lock, flags);
898 	host->clk_old = host->ios.clock;
899 	host->ios.clock = 0;
900 	host->clk_gated = true;
901 	spin_unlock_irqrestore(&host->clk_lock, flags);
902 	mmc_set_ios(host);
903 }
904 
905 /*
906  * This restores the clock from gating by using the cached
907  * clock value.
908  */
909 void mmc_ungate_clock(struct mmc_host *host)
910 {
911 	/*
912 	 * We should previously have gated the clock, so the clock shall
913 	 * be 0 here! The clock may however be 0 during initialization,
914 	 * when some request operations are performed before setting
915 	 * the frequency. When ungate is requested in that situation
916 	 * we just ignore the call.
917 	 */
918 	if (host->clk_old) {
919 		BUG_ON(host->ios.clock);
920 		/* This call will also set host->clk_gated to false */
921 		__mmc_set_clock(host, host->clk_old);
922 	}
923 }
924 
925 void mmc_set_ungated(struct mmc_host *host)
926 {
927 	unsigned long flags;
928 
929 	/*
930 	 * We've been given a new frequency while the clock is gated,
931 	 * so make sure we regard this as ungating it.
932 	 */
933 	spin_lock_irqsave(&host->clk_lock, flags);
934 	host->clk_gated = false;
935 	spin_unlock_irqrestore(&host->clk_lock, flags);
936 }
937 
938 #else
939 void mmc_set_ungated(struct mmc_host *host)
940 {
941 }
942 #endif
943 
944 /*
945  * Change the bus mode (open drain/push-pull) of a host.
946  */
947 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
948 {
949 	mmc_host_clk_hold(host);
950 	host->ios.bus_mode = mode;
951 	mmc_set_ios(host);
952 	mmc_host_clk_release(host);
953 }
954 
955 /*
956  * Change data bus width of a host.
957  */
958 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
959 {
960 	mmc_host_clk_hold(host);
961 	host->ios.bus_width = width;
962 	mmc_set_ios(host);
963 	mmc_host_clk_release(host);
964 }
965 
966 /**
967  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
968  * @vdd:	voltage (mV)
969  * @low_bits:	prefer low bits in boundary cases
970  *
971  * This function returns the OCR bit number according to the provided @vdd
972  * value. If conversion is not possible, a negative errno value is returned.
973  *
974  * Depending on the @low_bits flag the function prefers low or high OCR bits
975  * on boundary voltages. For example,
976  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
977  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
978  *
979  * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
980  */
981 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
982 {
983 	const int max_bit = ilog2(MMC_VDD_35_36);
984 	int bit;
985 
986 	if (vdd < 1650 || vdd > 3600)
987 		return -EINVAL;
988 
989 	if (vdd >= 1650 && vdd <= 1950)
990 		return ilog2(MMC_VDD_165_195);
991 
992 	if (low_bits)
993 		vdd -= 1;
994 
995 	/* Base 2000 mV, step 100 mV, bit numbering starts at 8. */
996 	bit = (vdd - 2000) / 100 + 8;
997 	if (bit > max_bit)
998 		return max_bit;
999 	return bit;
1000 }
1001 
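/*
 * Tracing the arithmetic above for the boundary case in the comment:
 * vdd = 3300 with low_bits = false gives (3300 - 2000) / 100 + 8 = 21,
 * i.e. ilog2(MMC_VDD_33_34); with low_bits = true, vdd is first
 * decremented to 3299, giving bit 20, i.e. ilog2(MMC_VDD_32_33).
 */
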
1002 /**
1003  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1004  * @vdd_min:	minimum voltage value (mV)
1005  * @vdd_max:	maximum voltage value (mV)
1006  *
1007  * This function returns the OCR mask bits according to the provided @vdd_min
1008  * and @vdd_max values. If conversion is not possible the function returns 0.
1009  *
1010  * Notes wrt boundary cases:
1011  * This function sets the OCR bits for all boundary voltages, for example
1012  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1013  * MMC_VDD_34_35 mask.
1014  */
1015 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1016 {
1017 	u32 mask = 0;
1018 
1019 	if (vdd_max < vdd_min)
1020 		return 0;
1021 
1022 	/* Prefer high bits for the boundary vdd_max values. */
1023 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1024 	if (vdd_max < 0)
1025 		return 0;
1026 
1027 	/* Prefer low bits for the boundary vdd_min values. */
1028 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1029 	if (vdd_min < 0)
1030 		return 0;
1031 
1032 	/* Fill the mask, from max bit to min bit. */
1033 	while (vdd_max >= vdd_min)
1034 		mask |= 1 << vdd_max--;
1035 
1036 	return mask;
1037 }
1038 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1039 
1040 #ifdef CONFIG_REGULATOR
1041 
1042 /**
1043  * mmc_regulator_get_ocrmask - return mask of supported voltages
1044  * @supply: regulator to use
1045  *
1046  * This returns either a negative errno, or a mask of voltages that
1047  * can be provided to MMC/SD/SDIO devices using the specified voltage
1048  * regulator.  This would normally be called before registering the
1049  * MMC host adapter.
1050  */
1051 int mmc_regulator_get_ocrmask(struct regulator *supply)
1052 {
1053 	int			result = 0;
1054 	int			count;
1055 	int			i;
1056 
1057 	count = regulator_count_voltages(supply);
1058 	if (count < 0)
1059 		return count;
1060 
1061 	for (i = 0; i < count; i++) {
1062 		int		vdd_uV;
1063 		int		vdd_mV;
1064 
1065 		vdd_uV = regulator_list_voltage(supply, i);
1066 		if (vdd_uV <= 0)
1067 			continue;
1068 
1069 		vdd_mV = vdd_uV / 1000;
1070 		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1071 	}
1072 
1073 	return result;
1074 }
1075 EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
1076 
1077 /**
1078  * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1079  * @mmc: the host to regulate
1080  * @supply: regulator to use
1081  * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1082  *
1083  * Returns zero on success, else negative errno.
1084  *
1085  * MMC host drivers may use this to enable or disable a regulator using
1086  * a particular supply voltage.  This would normally be called from the
1087  * set_ios() method.
1088  */
1089 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1090 			struct regulator *supply,
1091 			unsigned short vdd_bit)
1092 {
1093 	int			result = 0;
1094 	int			min_uV, max_uV;
1095 
1096 	if (vdd_bit) {
1097 		int		tmp;
1098 		int		voltage;
1099 
1100 		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
1101 		 * bits this regulator doesn't quite support ... don't
1102 		 * be too picky, most cards and regulators are OK with
1103 		 * a 0.1V range goof (it's a small error percentage).
1104 		 */
1105 		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1106 		if (tmp == 0) {
1107 			min_uV = 1650 * 1000;
1108 			max_uV = 1950 * 1000;
1109 		} else {
1110 			min_uV = 1900 * 1000 + tmp * 100 * 1000;
1111 			max_uV = min_uV + 100 * 1000;
1112 		}
1113 
1114 		/* avoid needless changes to this voltage; the regulator
1115 		 * might not allow this operation
1116 		 */
1117 		voltage = regulator_get_voltage(supply);
1118 		if (voltage < 0)
1119 			result = voltage;
1120 		else if (voltage < min_uV || voltage > max_uV)
1121 			result = regulator_set_voltage(supply, min_uV, max_uV);
1122 		else
1123 			result = 0;
1124 
1125 		if (result == 0 && !mmc->regulator_enabled) {
1126 			result = regulator_enable(supply);
1127 			if (!result)
1128 				mmc->regulator_enabled = true;
1129 		}
1130 	} else if (mmc->regulator_enabled) {
1131 		result = regulator_disable(supply);
1132 		if (result == 0)
1133 			mmc->regulator_enabled = false;
1134 	}
1135 
1136 	if (result)
1137 		dev_err(mmc_dev(mmc),
1138 			"could not set regulator OCR (%d)\n", result);
1139 	return result;
1140 }
1141 EXPORT_SYMBOL(mmc_regulator_set_ocr);
1142 
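/*
 * An illustrative sketch of the expected call site, a host driver's
 * set_ios() method (driver names are hypothetical):
 *
 *	static void myhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct myhost *priv = mmc_priv(mmc);
 *
 *		mmc_regulator_set_ocr(mmc, priv->vcc, ios->vdd);
 *		...
 *	}
 *
 * A vdd_bit of zero (power off) disables the regulator again.
 */
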
1143 #endif /* CONFIG_REGULATOR */
1144 
1145 /*
1146  * Mask off any voltages we don't support and select
1147  * the lowest voltage
1148  */
1149 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1150 {
1151 	int bit;
1152 
1153 	ocr &= host->ocr_avail;
1154 
1155 	bit = ffs(ocr);
1156 	if (bit) {
1157 		bit -= 1;
1158 
1159 		ocr &= 3 << bit;
1160 
1161 		mmc_host_clk_hold(host);
1162 		host->ios.vdd = bit;
1163 		mmc_set_ios(host);
1164 		mmc_host_clk_release(host);
1165 	} else {
1166 		pr_warning("%s: host doesn't support card's voltages\n",
1167 				mmc_hostname(host));
1168 		ocr = 0;
1169 	}
1170 
1171 	return ocr;
1172 }
1173 
1174 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
1175 {
1176 	struct mmc_command cmd = {0};
1177 	int err = 0;
1178 
1179 	BUG_ON(!host);
1180 
1181 	/*
1182 	 * Send CMD11 only if the request is to switch the card to
1183 	 * 1.8V signalling.
1184 	 */
1185 	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
1186 		cmd.opcode = SD_SWITCH_VOLTAGE;
1187 		cmd.arg = 0;
1188 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1189 
1190 		err = mmc_wait_for_cmd(host, &cmd, 0);
1191 		if (err)
1192 			return err;
1193 
1194 		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1195 			return -EIO;
1196 	}
1197 
1198 	host->ios.signal_voltage = signal_voltage;
1199 
1200 	if (host->ops->start_signal_voltage_switch)
1201 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1202 
1203 	return err;
1204 }
1205 
1206 /*
1207  * Select timing parameters for host.
1208  */
1209 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1210 {
1211 	mmc_host_clk_hold(host);
1212 	host->ios.timing = timing;
1213 	mmc_set_ios(host);
1214 	mmc_host_clk_release(host);
1215 }
1216 
1217 /*
1218  * Select appropriate driver type for host.
1219  */
1220 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1221 {
1222 	mmc_host_clk_hold(host);
1223 	host->ios.drv_type = drv_type;
1224 	mmc_set_ios(host);
1225 	mmc_host_clk_release(host);
1226 }
1227 
1228 static void mmc_poweroff_notify(struct mmc_host *host)
1229 {
1230 	struct mmc_card *card;
1231 	unsigned int timeout;
1232 	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1233 	int err = 0;
1234 
1235 	card = host->card;
1236 
1237 	/*
1238 	 * Send power notify command only if card
1239 	 * is mmc and notify state is powered ON
1240 	 */
1241 	if (card && mmc_card_mmc(card) &&
1242 	    (card->poweroff_notify_state == MMC_POWERED_ON)) {
1243 
1244 		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1245 			notify_type = EXT_CSD_POWER_OFF_SHORT;
1246 			timeout = card->ext_csd.generic_cmd6_time;
1247 			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1248 		} else {
1249 			notify_type = EXT_CSD_POWER_OFF_LONG;
1250 			timeout = card->ext_csd.power_off_longtime;
1251 			card->poweroff_notify_state = MMC_POWEROFF_LONG;
1252 		}
1253 
1254 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1255 				 EXT_CSD_POWER_OFF_NOTIFICATION,
1256 				 notify_type, timeout);
1257 
1258 		if (err && err != -EBADMSG)
1259 			pr_err("Device failed to respond within %d poweroff "
1260 			       "time. Forcefully powering down the device\n",
1261 			       timeout);
1262 
1263 		/* Set the card state to no notification after the poweroff */
1264 		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1265 	}
1266 }
1267 
1268 /*
1269  * Apply power to the MMC stack.  This is a two-stage process.
1270  * First, we enable power to the card without the clock running.
1271  * We then wait a bit for the power to stabilise.  Finally,
1272  * enable the bus drivers and clock to the card.
1273  *
1274  * We must _NOT_ enable the clock prior to power stabilising.
1275  *
1276  * If a host does all the power sequencing itself, ignore the
1277  * initial MMC_POWER_UP stage.
1278  */
1279 static void mmc_power_up(struct mmc_host *host)
1280 {
1281 	int bit;
1282 
1283 	mmc_host_clk_hold(host);
1284 
1285 	/* If ocr is set, we use it */
1286 	if (host->ocr)
1287 		bit = ffs(host->ocr) - 1;
1288 	else
1289 		bit = fls(host->ocr_avail) - 1;
1290 
1291 	host->ios.vdd = bit;
1292 	if (mmc_host_is_spi(host))
1293 		host->ios.chip_select = MMC_CS_HIGH;
1294 	else
1295 		host->ios.chip_select = MMC_CS_DONTCARE;
1296 	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1297 	host->ios.power_mode = MMC_POWER_UP;
1298 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1299 	host->ios.timing = MMC_TIMING_LEGACY;
1300 	mmc_set_ios(host);
1301 
1302 	/*
1303 	 * This delay should be sufficient to allow the power supply
1304 	 * to reach the minimum voltage.
1305 	 */
1306 	mmc_delay(10);
1307 
1308 	host->ios.clock = host->f_init;
1309 
1310 	host->ios.power_mode = MMC_POWER_ON;
1311 	mmc_set_ios(host);
1312 
1313 	/*
1314 	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1315 	 * time required to reach a stable voltage.
1316 	 */
1317 	mmc_delay(10);
1318 
1319 	mmc_host_clk_release(host);
1320 }
1321 
1322 void mmc_power_off(struct mmc_host *host)
1323 {
1324 	mmc_host_clk_hold(host);
1325 
1326 	host->ios.clock = 0;
1327 	host->ios.vdd = 0;
1328 
1329 	mmc_poweroff_notify(host);
1330 
1331 	/*
1332 	 * Reset ocr mask to be the highest possible voltage supported for
1333 	 * this mmc host. This value will be used at next power up.
1334 	 */
1335 	host->ocr = 1 << (fls(host->ocr_avail) - 1);
1336 
1337 	if (!mmc_host_is_spi(host)) {
1338 		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1339 		host->ios.chip_select = MMC_CS_DONTCARE;
1340 	}
1341 	host->ios.power_mode = MMC_POWER_OFF;
1342 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1343 	host->ios.timing = MMC_TIMING_LEGACY;
1344 	mmc_set_ios(host);
1345 
1346 	/*
1347 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1348 	 * XO-1.5, require a short delay after poweroff before the card
1349 	 * can be successfully turned on again.
1350 	 */
1351 	mmc_delay(1);
1352 
1353 	mmc_host_clk_release(host);
1354 }
1355 
1356 /*
1357  * Cleanup when the last reference to the bus operator is dropped.
1358  */
1359 static void __mmc_release_bus(struct mmc_host *host)
1360 {
1361 	BUG_ON(!host);
1362 	BUG_ON(host->bus_refs);
1363 	BUG_ON(!host->bus_dead);
1364 
1365 	host->bus_ops = NULL;
1366 }
1367 
1368 /*
1369  * Increase reference count of bus operator
1370  */
1371 static inline void mmc_bus_get(struct mmc_host *host)
1372 {
1373 	unsigned long flags;
1374 
1375 	spin_lock_irqsave(&host->lock, flags);
1376 	host->bus_refs++;
1377 	spin_unlock_irqrestore(&host->lock, flags);
1378 }
1379 
1380 /*
1381  * Decrease reference count of bus operator and free it if
1382  * it is the last reference.
1383  */
1384 static inline void mmc_bus_put(struct mmc_host *host)
1385 {
1386 	unsigned long flags;
1387 
1388 	spin_lock_irqsave(&host->lock, flags);
1389 	host->bus_refs--;
1390 	if ((host->bus_refs == 0) && host->bus_ops)
1391 		__mmc_release_bus(host);
1392 	spin_unlock_irqrestore(&host->lock, flags);
1393 }
1394 
1395 /*
1396  * Assign a mmc bus handler to a host. Only one bus handler may control a
1397  * host at any given time.
1398  */
1399 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1400 {
1401 	unsigned long flags;
1402 
1403 	BUG_ON(!host);
1404 	BUG_ON(!ops);
1405 
1406 	WARN_ON(!host->claimed);
1407 
1408 	spin_lock_irqsave(&host->lock, flags);
1409 
1410 	BUG_ON(host->bus_ops);
1411 	BUG_ON(host->bus_refs);
1412 
1413 	host->bus_ops = ops;
1414 	host->bus_refs = 1;
1415 	host->bus_dead = 0;
1416 
1417 	spin_unlock_irqrestore(&host->lock, flags);
1418 }
1419 
1420 /*
1421  * Remove the current bus handler from a host.
1422  */
1423 void mmc_detach_bus(struct mmc_host *host)
1424 {
1425 	unsigned long flags;
1426 
1427 	BUG_ON(!host);
1428 
1429 	WARN_ON(!host->claimed);
1430 	WARN_ON(!host->bus_ops);
1431 
1432 	spin_lock_irqsave(&host->lock, flags);
1433 
1434 	host->bus_dead = 1;
1435 
1436 	spin_unlock_irqrestore(&host->lock, flags);
1437 
1438 	mmc_bus_put(host);
1439 }
1440 
1441 /**
1442  *	mmc_detect_change - process a change of state on an MMC socket
1443  *	@host: host which changed state.
1444  *	@delay: optional delay to wait before detection (jiffies)
1445  *
1446  *	MMC drivers should call this when they detect a card has been
1447  *	inserted or removed. The MMC layer will confirm that any
1448  *	present card is still functional, and initialize any newly
1449  *	inserted card.
1450  */
1451 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1452 {
1453 #ifdef CONFIG_MMC_DEBUG
1454 	unsigned long flags;
1455 	spin_lock_irqsave(&host->lock, flags);
1456 	WARN_ON(host->removed);
1457 	spin_unlock_irqrestore(&host->lock, flags);
1458 #endif
1459 
1460 	mmc_schedule_delayed_work(&host->detect, delay);
1461 }
1462 
1463 EXPORT_SYMBOL(mmc_detect_change);
1464 
1465 void mmc_init_erase(struct mmc_card *card)
1466 {
1467 	unsigned int sz;
1468 
1469 	if (is_power_of_2(card->erase_size))
1470 		card->erase_shift = ffs(card->erase_size) - 1;
1471 	else
1472 		card->erase_shift = 0;
1473 
1474 	/*
1475 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1476 	 * card.  That is not desirable because it can take a long time
1477 	 * (minutes) potentially delaying more important I/O, and also the
1478 	 * timeout calculations become increasingly over-estimated.
1479 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1480 	 * to that size and alignment.
1481 	 *
1482 	 * For SD cards that define Allocation Unit size, limit erases to one
1483 	 * Allocation Unit at a time.  For MMC cards that define High Capacity
1484 	 * Erase Size, whether it is switched on or not, limit to that size.
1485 	 * Otherwise just have a stab at a good value.  For modern cards it
1486 	 * will end up being 4MiB.  Note that if the value is too small, it
1487 	 * can end up taking longer to erase.
1488 	 */
1489 	if (mmc_card_sd(card) && card->ssr.au) {
1490 		card->pref_erase = card->ssr.au;
1491 		card->erase_shift = ffs(card->ssr.au) - 1;
1492 	} else if (card->ext_csd.hc_erase_size) {
1493 		card->pref_erase = card->ext_csd.hc_erase_size;
1494 	} else {
1495 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1496 		if (sz < 128)
1497 			card->pref_erase = 512 * 1024 / 512;
1498 		else if (sz < 512)
1499 			card->pref_erase = 1024 * 1024 / 512;
1500 		else if (sz < 1024)
1501 			card->pref_erase = 2 * 1024 * 1024 / 512;
1502 		else
1503 			card->pref_erase = 4 * 1024 * 1024 / 512;
1504 		if (card->pref_erase < card->erase_size)
1505 			card->pref_erase = card->erase_size;
1506 		else {
1507 			sz = card->pref_erase % card->erase_size;
1508 			if (sz)
1509 				card->pref_erase += card->erase_size - sz;
1510 		}
1511 	}
1512 }
1513 
1514 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1515 				          unsigned int arg, unsigned int qty)
1516 {
1517 	unsigned int erase_timeout;
1518 
1519 	if (card->ext_csd.erase_group_def & 1) {
1520 		/* High Capacity Erase Group Size uses HC timeouts */
1521 		if (arg == MMC_TRIM_ARG)
1522 			erase_timeout = card->ext_csd.trim_timeout;
1523 		else
1524 			erase_timeout = card->ext_csd.hc_erase_timeout;
1525 	} else {
1526 		/* CSD Erase Group Size uses write timeout */
1527 		unsigned int mult = (10 << card->csd.r2w_factor);
1528 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
1529 		unsigned int timeout_us;
1530 
1531 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1532 		if (card->csd.tacc_ns < 1000000)
1533 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
1534 		else
1535 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
1536 
1537 		/*
1538 		 * ios.clock is only a target.  The real clock rate might be
1539 		 * less but not that much less, so fudge it by multiplying by 2.
1540 		 */
1541 		timeout_clks <<= 1;
1542 		timeout_us += (timeout_clks * 1000) /
1543 			      (mmc_host_clk_rate(card->host) / 1000);
1544 
1545 		erase_timeout = timeout_us / 1000;
1546 
1547 		/*
1548 		 * Theoretically, the calculation could underflow so round up
1549 		 * to 1ms in that case.
1550 		 */
1551 		if (!erase_timeout)
1552 			erase_timeout = 1;
1553 	}
1554 
1555 	/* Multiplier for secure operations */
1556 	if (arg & MMC_SECURE_ARGS) {
1557 		if (arg == MMC_SECURE_ERASE_ARG)
1558 			erase_timeout *= card->ext_csd.sec_erase_mult;
1559 		else
1560 			erase_timeout *= card->ext_csd.sec_trim_mult;
1561 	}
1562 
1563 	erase_timeout *= qty;
1564 
1565 	/*
1566 	 * Ensure at least a 1 second timeout for SPI as per
1567 	 * 'mmc_set_data_timeout()'
1568 	 */
1569 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1570 		erase_timeout = 1000;
1571 
1572 	return erase_timeout;
1573 }
1574 
1575 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1576 					 unsigned int arg,
1577 					 unsigned int qty)
1578 {
1579 	unsigned int erase_timeout;
1580 
1581 	if (card->ssr.erase_timeout) {
1582 		/* Erase timeout specified in SD Status Register (SSR) */
1583 		erase_timeout = card->ssr.erase_timeout * qty +
1584 				card->ssr.erase_offset;
1585 	} else {
1586 		/*
1587 		 * Erase timeout not specified in SD Status Register (SSR) so
1588 		 * use 250ms per write block.
1589 		 */
1590 		erase_timeout = 250 * qty;
1591 	}
1592 
1593 	/* Must not be less than 1 second */
1594 	if (erase_timeout < 1000)
1595 		erase_timeout = 1000;
1596 
1597 	return erase_timeout;
1598 }
1599 
1600 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1601 				      unsigned int arg,
1602 				      unsigned int qty)
1603 {
1604 	if (mmc_card_sd(card))
1605 		return mmc_sd_erase_timeout(card, arg, qty);
1606 	else
1607 		return mmc_mmc_erase_timeout(card, arg, qty);
1608 }
1609 
1610 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1611 			unsigned int to, unsigned int arg)
1612 {
1613 	struct mmc_command cmd = {0};
1614 	unsigned int qty = 0;
1615 	int err;
1616 
1617 	/*
1618 	 * qty is used to calculate the erase timeout which depends on how many
1619 	 * erase groups (or allocation units in SD terminology) are affected.
1620 	 * We count erasing part of an erase group as one erase group.
1621 	 * For SD, the allocation units are always a power of 2.  For MMC, the
1622 	 * erase group size is almost certainly also a power of 2, but the
1623 	 * JEDEC standard does not seem to insist on that, so we fall back to
1624 	 * division in that case.  SD may not specify an allocation unit size,
1625 	 * in which case the timeout is based on the number of write blocks.
1626 	 *
1627 	 * Note that the timeout for secure trim 2 will only be correct if the
1628 	 * number of erase groups specified is the same as the total of all
1629 	 * preceding secure trim 1 commands.  Since the power may have been
1630 	 * lost since the secure trim 1 commands occurred, it is generally
1631 	 * impossible to calculate the secure trim 2 timeout correctly.
1632 	 */
1633 	if (card->erase_shift)
1634 		qty += ((to >> card->erase_shift) -
1635 			(from >> card->erase_shift)) + 1;
1636 	else if (mmc_card_sd(card))
1637 		qty += to - from + 1;
1638 	else
1639 		qty += ((to / card->erase_size) -
1640 			(from / card->erase_size)) + 1;
1641 
1642 	if (!mmc_card_blockaddr(card)) {
1643 		from <<= 9;
1644 		to <<= 9;
1645 	}
1646 
1647 	if (mmc_card_sd(card))
1648 		cmd.opcode = SD_ERASE_WR_BLK_START;
1649 	else
1650 		cmd.opcode = MMC_ERASE_GROUP_START;
1651 	cmd.arg = from;
1652 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1653 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1654 	if (err) {
1655 		pr_err("mmc_erase: group start error %d, "
1656 		       "status %#x\n", err, cmd.resp[0]);
1657 		err = -EIO;
1658 		goto out;
1659 	}
1660 
1661 	memset(&cmd, 0, sizeof(struct mmc_command));
1662 	if (mmc_card_sd(card))
1663 		cmd.opcode = SD_ERASE_WR_BLK_END;
1664 	else
1665 		cmd.opcode = MMC_ERASE_GROUP_END;
1666 	cmd.arg = to;
1667 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1668 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1669 	if (err) {
1670 		pr_err("mmc_erase: group end error %d, status %#x\n",
1671 		       err, cmd.resp[0]);
1672 		err = -EIO;
1673 		goto out;
1674 	}
1675 
1676 	memset(&cmd, 0, sizeof(struct mmc_command));
1677 	cmd.opcode = MMC_ERASE;
1678 	cmd.arg = arg;
1679 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1680 	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1681 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1682 	if (err) {
1683 		pr_err("mmc_erase: erase error %d, status %#x\n",
1684 		       err, cmd.resp[0]);
1685 		err = -EIO;
1686 		goto out;
1687 	}
1688 
1689 	if (mmc_host_is_spi(card->host))
1690 		goto out;
1691 
1692 	do {
1693 		memset(&cmd, 0, sizeof(struct mmc_command));
1694 		cmd.opcode = MMC_SEND_STATUS;
1695 		cmd.arg = card->rca << 16;
1696 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1697 		/* Do not retry else we can't see errors */
1698 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
1699 		if (err || (cmd.resp[0] & 0xFDF92000)) {
1700 			pr_err("error %d requesting status %#x\n",
1701 				err, cmd.resp[0]);
1702 			err = -EIO;
1703 			goto out;
1704 		}
1705 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1706 		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1707 out:
1708 	return err;
1709 }
1710 
1711 /**
1712  * mmc_erase - erase sectors.
1713  * @card: card to erase
1714  * @from: first sector to erase
1715  * @nr: number of sectors to erase
1716  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1717  *
1718  * Caller must claim host before calling this function.
1719  */
1720 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1721 	      unsigned int arg)
1722 {
1723 	unsigned int rem, to = from + nr;
1724 
1725 	if (!(card->host->caps & MMC_CAP_ERASE) ||
1726 	    !(card->csd.cmdclass & CCC_ERASE))
1727 		return -EOPNOTSUPP;
1728 
1729 	if (!card->erase_size)
1730 		return -EOPNOTSUPP;
1731 
1732 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1733 		return -EOPNOTSUPP;
1734 
1735 	if ((arg & MMC_SECURE_ARGS) &&
1736 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1737 		return -EOPNOTSUPP;
1738 
1739 	if ((arg & MMC_TRIM_ARGS) &&
1740 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1741 		return -EOPNOTSUPP;
1742 
1743 	if (arg == MMC_SECURE_ERASE_ARG) {
1744 		if (from % card->erase_size || nr % card->erase_size)
1745 			return -EINVAL;
1746 	}
1747 
1748 	if (arg == MMC_ERASE_ARG) {
1749 		rem = from % card->erase_size;
1750 		if (rem) {
1751 			rem = card->erase_size - rem;
1752 			from += rem;
1753 			if (nr > rem)
1754 				nr -= rem;
1755 			else
1756 				return 0;
1757 		}
1758 		rem = nr % card->erase_size;
1759 		if (rem)
1760 			nr -= rem;
1761 	}
1762 
1763 	if (nr == 0)
1764 		return 0;
1765 
1766 	to = from + nr;
1767 
1768 	if (to <= from)
1769 		return -EINVAL;
1770 
1771 	/* 'from' and 'to' are inclusive */
1772 	to -= 1;
1773 
1774 	return mmc_do_erase(card, from, to, arg);
1775 }
1776 EXPORT_SYMBOL(mmc_erase);
1777 
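/*
 * An illustrative sketch of a discard-style caller (the host must be
 * claimed first, per the comment above):
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_can_erase(card) &&
 *	    mmc_erase_group_aligned(card, from, nr))
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 *
 * mmc_can_erase() and mmc_erase_group_aligned() are defined below.
 */
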
1778 int mmc_can_erase(struct mmc_card *card)
1779 {
1780 	if ((card->host->caps & MMC_CAP_ERASE) &&
1781 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1782 		return 1;
1783 	return 0;
1784 }
1785 EXPORT_SYMBOL(mmc_can_erase);
1786 
1787 int mmc_can_trim(struct mmc_card *card)
1788 {
1789 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1790 		return 1;
1791 	if (mmc_can_discard(card))
1792 		return 1;
1793 	return 0;
1794 }
1795 EXPORT_SYMBOL(mmc_can_trim);
1796 
1797 int mmc_can_discard(struct mmc_card *card)
1798 {
1799 	/*
1800 	 * As there's no way to detect the discard support bit at v4.5,
1801 	 * use the s/w feature support field.
1802 	 */
1803 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1804 		return 1;
1805 	return 0;
1806 }
1807 EXPORT_SYMBOL(mmc_can_discard);
1808 
1809 int mmc_can_sanitize(struct mmc_card *card)
1810 {
1811 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1812 		return 1;
1813 	return 0;
1814 }
1815 EXPORT_SYMBOL(mmc_can_sanitize);
1816 
1817 int mmc_can_secure_erase_trim(struct mmc_card *card)
1818 {
1819 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1820 		return 1;
1821 	return 0;
1822 }
1823 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1824 
1825 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1826 			    unsigned int nr)
1827 {
1828 	if (!card->erase_size)
1829 		return 0;
1830 	if (from % card->erase_size || nr % card->erase_size)
1831 		return 0;
1832 	return 1;
1833 }
1834 EXPORT_SYMBOL(mmc_erase_group_aligned);
1835 
1836 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1837 					    unsigned int arg)
1838 {
1839 	struct mmc_host *host = card->host;
1840 	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1841 	unsigned int last_timeout = 0;
1842 
1843 	if (card->erase_shift)
1844 		max_qty = UINT_MAX >> card->erase_shift;
1845 	else if (mmc_card_sd(card))
1846 		max_qty = UINT_MAX;
1847 	else
1848 		max_qty = UINT_MAX / card->erase_size;
1849 
1850 	/* Find the largest qty with an OK timeout */
1851 	do {
1852 		y = 0;
1853 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1854 			timeout = mmc_erase_timeout(card, arg, qty + x);
1855 			if (timeout > host->max_discard_to)
1856 				break;
1857 			if (timeout < last_timeout)
1858 				break;
1859 			last_timeout = timeout;
1860 			y = x;
1861 		}
1862 		qty += y;
1863 	} while (y);
1864 
1865 	if (!qty)
1866 		return 0;
1867 
1868 	if (qty == 1)
1869 		return 1;
1870 
1871 	/* Convert qty to sectors */
1872 	if (card->erase_shift)
1873 		max_discard = --qty << card->erase_shift;
1874 	else if (mmc_card_sd(card))
1875 		max_discard = qty;
1876 	else
1877 		max_discard = --qty * card->erase_size;
1878 
1879 	return max_discard;
1880 }
1881 
1882 unsigned int mmc_calc_max_discard(struct mmc_card *card)
1883 {
1884 	struct mmc_host *host = card->host;
1885 	unsigned int max_discard, max_trim;
1886 
1887 	if (!host->max_discard_to)
1888 		return UINT_MAX;
1889 
1890 	/*
1891 	 * Without erase_group_def set, MMC erase timeout depends on clock
1892 	 * frequency, which can change.  In that case, the best choice is
1893 	 * just the preferred erase size.
1894 	 */
1895 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1896 		return card->pref_erase;
1897 
1898 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1899 	if (mmc_can_trim(card)) {
1900 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1901 		if (max_trim < max_discard)
1902 			max_discard = max_trim;
1903 	} else if (max_discard < card->erase_size) {
1904 		max_discard = 0;
1905 	}
1906 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1907 		 mmc_hostname(host), max_discard, host->max_discard_to);
1908 	return max_discard;
1909 }
1910 EXPORT_SYMBOL(mmc_calc_max_discard);
1911 
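/*
 * A sketch of the intended use: the block driver feeds this value
 * into the request queue's discard limit so that no single discard
 * exceeds an acceptable timeout (queue wiring is illustrative):
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	blk_queue_max_discard_sectors(queue, max_discard);
 */
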
1912 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1913 {
1914 	struct mmc_command cmd = {0};
1915 
1916 	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1917 		return 0;
1918 
1919 	cmd.opcode = MMC_SET_BLOCKLEN;
1920 	cmd.arg = blocklen;
1921 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1922 	return mmc_wait_for_cmd(card->host, &cmd, 5);
1923 }
1924 EXPORT_SYMBOL(mmc_set_blocklen);
1925 
1926 static void mmc_hw_reset_for_init(struct mmc_host *host)
1927 {
1928 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1929 		return;
1930 	mmc_host_clk_hold(host);
1931 	host->ops->hw_reset(host);
1932 	mmc_host_clk_release(host);
1933 }
1934 
1935 int mmc_can_reset(struct mmc_card *card)
1936 {
1937 	u8 rst_n_function;
1938 
1939 	if (!mmc_card_mmc(card))
1940 		return 0;
1941 	rst_n_function = card->ext_csd.rst_n_function;
1942 	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
1943 		return 0;
1944 	return 1;
1945 }
1946 EXPORT_SYMBOL(mmc_can_reset);
1947 
1948 static int mmc_do_hw_reset(struct mmc_host *host, int check)
1949 {
1950 	struct mmc_card *card = host->card;
1951 
1952 	if (!host->bus_ops->power_restore)
1953 		return -EOPNOTSUPP;
1954 
1955 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1956 		return -EOPNOTSUPP;
1957 
1958 	if (!card)
1959 		return -EINVAL;
1960 
1961 	if (!mmc_can_reset(card))
1962 		return -EOPNOTSUPP;
1963 
1964 	mmc_host_clk_hold(host);
1965 	mmc_set_clock(host, host->f_init);
1966 
1967 	host->ops->hw_reset(host);
1968 
1969 	/* If the reset has happened, then a status command will fail */
1970 	if (check) {
1971 		struct mmc_command cmd = {0};
1972 		int err;
1973 
1974 		cmd.opcode = MMC_SEND_STATUS;
1975 		if (!mmc_host_is_spi(card->host))
1976 			cmd.arg = card->rca << 16;
1977 		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1978 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
1979 		if (!err) {
1980 			mmc_host_clk_release(host);
1981 			return -ENOSYS;
1982 		}
1983 	}
1984 
1985 	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
1986 	if (mmc_host_is_spi(host)) {
1987 		host->ios.chip_select = MMC_CS_HIGH;
1988 		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1989 	} else {
1990 		host->ios.chip_select = MMC_CS_DONTCARE;
1991 		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1992 	}
1993 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1994 	host->ios.timing = MMC_TIMING_LEGACY;
1995 	mmc_set_ios(host);
1996 
1997 	mmc_host_clk_release(host);
1998 
1999 	return host->bus_ops->power_restore(host);
2000 }
2001 
2002 int mmc_hw_reset(struct mmc_host *host)
2003 {
2004 	return mmc_do_hw_reset(host, 0);
2005 }
2006 EXPORT_SYMBOL(mmc_hw_reset);
2007 
2008 int mmc_hw_reset_check(struct mmc_host *host)
2009 {
2010 	return mmc_do_hw_reset(host, 1);
2011 }
2012 EXPORT_SYMBOL(mmc_hw_reset_check);
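
/*
 * Minimal sketch, compiled out: probing whether a hardware reset actually
 * works before relying on it.  example_probe_hw_reset() is an assumed
 * name; the error conventions follow mmc_do_hw_reset() above
 * (-EOPNOTSUPP: not supported, -ENOSYS: the reset had no visible effect).
 */
#if 0
static int example_probe_hw_reset(struct mmc_host *host)
{
	int err = mmc_hw_reset_check(host);

	if (err == -EOPNOTSUPP || err == -ENOSYS)
		pr_debug("%s: usable hw reset not available (%d)\n",
			 mmc_hostname(host), err);
	return err;
}
#endif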
2013 
2014 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2015 {
2016 	host->f_init = freq;
2017 
2018 #ifdef CONFIG_MMC_DEBUG
2019 	pr_info("%s: %s: trying to init card at %u Hz\n",
2020 		mmc_hostname(host), __func__, host->f_init);
2021 #endif
2022 	mmc_power_up(host);
2023 
2024 	/*
2025 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2026 	 * do a hardware reset if possible.
2027 	 */
2028 	mmc_hw_reset_for_init(host);
2029 
2030 	/*
2031 	 * sdio_reset sends CMD52 to reset the card.  Since we do not know
2032 	 * if the card is being re-initialized, just send it.  CMD52
2033 	 * should be ignored by SD/eMMC cards.
2034 	 */
2035 	sdio_reset(host);
2036 	mmc_go_idle(host);
2037 
2038 	mmc_send_if_cond(host, host->ocr_avail);
2039 
2040 	/* Order's important: probe SDIO, then SD, then MMC */
2041 	if (!mmc_attach_sdio(host))
2042 		return 0;
2043 	if (!mmc_attach_sd(host))
2044 		return 0;
2045 	if (!mmc_attach_mmc(host))
2046 		return 0;
2047 
2048 	mmc_power_off(host);
2049 	return -EIO;
2050 }
2051 
2052 void mmc_rescan(struct work_struct *work)
2053 {
2054 	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
2055 	struct mmc_host *host =
2056 		container_of(work, struct mmc_host, detect.work);
2057 	int i;
2058 
2059 	if (host->rescan_disable)
2060 		return;
2061 
2062 	mmc_bus_get(host);
2063 
2064 	/*
2065 	 * if there is a _removable_ card registered, check whether it is
2066 	 * still present
2067 	 */
2068 	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
2069 	    && !(host->caps & MMC_CAP_NONREMOVABLE))
2070 		host->bus_ops->detect(host);
2071 
2072 	/*
2073 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2074 	 * the card is no longer present.
2075 	 */
2076 	mmc_bus_put(host);
2077 	mmc_bus_get(host);
2078 
2079 	/* if there is still a card present, stop here */
2080 	if (host->bus_ops != NULL) {
2081 		mmc_bus_put(host);
2082 		goto out;
2083 	}
2084 
2085 	/*
2086 	 * Only we can add a new handler, so it's safe to
2087 	 * release the lock here.
2088 	 */
2089 	mmc_bus_put(host);
2090 
2091 	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
2092 		goto out;
2093 
2094 	mmc_claim_host(host);
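	/*
	 * Walk the standard probe frequencies from fastest to slowest,
	 * never going below the host's minimum; once an attempt is already
	 * clamped to host->f_min there is nothing slower left to try.
	 */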
2095 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2096 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2097 			break;
2098 		if (freqs[i] <= host->f_min)
2099 			break;
2100 	}
2101 	mmc_release_host(host);
2102 
2103  out:
2104 	if (host->caps & MMC_CAP_NEEDS_POLL)
2105 		mmc_schedule_delayed_work(&host->detect, HZ);
2106 }
2107 
2108 void mmc_start_host(struct mmc_host *host)
2109 {
2110 	mmc_power_off(host);
2111 	mmc_detect_change(host, 0);
2112 }
2113 
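/*
 * Note: host drivers do not call mmc_start_host()/mmc_stop_host()
 * directly; they are invoked from mmc_add_host() and mmc_remove_host()
 * in host.c.
 */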
2114 void mmc_stop_host(struct mmc_host *host)
2115 {
2116 #ifdef CONFIG_MMC_DEBUG
2117 	unsigned long flags;
2118 	spin_lock_irqsave(&host->lock, flags);
2119 	host->removed = 1;
2120 	spin_unlock_irqrestore(&host->lock, flags);
2121 #endif
2122 
2123 	if (host->caps & MMC_CAP_DISABLE)
2124 		cancel_delayed_work(&host->disable);
2125 	cancel_delayed_work_sync(&host->detect);
2126 	mmc_flush_scheduled_work();
2127 
2128 	/* clear pm flags now and let card drivers set them as needed */
2129 	host->pm_flags = 0;
2130 
2131 	mmc_bus_get(host);
2132 	if (host->bus_ops && !host->bus_dead) {
2133 		if (host->bus_ops->remove)
2134 			host->bus_ops->remove(host);
2135 
2136 		mmc_claim_host(host);
2137 		mmc_detach_bus(host);
2138 		mmc_power_off(host);
2139 		mmc_release_host(host);
2140 		mmc_bus_put(host);
2141 		return;
2142 	}
2143 	mmc_bus_put(host);
2144 
2145 	BUG_ON(host->card);
2146 
2147 	mmc_power_off(host);
2148 }
2149 
2150 int mmc_power_save_host(struct mmc_host *host)
2151 {
2152 	int ret = 0;
2153 
2154 #ifdef CONFIG_MMC_DEBUG
2155 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2156 #endif
2157 
2158 	mmc_bus_get(host);
2159 
2160 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2161 		mmc_bus_put(host);
2162 		return -EINVAL;
2163 	}
2164 
2165 	if (host->bus_ops->power_save)
2166 		ret = host->bus_ops->power_save(host);
2167 
2168 	mmc_bus_put(host);
2169 
2170 	mmc_power_off(host);
2171 
2172 	return ret;
2173 }
2174 EXPORT_SYMBOL(mmc_power_save_host);
2175 
2176 int mmc_power_restore_host(struct mmc_host *host)
2177 {
2178 	int ret;
2179 
2180 #ifdef CONFIG_MMC_DEBUG
2181 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2182 #endif
2183 
2184 	mmc_bus_get(host);
2185 
2186 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2187 		mmc_bus_put(host);
2188 		return -EINVAL;
2189 	}
2190 
2191 	mmc_power_up(host);
2192 	ret = host->bus_ops->power_restore(host);
2193 
2194 	mmc_bus_put(host);
2195 
2196 	return ret;
2197 }
2198 EXPORT_SYMBOL(mmc_power_restore_host);
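
/*
 * Minimal sketch, compiled out: a layer using the pair above to power the
 * host down aggressively while the card stays attached, e.g. from runtime
 * PM callbacks.  The example_* names are assumptions.
 */
#if 0
static int example_runtime_idle(struct mmc_host *host)
{
	return mmc_power_save_host(host);	/* save state + power off */
}

static int example_runtime_wake(struct mmc_host *host)
{
	return mmc_power_restore_host(host);	/* power up + restore */
}
#endif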
2199 
2200 int mmc_card_awake(struct mmc_host *host)
2201 {
2202 	int err = -ENOSYS;
2203 
2204 	mmc_bus_get(host);
2205 
2206 	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
2207 		err = host->bus_ops->awake(host);
2208 
2209 	mmc_bus_put(host);
2210 
2211 	return err;
2212 }
2213 EXPORT_SYMBOL(mmc_card_awake);
2214 
2215 int mmc_card_sleep(struct mmc_host *host)
2216 {
2217 	int err = -ENOSYS;
2218 
2219 	mmc_bus_get(host);
2220 
2221 	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
2222 		err = host->bus_ops->sleep(host);
2223 
2224 	mmc_bus_put(host);
2225 
2226 	return err;
2227 }
2228 EXPORT_SYMBOL(mmc_card_sleep);
2229 
2230 int mmc_card_can_sleep(struct mmc_host *host)
2231 {
2232 	struct mmc_card *card = host->card;
2233 
2234 	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
2235 		return 1;
2236 	return 0;
2237 }
2238 EXPORT_SYMBOL(mmc_card_can_sleep);
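
/*
 * Illustrative sketch, compiled out: putting an eligible eMMC to sleep
 * and waking it again.  mmc_card_can_sleep() above restricts this to MMC
 * cards with EXT_CSD revision >= 3; the example_* names are assumptions.
 */
#if 0
static int example_card_idle(struct mmc_host *host)
{
	if (!mmc_card_can_sleep(host))
		return -ENOSYS;
	return mmc_card_sleep(host);
}

static int example_card_wake(struct mmc_host *host)
{
	return mmc_card_awake(host);
}
#endif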
2239 
2240 /*
2241  * Flush the cache to the non-volatile storage.
2242  */
2243 int mmc_flush_cache(struct mmc_card *card)
2244 {
2245 	struct mmc_host *host = card->host;
2246 	int err = 0;
2247 
2248 	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
2249 		return err;
2250 
2251 	if (mmc_card_mmc(card) &&
2252 			(card->ext_csd.cache_size > 0) &&
2253 			(card->ext_csd.cache_ctrl & 1)) {
2254 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2255 				EXT_CSD_FLUSH_CACHE, 1, 0);
2256 		if (err)
2257 			pr_err("%s: cache flush error %d\n",
2258 					mmc_hostname(card->host), err);
2259 	}
2260 
2261 	return err;
2262 }
2263 EXPORT_SYMBOL(mmc_flush_cache);
2264 
2265 /*
2266  * Turn the cache ON/OFF.
2267  * Turning the cache OFF shall trigger flushing of the data
2268  * to the non-volatile storage.
2269  */
2270 int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2271 {
2272 	struct mmc_card *card = host->card;
2273 	int err = 0;
2274 
2275 	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
2276 			mmc_card_is_removable(host))
2277 		return err;
2278 
2279 	if (card && mmc_card_mmc(card) &&
2280 			(card->ext_csd.cache_size > 0)) {
2281 		enable = !!enable;
2282 
2283 		if (card->ext_csd.cache_ctrl ^ enable)
2284 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2285 					EXT_CSD_CACHE_CTRL, enable, 0);
2286 		if (err)
2287 			pr_err("%s: cache %s error %d\n",
2288 					mmc_hostname(card->host),
2289 					enable ? "on" : "off",
2290 					err);
2291 		else
2292 			card->ext_csd.cache_ctrl = enable;
2293 	}
2294 
2295 	return err;
2296 }
2297 EXPORT_SYMBOL(mmc_cache_ctrl);
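
/*
 * Minimal sketch, compiled out: disabling the cache across a power
 * transition and re-enabling it afterwards.  Turning the cache off
 * flushes it first, as documented above; example_cache_off_on() is an
 * assumed name.
 */
#if 0
static int example_cache_off_on(struct mmc_host *host)
{
	int err = mmc_cache_ctrl(host, 0);	/* flush + disable */

	if (err)
		return err;
	/* ... power transition ... */
	return mmc_cache_ctrl(host, 1);		/* enable again */
}
#endif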
2298 
2299 #ifdef CONFIG_PM
2300 
2301 /**
2302  *	mmc_suspend_host - suspend a host
2303  *	@host: mmc host
2304  */
2305 int mmc_suspend_host(struct mmc_host *host)
2306 {
2307 	int err = 0;
2308 
2309 	if (host->caps & MMC_CAP_DISABLE)
2310 		cancel_delayed_work(&host->disable);
2311 	cancel_delayed_work(&host->detect);
2312 	mmc_flush_scheduled_work();
2313 	err = mmc_cache_ctrl(host, 0);
2314 	if (err)
2315 		goto out;
2316 
2317 	mmc_bus_get(host);
2318 	if (host->bus_ops && !host->bus_dead) {
2319 
2320 		/*
2321 		 * A long response time is not acceptable for device drivers
2322 		 * when doing suspend.  Prevent mmc_claim_host() in the suspend
2323 		 * sequence from potentially waiting "forever" by pre-claiming
2324 		 * the host with the non-blocking mmc_try_claim_host().
2325 		 */
2326 		if (mmc_try_claim_host(host)) {
2327 			if (host->bus_ops->suspend) {
2328 				/*
2329 				 * For eMMC 4.5 devices, send the poweroff
2330 				 * notify command before sleep: in the sleep
2331 				 * state they respond only to RESET and AWAKE.
2332 				 */
2333 				mmc_poweroff_notify(host);
2334 				err = host->bus_ops->suspend(host);
2335 			}
2336 			mmc_do_release_host(host);
2337 
2338 			if (err == -ENOSYS || !host->bus_ops->resume) {
2339 				/*
2340 				 * We simply "remove" the card in this case.
2341 				 * It will be redetected on resume.
2342 				 */
2343 				if (host->bus_ops->remove)
2344 					host->bus_ops->remove(host);
2345 				mmc_claim_host(host);
2346 				mmc_detach_bus(host);
2347 				mmc_power_off(host);
2348 				mmc_release_host(host);
2349 				host->pm_flags = 0;
2350 				err = 0;
2351 			}
2352 		} else {
2353 			err = -EBUSY;
2354 		}
2355 	}
2356 	mmc_bus_put(host);
2357 
2358 	if (!err && !mmc_card_keep_power(host))
2359 		mmc_power_off(host);
2360 
2361 out:
2362 	return err;
2363 }
2364 
2365 EXPORT_SYMBOL(mmc_suspend_host);
2366 
2367 /**
2368  *	mmc_resume_host - resume a previously suspended host
2369  *	@host: mmc host
2370  */
2371 int mmc_resume_host(struct mmc_host *host)
2372 {
2373 	int err = 0;
2374 
2375 	mmc_bus_get(host);
2376 	if (host->bus_ops && !host->bus_dead) {
2377 		if (!mmc_card_keep_power(host)) {
2378 			mmc_power_up(host);
2379 			mmc_select_voltage(host, host->ocr);
2380 			/*
2381 			 * Tell runtime PM core we just powered up the card,
2382 			 * since it still believes the card is powered off.
2383 			 * Note that currently runtime PM is only enabled
2384 			 * for SDIO cards on hosts with MMC_CAP_POWER_OFF_CARD set.
2385 			 */
2386 			if (mmc_card_sdio(host->card) &&
2387 			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
2388 				pm_runtime_disable(&host->card->dev);
2389 				pm_runtime_set_active(&host->card->dev);
2390 				pm_runtime_enable(&host->card->dev);
2391 			}
2392 		}
2393 		BUG_ON(!host->bus_ops->resume);
2394 		err = host->bus_ops->resume(host);
2395 		if (err) {
2396 			pr_warning("%s: error %d during resume (card was removed?)\n",
2397 				   mmc_hostname(host),
2398 				   err);
2399 			err = 0;
2400 		}
2401 	}
2402 	host->pm_flags &= ~MMC_PM_KEEP_POWER;
2403 	mmc_bus_put(host);
2404 
2405 	return err;
2406 }
2407 EXPORT_SYMBOL(mmc_resume_host);
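
/*
 * Minimal sketch, compiled out, of a host controller driver's system PM
 * hooks delegating to the two helpers above.  The example_* names and the
 * use of driver data are assumptions about such a driver, not core API.
 */
#if 0
static int example_drv_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	return mmc_suspend_host(mmc);
}

static int example_drv_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	return mmc_resume_host(mmc);
}
#endif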
2408 
2409 /* Do the card removal on suspend if the card is assumed removable.
2410  * Do that in the pm notifier while userspace isn't yet frozen, so we
2411  * will be able to sync the card.
2412  */
2413 int mmc_pm_notify(struct notifier_block *notify_block,
2414 					unsigned long mode, void *unused)
2415 {
2416 	struct mmc_host *host = container_of(
2417 		notify_block, struct mmc_host, pm_notify);
2418 	unsigned long flags;
2419 
2421 	switch (mode) {
2422 	case PM_HIBERNATION_PREPARE:
2423 	case PM_SUSPEND_PREPARE:
2424 
2425 		spin_lock_irqsave(&host->lock, flags);
2426 		host->rescan_disable = 1;
2427 		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2428 		spin_unlock_irqrestore(&host->lock, flags);
2429 		cancel_delayed_work_sync(&host->detect);
2430 
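		/*
		 * A bus that implements suspend is handled later in
		 * mmc_suspend_host(); only a bus without a suspend method
		 * is torn down here, while userspace can still sync.
		 */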
2431 		if (!host->bus_ops || host->bus_ops->suspend)
2432 			break;
2433 
2434 		mmc_claim_host(host);
2435 
2436 		if (host->bus_ops->remove)
2437 			host->bus_ops->remove(host);
2438 
2439 		mmc_detach_bus(host);
2440 		mmc_power_off(host);
2441 		mmc_release_host(host);
2442 		host->pm_flags = 0;
2443 		break;
2444 
2445 	case PM_POST_SUSPEND:
2446 	case PM_POST_HIBERNATION:
2447 	case PM_POST_RESTORE:
2448 
2449 		spin_lock_irqsave(&host->lock, flags);
2450 		host->rescan_disable = 0;
2451 		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
2452 		spin_unlock_irqrestore(&host->lock, flags);
2453 		mmc_detect_change(host, 0);
2454 
2455 	}
2456 
2457 	return 0;
2458 }
2459 #endif
2460 
2461 static int __init mmc_init(void)
2462 {
2463 	int ret;
2464 
2465 	workqueue = alloc_ordered_workqueue("kmmcd", 0);
2466 	if (!workqueue)
2467 		return -ENOMEM;
2468 
2469 	ret = mmc_register_bus();
2470 	if (ret)
2471 		goto destroy_workqueue;
2472 
2473 	ret = mmc_register_host_class();
2474 	if (ret)
2475 		goto unregister_bus;
2476 
2477 	ret = sdio_register_bus();
2478 	if (ret)
2479 		goto unregister_host_class;
2480 
2481 	return 0;
2482 
2483 unregister_host_class:
2484 	mmc_unregister_host_class();
2485 unregister_bus:
2486 	mmc_unregister_bus();
2487 destroy_workqueue:
2488 	destroy_workqueue(workqueue);
2489 
2490 	return ret;
2491 }
2492 
2493 static void __exit mmc_exit(void)
2494 {
2495 	sdio_unregister_bus();
2496 	mmc_unregister_host_class();
2497 	mmc_unregister_bus();
2498 	destroy_workqueue(workqueue);
2499 }
2500 
2501 subsys_initcall(mmc_init);
2502 module_exit(mmc_exit);
2503 
2504 MODULE_LICENSE("GPL");
2505