xref: /linux/drivers/mmc/core/core.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
31 #include <linux/of.h>
32 
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
38 
39 #include "core.h"
40 #include "bus.h"
41 #include "host.h"
42 #include "sdio_bus.h"
43 #include "pwrseq.h"
44 
45 #include "mmc_ops.h"
46 #include "sd_ops.h"
47 #include "sdio_ops.h"
48 
49 /* If the device is not responding */
50 #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
51 
52 /*
53  * Background operations can take a long time, depending on the housekeeping
54  * operations the card has to perform.
55  */
56 #define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
57 
58 static struct workqueue_struct *workqueue;
59 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
60 
61 /*
62  * Enabling software CRCs on the data blocks can be a significant (30%)
63  * performance cost, and for other reasons may not always be desired.
64  * So we allow it to be disabled.
65  */
66 bool use_spi_crc = 1;
67 module_param(use_spi_crc, bool, 0);
68 
69 /*
70  * Internal function. Schedule delayed work in the MMC work queue.
71  */
72 static int mmc_schedule_delayed_work(struct delayed_work *work,
73 				     unsigned long delay)
74 {
75 	return queue_delayed_work(workqueue, work, delay);
76 }
77 
78 /*
79  * Internal function. Flush all scheduled work from the MMC work queue.
80  */
81 static void mmc_flush_scheduled_work(void)
82 {
83 	flush_workqueue(workqueue);
84 }
85 
86 #ifdef CONFIG_FAIL_MMC_REQUEST
87 
88 /*
89  * Internal function. Inject random data errors.
90  * If mmc_data is NULL no errors are injected.
91  */
92 static void mmc_should_fail_request(struct mmc_host *host,
93 				    struct mmc_request *mrq)
94 {
95 	struct mmc_command *cmd = mrq->cmd;
96 	struct mmc_data *data = mrq->data;
97 	static const int data_errors[] = {
98 		-ETIMEDOUT,
99 		-EILSEQ,
100 		-EIO,
101 	};
102 
103 	if (!data)
104 		return;
105 
106 	if (cmd->error || data->error ||
107 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
108 		return;
109 
110 	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
111 	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
112 }
113 
114 #else /* CONFIG_FAIL_MMC_REQUEST */
115 
116 static inline void mmc_should_fail_request(struct mmc_host *host,
117 					   struct mmc_request *mrq)
118 {
119 }
120 
121 #endif /* CONFIG_FAIL_MMC_REQUEST */
122 
123 /**
124  *	mmc_request_done - finish processing an MMC request
125  *	@host: MMC host which completed the request
126  *	@mrq: MMC request which completed
127  *
128  *	MMC drivers should call this function when they have completed
129  *	their processing of a request.
130  */
131 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
132 {
133 	struct mmc_command *cmd = mrq->cmd;
134 	int err = cmd->error;
135 
136 	/* Flag re-tuning needed on CRC errors */
137 	if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
138 	    (mrq->data && mrq->data->error == -EILSEQ) ||
139 	    (mrq->stop && mrq->stop->error == -EILSEQ))
140 		mmc_retune_needed(host);
141 
142 	if (err && cmd->retries && mmc_host_is_spi(host)) {
143 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
144 			cmd->retries = 0;
145 	}
146 
147 	if (err && cmd->retries && !mmc_card_removed(host->card)) {
148 		/*
149 		 * Request starter must handle retries - see
150 		 * mmc_wait_for_req_done().
151 		 */
152 		if (mrq->done)
153 			mrq->done(mrq);
154 	} else {
155 		mmc_should_fail_request(host, mrq);
156 
157 		led_trigger_event(host->led, LED_OFF);
158 
159 		if (mrq->sbc) {
160 			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
161 				mmc_hostname(host), mrq->sbc->opcode,
162 				mrq->sbc->error,
163 				mrq->sbc->resp[0], mrq->sbc->resp[1],
164 				mrq->sbc->resp[2], mrq->sbc->resp[3]);
165 		}
166 
167 		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
168 			mmc_hostname(host), cmd->opcode, err,
169 			cmd->resp[0], cmd->resp[1],
170 			cmd->resp[2], cmd->resp[3]);
171 
172 		if (mrq->data) {
173 			pr_debug("%s:     %d bytes transferred: %d\n",
174 				mmc_hostname(host),
175 				mrq->data->bytes_xfered, mrq->data->error);
176 		}
177 
178 		if (mrq->stop) {
179 			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
180 				mmc_hostname(host), mrq->stop->opcode,
181 				mrq->stop->error,
182 				mrq->stop->resp[0], mrq->stop->resp[1],
183 				mrq->stop->resp[2], mrq->stop->resp[3]);
184 		}
185 
186 		if (mrq->done)
187 			mrq->done(mrq);
188 
189 		mmc_host_clk_release(host);
190 	}
191 }
192 
193 EXPORT_SYMBOL(mmc_request_done);
194 
195 static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
196 {
197 	int err;
198 
199 	/* Assumes host controller has been runtime resumed by mmc_claim_host */
200 	err = mmc_retune(host);
201 	if (err) {
202 		mrq->cmd->error = err;
203 		mmc_request_done(host, mrq);
204 		return;
205 	}
206 
207 	host->ops->request(host, mrq);
208 }
209 
210 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
211 {
212 #ifdef CONFIG_MMC_DEBUG
213 	unsigned int i, sz;
214 	struct scatterlist *sg;
215 #endif
216 	mmc_retune_hold(host);
217 
218 	if (mmc_card_removed(host->card))
219 		return -ENOMEDIUM;
220 
221 	if (mrq->sbc) {
222 		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
223 			 mmc_hostname(host), mrq->sbc->opcode,
224 			 mrq->sbc->arg, mrq->sbc->flags);
225 	}
226 
227 	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
228 		 mmc_hostname(host), mrq->cmd->opcode,
229 		 mrq->cmd->arg, mrq->cmd->flags);
230 
231 	if (mrq->data) {
232 		pr_debug("%s:     blksz %d blocks %d flags %08x "
233 			"tsac %d ms nsac %d\n",
234 			mmc_hostname(host), mrq->data->blksz,
235 			mrq->data->blocks, mrq->data->flags,
236 			mrq->data->timeout_ns / 1000000,
237 			mrq->data->timeout_clks);
238 	}
239 
240 	if (mrq->stop) {
241 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
242 			 mmc_hostname(host), mrq->stop->opcode,
243 			 mrq->stop->arg, mrq->stop->flags);
244 	}
245 
246 	WARN_ON(!host->claimed);
247 
248 	mrq->cmd->error = 0;
249 	mrq->cmd->mrq = mrq;
250 	if (mrq->sbc) {
251 		mrq->sbc->error = 0;
252 		mrq->sbc->mrq = mrq;
253 	}
254 	if (mrq->data) {
255 		BUG_ON(mrq->data->blksz > host->max_blk_size);
256 		BUG_ON(mrq->data->blocks > host->max_blk_count);
257 		BUG_ON(mrq->data->blocks * mrq->data->blksz >
258 			host->max_req_size);
259 
260 #ifdef CONFIG_MMC_DEBUG
261 		sz = 0;
262 		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
263 			sz += sg->length;
264 		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
265 #endif
266 
267 		mrq->cmd->data = mrq->data;
268 		mrq->data->error = 0;
269 		mrq->data->mrq = mrq;
270 		if (mrq->stop) {
271 			mrq->data->stop = mrq->stop;
272 			mrq->stop->error = 0;
273 			mrq->stop->mrq = mrq;
274 		}
275 	}
276 	mmc_host_clk_hold(host);
277 	led_trigger_event(host->led, LED_FULL);
278 	__mmc_start_request(host, mrq);
279 
280 	return 0;
281 }
282 
283 /**
284  *	mmc_start_bkops - start BKOPS for supported cards
285  *	@card: MMC card to start BKOPS
286  *	@from_exception: A flag to indicate if this function was
287  *			 called due to an exception raised by the card
288  *
289  *	Start background operations whenever requested.
290  *	When the urgent BKOPS bit is set in a R1 command response
291  *	then background operations should be started immediately.
292  */
293 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
294 {
295 	int err;
296 	int timeout;
297 	bool use_busy_signal;
298 
299 	BUG_ON(!card);
300 
301 	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
302 		return;
303 
304 	err = mmc_read_bkops_status(card);
305 	if (err) {
306 		pr_err("%s: Failed to read bkops status: %d\n",
307 		       mmc_hostname(card->host), err);
308 		return;
309 	}
310 
311 	if (!card->ext_csd.raw_bkops_status)
312 		return;
313 
314 	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
315 	    from_exception)
316 		return;
317 
318 	mmc_claim_host(card->host);
319 	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
320 		timeout = MMC_BKOPS_MAX_TIMEOUT;
321 		use_busy_signal = true;
322 	} else {
323 		timeout = 0;
324 		use_busy_signal = false;
325 	}
326 
327 	mmc_retune_hold(card->host);
328 
329 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
330 			EXT_CSD_BKOPS_START, 1, timeout,
331 			use_busy_signal, true, false);
332 	if (err) {
333 		pr_warn("%s: Error %d starting bkops\n",
334 			mmc_hostname(card->host), err);
335 		mmc_retune_release(card->host);
336 		goto out;
337 	}
338 
339 	/*
340 	 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is
341 	 * executed synchronously with a busy signal; otherwise the
342 	 * operation is left running in the background.
343 	 */
344 	if (!use_busy_signal)
345 		mmc_card_set_doing_bkops(card);
346 	else
347 		mmc_retune_release(card->host);
348 out:
349 	mmc_release_host(card->host);
350 }
351 EXPORT_SYMBOL(mmc_start_bkops);
352 
353 /*
354  * mmc_wait_data_done() - done callback for data request
355  * @mrq: done data request
356  *
357  * Wakes up mmc context, passed as a callback to host controller driver
358  */
359 static void mmc_wait_data_done(struct mmc_request *mrq)
360 {
361 	mrq->host->context_info.is_done_rcv = true;
362 	wake_up_interruptible(&mrq->host->context_info.wait);
363 }
364 
365 static void mmc_wait_done(struct mmc_request *mrq)
366 {
367 	complete(&mrq->completion);
368 }
369 
370 /*
371  * __mmc_start_data_req() - start a data request
372  * @host: MMC host to start the request
373  * @mrq: data request to start
374  *
375  * Sets the done callback to be called when the request is completed by the
376  * card, then starts the data MMC request execution.
377  */
378 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
379 {
380 	int err;
381 
382 	mrq->done = mmc_wait_data_done;
383 	mrq->host = host;
384 
385 	err = mmc_start_request(host, mrq);
386 	if (err) {
387 		mrq->cmd->error = err;
388 		mmc_wait_data_done(mrq);
389 	}
390 
391 	return err;
392 }
393 
394 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
395 {
396 	int err;
397 
398 	init_completion(&mrq->completion);
399 	mrq->done = mmc_wait_done;
400 
401 	err = mmc_start_request(host, mrq);
402 	if (err) {
403 		mrq->cmd->error = err;
404 		complete(&mrq->completion);
405 	}
406 
407 	return err;
408 }
409 
410 /*
411  * mmc_wait_for_data_req_done() - wait for request completed
412  * @host: MMC host to prepare the command.
413  * @mrq: MMC request to wait for
414  *
415  * Blocks the MMC context until the host controller acks the end of the data
416  * request execution or a new request notification arrives from the block layer.
417  * Handles command retries.
418  *
419  * Returns enum mmc_blk_status after checking errors.
420  */
421 static int mmc_wait_for_data_req_done(struct mmc_host *host,
422 				      struct mmc_request *mrq,
423 				      struct mmc_async_req *next_req)
424 {
425 	struct mmc_command *cmd;
426 	struct mmc_context_info *context_info = &host->context_info;
427 	int err;
428 	unsigned long flags;
429 
430 	while (1) {
431 		wait_event_interruptible(context_info->wait,
432 				(context_info->is_done_rcv ||
433 				 context_info->is_new_req));
434 		spin_lock_irqsave(&context_info->lock, flags);
435 		context_info->is_waiting_last_req = false;
436 		spin_unlock_irqrestore(&context_info->lock, flags);
437 		if (context_info->is_done_rcv) {
438 			context_info->is_done_rcv = false;
439 			context_info->is_new_req = false;
440 			cmd = mrq->cmd;
441 
442 			if (!cmd->error || !cmd->retries ||
443 			    mmc_card_removed(host->card)) {
444 				err = host->areq->err_check(host->card,
445 							    host->areq);
446 				break; /* return err */
447 			} else {
448 				mmc_retune_recheck(host);
449 				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
450 					mmc_hostname(host),
451 					cmd->opcode, cmd->error);
452 				cmd->retries--;
453 				cmd->error = 0;
454 				__mmc_start_request(host, mrq);
455 				continue; /* wait for done/new event again */
456 			}
457 		} else if (context_info->is_new_req) {
458 			context_info->is_new_req = false;
459 			if (!next_req)
460 				return MMC_BLK_NEW_REQUEST;
461 		}
462 	}
463 	mmc_retune_release(host);
464 	return err;
465 }
466 
467 static void mmc_wait_for_req_done(struct mmc_host *host,
468 				  struct mmc_request *mrq)
469 {
470 	struct mmc_command *cmd;
471 
472 	while (1) {
473 		wait_for_completion(&mrq->completion);
474 
475 		cmd = mrq->cmd;
476 
477 		/*
478 		 * If host has timed out waiting for the sanitize
479 		 * to complete, card might be still in programming state
480 		 * so let's try to bring the card out of programming
481 		 * state.
482 		 */
483 		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
484 			if (!mmc_interrupt_hpi(host->card)) {
485 				pr_warn("%s: %s: Interrupted sanitize\n",
486 					mmc_hostname(host), __func__);
487 				cmd->error = 0;
488 				break;
489 			} else {
490 				pr_err("%s: %s: Failed to interrupt sanitize\n",
491 				       mmc_hostname(host), __func__);
492 			}
493 		}
494 		if (!cmd->error || !cmd->retries ||
495 		    mmc_card_removed(host->card))
496 			break;
497 
498 		mmc_retune_recheck(host);
499 
500 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
501 			 mmc_hostname(host), cmd->opcode, cmd->error);
502 		cmd->retries--;
503 		cmd->error = 0;
504 		__mmc_start_request(host, mrq);
505 	}
506 
507 	mmc_retune_release(host);
508 }
509 
510 /**
511  *	mmc_pre_req - Prepare for a new request
512  *	@host: MMC host to prepare command
513  *	@mrq: MMC request to prepare for
514  *	@is_first_req: true if there is no previously started request
515  *                     that may run in parallel to this call, otherwise false
516  *
517  *	mmc_pre_req() is called prior to mmc_start_req() to let the
518  *	host prepare for the new request. Preparation of a request may be
519  *	performed while another request is running on the host.
520  */
521 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
522 		 bool is_first_req)
523 {
524 	if (host->ops->pre_req) {
525 		mmc_host_clk_hold(host);
526 		host->ops->pre_req(host, mrq, is_first_req);
527 		mmc_host_clk_release(host);
528 	}
529 }
530 
531 /**
532  *	mmc_post_req - Post process a completed request
533  *	@host: MMC host to post process command
534  *	@mrq: MMC request to post process for
535  *	@err: Error, if non-zero, clean up any resources allocated in pre_req
536  *
537  *	Let the host post process a completed request. Post processing of
538  *	a request may be performed while another request is running.
539  */
540 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
541 			 int err)
542 {
543 	if (host->ops->post_req) {
544 		mmc_host_clk_hold(host);
545 		host->ops->post_req(host, mrq, err);
546 		mmc_host_clk_release(host);
547 	}
548 }
549 
550 /**
551  *	mmc_start_req - start a non-blocking request
552  *	@host: MMC host to start command
553  *	@areq: async request to start
554  *	@error: out parameter, returns 0 for success, otherwise non-zero
555  *
556  *	Start a new MMC custom command request for a host.
557  *	If there is an ongoing async request, wait for completion
558  *	of that request, then start the new one and return.
559  *	Does not wait for the new request to complete.
560  *
561  *      Returns the completed request, or NULL if none completed.
562  *	Wait for an ongoing request (previously started) to complete and
563  *	return the completed request. If there is no ongoing request, NULL
564  *	is returned without waiting. NULL is not an error condition.
565  */
566 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
567 				    struct mmc_async_req *areq, int *error)
568 {
569 	int err = 0;
570 	int start_err = 0;
571 	struct mmc_async_req *data = host->areq;
572 
573 	/* Prepare a new request */
574 	if (areq)
575 		mmc_pre_req(host, areq->mrq, !host->areq);
576 
577 	if (host->areq) {
578 		err = mmc_wait_for_data_req_done(host, host->areq->mrq,	areq);
579 		if (err == MMC_BLK_NEW_REQUEST) {
580 			if (error)
581 				*error = err;
582 			/*
583 			 * The previous request was not completed,
584 			 * nothing to return
585 			 */
586 			return NULL;
587 		}
588 		/*
589 		 * Check BKOPS urgency for each R1 response
590 		 */
591 		if (host->card && mmc_card_mmc(host->card) &&
592 		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
593 		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
594 		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
595 
596 			/* Cancel the prepared request */
597 			if (areq)
598 				mmc_post_req(host, areq->mrq, -EINVAL);
599 
600 			mmc_start_bkops(host->card, true);
601 
602 			/* prepare the request again */
603 			if (areq)
604 				mmc_pre_req(host, areq->mrq, !host->areq);
605 		}
606 	}
607 
608 	if (!err && areq)
609 		start_err = __mmc_start_data_req(host, areq->mrq);
610 
611 	if (host->areq)
612 		mmc_post_req(host, host->areq->mrq, 0);
613 
614 	 /* Cancel a prepared request if it was not started. */
615 	if ((err || start_err) && areq)
616 		mmc_post_req(host, areq->mrq, -EINVAL);
617 
618 	if (err)
619 		host->areq = NULL;
620 	else
621 		host->areq = areq;
622 
623 	if (error)
624 		*error = err;
625 	return data;
626 }
627 EXPORT_SYMBOL(mmc_start_req);
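/*
 * Illustrative sketch (not part of this file) of the double-buffered
 * issue pattern mmc_start_req() enables, loosely modelled on the block
 * driver; the cur_areq/prev_areq names and error handling are
 * assumptions made for the example:
 *
 *	prev_areq = mmc_start_req(host, cur_areq, &status);
 *
 * The call hands the freshly prepared cur_areq to the core, waits for
 * the previously started request (if any) to complete, and returns it
 * as prev_areq with its status in 'status'; cur_areq then runs on the
 * host while the caller post-processes prev_areq.
 */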
628 
629 /**
630  *	mmc_wait_for_req - start a request and wait for completion
631  *	@host: MMC host to start command
632  *	@mrq: MMC request to start
633  *
634  *	Start a new MMC custom command request for a host, and wait
635  *	for the command to complete. Does not attempt to parse the
636  *	response.
637  */
638 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
639 {
640 	__mmc_start_req(host, mrq);
641 	mmc_wait_for_req_done(host, mrq);
642 }
643 EXPORT_SYMBOL(mmc_wait_for_req);
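/*
 * Illustrative sketch (not part of this file): a synchronous single
 * block read issued with mmc_wait_for_req().  The blk_addr and buf
 * variables are assumptions for the example, and the caller is assumed
 * to have claimed the host:
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	sg_init_one(&sg, buf, 512);
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 *	err = cmd.error ? cmd.error : data.error;
 */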
644 
645 /**
646  *	mmc_interrupt_hpi - issue a High Priority Interrupt
647  *	@card: the MMC card associated with the HPI transfer
648  *
649  *	Issues a High Priority Interrupt, then polls the card status
650  *	until the card is out of the programming state.
651  */
652 int mmc_interrupt_hpi(struct mmc_card *card)
653 {
654 	int err;
655 	u32 status;
656 	unsigned long prg_wait;
657 
658 	BUG_ON(!card);
659 
660 	if (!card->ext_csd.hpi_en) {
661 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
662 		return 1;
663 	}
664 
665 	mmc_claim_host(card->host);
666 	err = mmc_send_status(card, &status);
667 	if (err) {
668 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
669 		goto out;
670 	}
671 
672 	switch (R1_CURRENT_STATE(status)) {
673 	case R1_STATE_IDLE:
674 	case R1_STATE_READY:
675 	case R1_STATE_STBY:
676 	case R1_STATE_TRAN:
677 		/*
678 		 * In idle and transfer states, HPI is not needed and the caller
679 		 * can issue the next intended command immediately
680 		 */
681 		goto out;
682 	case R1_STATE_PRG:
683 		break;
684 	default:
685 		/* In all other states, it's illegal to issue HPI */
686 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
687 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
688 		err = -EINVAL;
689 		goto out;
690 	}
691 
692 	err = mmc_send_hpi_cmd(card, &status);
693 	if (err)
694 		goto out;
695 
696 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
697 	do {
698 		err = mmc_send_status(card, &status);
699 
700 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
701 			break;
702 		if (time_after(jiffies, prg_wait))
703 			err = -ETIMEDOUT;
704 	} while (!err);
705 
706 out:
707 	mmc_release_host(card->host);
708 	return err;
709 }
710 EXPORT_SYMBOL(mmc_interrupt_hpi);
711 
712 /**
713  *	mmc_wait_for_cmd - start a command and wait for completion
714  *	@host: MMC host to start command
715  *	@cmd: MMC command to start
716  *	@retries: maximum number of retries
717  *
718  *	Start a new MMC command for a host, and wait for the command
719  *	to complete.  Return any error that occurred while the command
720  *	was executing.  Do not attempt to parse the response.
721  */
722 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
723 {
724 	struct mmc_request mrq = {NULL};
725 
726 	WARN_ON(!host->claimed);
727 
728 	memset(cmd->resp, 0, sizeof(cmd->resp));
729 	cmd->retries = retries;
730 
731 	mrq.cmd = cmd;
732 	cmd->data = NULL;
733 
734 	mmc_wait_for_req(host, &mrq);
735 
736 	return cmd->error;
737 }
738 
739 EXPORT_SYMBOL(mmc_wait_for_cmd);
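/*
 * Illustrative sketch (not part of this file): fetching the card status
 * with mmc_wait_for_cmd(), essentially what mmc_send_status() does in
 * mmc_ops.c.  The retry count of 3 is an assumption for the example:
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */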
740 
741 /**
742  *	mmc_stop_bkops - stop ongoing BKOPS
743  *	@card: MMC card to check BKOPS
744  *
745  *	Send HPI command to stop ongoing background operations to
746  *	allow rapid servicing of foreground operations, e.g. read/
747  *	writes. Wait until the card comes out of the programming state
748  *	to avoid errors in servicing read/write requests.
749  */
750 int mmc_stop_bkops(struct mmc_card *card)
751 {
752 	int err = 0;
753 
754 	BUG_ON(!card);
755 	err = mmc_interrupt_hpi(card);
756 
757 	/*
758 	 * If err is -EINVAL, we can't issue an HPI; in that case the card
759 	 * should be left to complete the BKOPS on its own.
760 	 */
761 	if (!err || (err == -EINVAL)) {
762 		mmc_card_clr_doing_bkops(card);
763 		mmc_retune_release(card->host);
764 		err = 0;
765 	}
766 
767 	return err;
768 }
769 EXPORT_SYMBOL(mmc_stop_bkops);
770 
771 int mmc_read_bkops_status(struct mmc_card *card)
772 {
773 	int err;
774 	u8 *ext_csd;
775 
776 	mmc_claim_host(card->host);
777 	err = mmc_get_ext_csd(card, &ext_csd);
778 	mmc_release_host(card->host);
779 	if (err)
780 		return err;
781 
782 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
783 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
784 	kfree(ext_csd);
785 	return 0;
786 }
787 EXPORT_SYMBOL(mmc_read_bkops_status);
788 
789 /**
790  *	mmc_set_data_timeout - set the timeout for a data command
791  *	@data: data phase for command
792  *	@card: the MMC card associated with the data transfer
793  *
794  *	Computes the data timeout parameters according to the
795  *	correct algorithm given the card type.
796  */
797 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
798 {
799 	unsigned int mult;
800 
801 	/*
802 	 * SDIO cards only define an upper 1 s limit on access.
803 	 */
804 	if (mmc_card_sdio(card)) {
805 		data->timeout_ns = 1000000000;
806 		data->timeout_clks = 0;
807 		return;
808 	}
809 
810 	/*
811 	 * SD cards use a 100 multiplier rather than 10
812 	 */
813 	mult = mmc_card_sd(card) ? 100 : 10;
814 
815 	/*
816 	 * Scale up the multiplier (and therefore the timeout) by
817 	 * the r2w factor for writes.
818 	 */
819 	if (data->flags & MMC_DATA_WRITE)
820 		mult <<= card->csd.r2w_factor;
821 
822 	data->timeout_ns = card->csd.tacc_ns * mult;
823 	data->timeout_clks = card->csd.tacc_clks * mult;
824 
825 	/*
826 	 * SD cards also have an upper limit on the timeout.
827 	 */
828 	if (mmc_card_sd(card)) {
829 		unsigned int timeout_us, limit_us;
830 
831 		timeout_us = data->timeout_ns / 1000;
832 		if (mmc_host_clk_rate(card->host))
833 			timeout_us += data->timeout_clks * 1000 /
834 				(mmc_host_clk_rate(card->host) / 1000);
835 
836 		if (data->flags & MMC_DATA_WRITE)
837 			/*
838 			 * The MMC spec says: "It is strongly recommended
839 			 * for hosts to implement more than 500ms
840 			 * timeout value even if the card indicates
841 			 * the 250ms maximum busy length."  Even the
842 			 * previous value of 300ms is known to be
843 			 * insufficient for some cards.
844 			 */
845 			limit_us = 3000000;
846 		else
847 			limit_us = 100000;
848 
849 		/*
850 		 * SDHC cards always use these fixed values.
851 		 */
852 		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
853 			data->timeout_ns = limit_us * 1000;
854 			data->timeout_clks = 0;
855 		}
856 
857 		/* assign limit value if invalid */
858 		if (timeout_us == 0)
859 			data->timeout_ns = limit_us * 1000;
860 	}
861 
862 	/*
863 	 * Some cards require longer data read timeout than indicated in CSD.
864 	 * Address this by setting the read timeout to a "reasonably high"
865 	 * value. For the cards tested, 300ms has proven enough. If necessary,
866 	 * this value can be increased if other problematic cards require this.
867 	 */
868 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
869 		data->timeout_ns = 300000000;
870 		data->timeout_clks = 0;
871 	}
872 
873 	/*
874 	 * Some cards need very high timeouts if driven in SPI mode.
875 	 * The worst observed timeout was 900ms after writing a
876 	 * continuous stream of data until the internal logic
877 	 * overflowed.
878 	 */
879 	if (mmc_host_is_spi(card->host)) {
880 		if (data->flags & MMC_DATA_WRITE) {
881 			if (data->timeout_ns < 1000000000)
882 				data->timeout_ns = 1000000000;	/* 1s */
883 		} else {
884 			if (data->timeout_ns < 100000000)
885 				data->timeout_ns =  100000000;	/* 100ms */
886 		}
887 	}
888 }
889 EXPORT_SYMBOL(mmc_set_data_timeout);
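/*
 * Worked example (values chosen for illustration only): for a standard
 * capacity SD card with tacc_ns = 1000000 (1 ms), tacc_clks = 0 and
 * r2w_factor = 2, a write gets mult = 100 << 2 = 400, so
 * timeout_ns = 1 ms * 400 = 400 ms, which is below the 3 s write limit
 * and therefore kept.  A block-addressed (SDHC) card would instead use
 * the fixed limit, i.e. 3 s for writes and 100 ms for reads.
 */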
890 
891 /**
892  *	mmc_align_data_size - pads a transfer size to a more optimal value
893  *	@card: the MMC card associated with the data transfer
894  *	@sz: original transfer size
895  *
896  *	Pads the original data size with a number of extra bytes in
897  *	order to avoid controller bugs and/or performance hits
898  *	(e.g. some controllers revert to PIO for certain sizes).
899  *
900  *	Returns the improved size, which might be unmodified.
901  *
902  *	Note that this function is only relevant when issuing a
903  *	single scatter gather entry.
904  */
905 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
906 {
907 	/*
908 	 * FIXME: We don't have a system for the controller to tell
909 	 * the core about its problems yet, so for now we just 32-bit
910 	 * align the size.
911 	 */
912 	sz = ((sz + 3) / 4) * 4;
913 
914 	return sz;
915 }
916 EXPORT_SYMBOL(mmc_align_data_size);
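/*
 * Worked example (illustrative): a 61 byte transfer is padded to
 * ((61 + 3) / 4) * 4 = 64 bytes, while an already aligned 64 byte
 * transfer is returned unmodified.
 */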
917 
918 /**
919  *	__mmc_claim_host - exclusively claim a host
920  *	@host: mmc host to claim
921  *	@abort: whether or not the operation should be aborted
922  *
923  *	Claim a host for a set of operations.  If @abort is non-NULL and
924  *	dereferences to a non-zero value then this will return prematurely with
925  *	that non-zero value without acquiring the lock.  Returns zero
926  *	with the lock held otherwise.
927  */
928 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
929 {
930 	DECLARE_WAITQUEUE(wait, current);
931 	unsigned long flags;
932 	int stop;
933 	bool pm = false;
934 
935 	might_sleep();
936 
937 	add_wait_queue(&host->wq, &wait);
938 	spin_lock_irqsave(&host->lock, flags);
939 	while (1) {
940 		set_current_state(TASK_UNINTERRUPTIBLE);
941 		stop = abort ? atomic_read(abort) : 0;
942 		if (stop || !host->claimed || host->claimer == current)
943 			break;
944 		spin_unlock_irqrestore(&host->lock, flags);
945 		schedule();
946 		spin_lock_irqsave(&host->lock, flags);
947 	}
948 	set_current_state(TASK_RUNNING);
949 	if (!stop) {
950 		host->claimed = 1;
951 		host->claimer = current;
952 		host->claim_cnt += 1;
953 		if (host->claim_cnt == 1)
954 			pm = true;
955 	} else
956 		wake_up(&host->wq);
957 	spin_unlock_irqrestore(&host->lock, flags);
958 	remove_wait_queue(&host->wq, &wait);
959 
960 	if (pm)
961 		pm_runtime_get_sync(mmc_dev(host));
962 
963 	return stop;
964 }
965 EXPORT_SYMBOL(__mmc_claim_host);
966 
967 /**
968  *	mmc_release_host - release a host
969  *	@host: mmc host to release
970  *
971  *	Release an MMC host, allowing others to claim the host
972  *	for their operations.
973  */
974 void mmc_release_host(struct mmc_host *host)
975 {
976 	unsigned long flags;
977 
978 	WARN_ON(!host->claimed);
979 
980 	spin_lock_irqsave(&host->lock, flags);
981 	if (--host->claim_cnt) {
982 		/* Release for nested claim */
983 		spin_unlock_irqrestore(&host->lock, flags);
984 	} else {
985 		host->claimed = 0;
986 		host->claimer = NULL;
987 		spin_unlock_irqrestore(&host->lock, flags);
988 		wake_up(&host->wq);
989 		pm_runtime_mark_last_busy(mmc_dev(host));
990 		pm_runtime_put_autosuspend(mmc_dev(host));
991 	}
992 }
993 EXPORT_SYMBOL(mmc_release_host);
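/*
 * Illustrative sketch (not part of this file): the usual claim/release
 * bracket around card commands, which mmc_get_card()/mmc_put_card()
 * below additionally combine with runtime PM of the card device:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 */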
994 
995 /*
996  * This is a helper function, which fetches a runtime pm reference for the
997  * card device and also claims the host.
998  */
999 void mmc_get_card(struct mmc_card *card)
1000 {
1001 	pm_runtime_get_sync(&card->dev);
1002 	mmc_claim_host(card->host);
1003 }
1004 EXPORT_SYMBOL(mmc_get_card);
1005 
1006 /*
1007  * This is a helper function, which releases the host and drops the runtime
1008  * pm reference for the card device.
1009  */
1010 void mmc_put_card(struct mmc_card *card)
1011 {
1012 	mmc_release_host(card->host);
1013 	pm_runtime_mark_last_busy(&card->dev);
1014 	pm_runtime_put_autosuspend(&card->dev);
1015 }
1016 EXPORT_SYMBOL(mmc_put_card);
1017 
1018 /*
1019  * Internal function that does the actual ios call to the host driver,
1020  * optionally printing some debug output.
1021  */
1022 static inline void mmc_set_ios(struct mmc_host *host)
1023 {
1024 	struct mmc_ios *ios = &host->ios;
1025 
1026 	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1027 		"width %u timing %u\n",
1028 		 mmc_hostname(host), ios->clock, ios->bus_mode,
1029 		 ios->power_mode, ios->chip_select, ios->vdd,
1030 		 ios->bus_width, ios->timing);
1031 
1032 	if (ios->clock > 0)
1033 		mmc_set_ungated(host);
1034 	host->ops->set_ios(host, ios);
1035 }
1036 
1037 /*
1038  * Control chip select pin on a host.
1039  */
1040 void mmc_set_chip_select(struct mmc_host *host, int mode)
1041 {
1042 	mmc_host_clk_hold(host);
1043 	host->ios.chip_select = mode;
1044 	mmc_set_ios(host);
1045 	mmc_host_clk_release(host);
1046 }
1047 
1048 /*
1049  * Sets the host clock to the highest possible frequency that
1050  * is below "hz".
1051  */
1052 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
1053 {
1054 	WARN_ON(hz && hz < host->f_min);
1055 
1056 	if (hz > host->f_max)
1057 		hz = host->f_max;
1058 
1059 	host->ios.clock = hz;
1060 	mmc_set_ios(host);
1061 }
1062 
1063 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1064 {
1065 	mmc_host_clk_hold(host);
1066 	__mmc_set_clock(host, hz);
1067 	mmc_host_clk_release(host);
1068 }
1069 
1070 #ifdef CONFIG_MMC_CLKGATE
1071 /*
1072  * This gates the clock by setting it to 0 Hz.
1073  */
1074 void mmc_gate_clock(struct mmc_host *host)
1075 {
1076 	unsigned long flags;
1077 
1078 	spin_lock_irqsave(&host->clk_lock, flags);
1079 	host->clk_old = host->ios.clock;
1080 	host->ios.clock = 0;
1081 	host->clk_gated = true;
1082 	spin_unlock_irqrestore(&host->clk_lock, flags);
1083 	mmc_set_ios(host);
1084 }
1085 
1086 /*
1087  * This restores the clock from gating by using the cached
1088  * clock value.
1089  */
1090 void mmc_ungate_clock(struct mmc_host *host)
1091 {
1092 	/*
1093 	 * We should previously have gated the clock, so the clock shall
1094 	 * be 0 here! The cached clk_old value may however be 0 during
1095 	 * initialization, when some request operations are performed before setting
1096 	 * the frequency. When ungate is requested in that situation
1097 	 * we just ignore the call.
1098 	 */
1099 	if (host->clk_old) {
1100 		BUG_ON(host->ios.clock);
1101 		/* This call will also set host->clk_gated to false */
1102 		__mmc_set_clock(host, host->clk_old);
1103 	}
1104 }
1105 
1106 void mmc_set_ungated(struct mmc_host *host)
1107 {
1108 	unsigned long flags;
1109 
1110 	/*
1111 	 * We've been given a new frequency while the clock is gated,
1112 	 * so make sure we regard this as ungating it.
1113 	 */
1114 	spin_lock_irqsave(&host->clk_lock, flags);
1115 	host->clk_gated = false;
1116 	spin_unlock_irqrestore(&host->clk_lock, flags);
1117 }
1118 
1119 #else
1120 void mmc_set_ungated(struct mmc_host *host)
1121 {
1122 }
1123 #endif
1124 
1125 int mmc_execute_tuning(struct mmc_card *card)
1126 {
1127 	struct mmc_host *host = card->host;
1128 	u32 opcode;
1129 	int err;
1130 
1131 	if (!host->ops->execute_tuning)
1132 		return 0;
1133 
1134 	if (mmc_card_mmc(card))
1135 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1136 	else
1137 		opcode = MMC_SEND_TUNING_BLOCK;
1138 
1139 	mmc_host_clk_hold(host);
1140 	err = host->ops->execute_tuning(host, opcode);
1141 	mmc_host_clk_release(host);
1142 
1143 	if (err)
1144 		pr_err("%s: tuning execution failed\n", mmc_hostname(host));
1145 	else
1146 		mmc_retune_enable(host);
1147 
1148 	return err;
1149 }
1150 
1151 /*
1152  * Change the bus mode (open drain/push-pull) of a host.
1153  */
1154 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1155 {
1156 	mmc_host_clk_hold(host);
1157 	host->ios.bus_mode = mode;
1158 	mmc_set_ios(host);
1159 	mmc_host_clk_release(host);
1160 }
1161 
1162 /*
1163  * Change data bus width of a host.
1164  */
1165 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1166 {
1167 	mmc_host_clk_hold(host);
1168 	host->ios.bus_width = width;
1169 	mmc_set_ios(host);
1170 	mmc_host_clk_release(host);
1171 }
1172 
1173 /*
1174  * Set initial state after a power cycle or a hw_reset.
1175  */
1176 void mmc_set_initial_state(struct mmc_host *host)
1177 {
1178 	mmc_retune_disable(host);
1179 
1180 	if (mmc_host_is_spi(host))
1181 		host->ios.chip_select = MMC_CS_HIGH;
1182 	else
1183 		host->ios.chip_select = MMC_CS_DONTCARE;
1184 	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1185 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1186 	host->ios.timing = MMC_TIMING_LEGACY;
1187 	host->ios.drv_type = 0;
1188 
1189 	mmc_set_ios(host);
1190 }
1191 
1192 /**
1193  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1194  * @vdd:	voltage (mV)
1195  * @low_bits:	prefer low bits in boundary cases
1196  *
1197  * This function returns the OCR bit number according to the provided @vdd
1198  * value. If conversion is not possible, a negative errno value is returned.
1199  *
1200  * Depending on the @low_bits flag the function prefers low or high OCR bits
1201  * on boundary voltages. For example,
1202  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1203  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1204  *
1205  * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1206  */
1207 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1208 {
1209 	const int max_bit = ilog2(MMC_VDD_35_36);
1210 	int bit;
1211 
1212 	if (vdd < 1650 || vdd > 3600)
1213 		return -EINVAL;
1214 
1215 	if (vdd >= 1650 && vdd <= 1950)
1216 		return ilog2(MMC_VDD_165_195);
1217 
1218 	if (low_bits)
1219 		vdd -= 1;
1220 
1221 	/* Base 2000 mV, step 100 mV, bit's base 8. */
1222 	bit = (vdd - 2000) / 100 + 8;
1223 	if (bit > max_bit)
1224 		return max_bit;
1225 	return bit;
1226 }
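/*
 * Worked example (illustrative): for vdd = 3300 and low_bits = true the
 * value is first decremented to 3299, giving bit = (3299 - 2000) / 100
 * + 8 = 20, i.e. ilog2(MMC_VDD_32_33); with low_bits = false the result
 * is (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34).
 */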
1227 
1228 /**
1229  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1230  * @vdd_min:	minimum voltage value (mV)
1231  * @vdd_max:	maximum voltage value (mV)
1232  *
1233  * This function returns the OCR mask bits according to the provided @vdd_min
1234  * and @vdd_max values. If conversion is not possible the function returns 0.
1235  *
1236  * Notes wrt boundary cases:
1237  * This function sets the OCR bits for all boundary voltages, for example
1238  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1239  * MMC_VDD_34_35 mask.
1240  */
1241 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1242 {
1243 	u32 mask = 0;
1244 
1245 	if (vdd_max < vdd_min)
1246 		return 0;
1247 
1248 	/* Prefer high bits for the boundary vdd_max values. */
1249 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1250 	if (vdd_max < 0)
1251 		return 0;
1252 
1253 	/* Prefer low bits for the boundary vdd_min values. */
1254 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1255 	if (vdd_min < 0)
1256 		return 0;
1257 
1258 	/* Fill the mask, from max bit to min bit. */
1259 	while (vdd_max >= vdd_min)
1260 		mask |= 1 << vdd_max--;
1261 
1262 	return mask;
1263 }
1264 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1265 
1266 #ifdef CONFIG_OF
1267 
1268 /**
1269  * mmc_of_parse_voltage - return mask of supported voltages
1270  * @np: The device node to be parsed.
1271  * @mask: mask of voltages available for MMC/SD/SDIO
1272  *
1273  * Returns zero on success, or a negative errno if the voltage-range
1274  * property is missing or invalid.
1275  */
1276 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1277 {
1278 	const u32 *voltage_ranges;
1279 	int num_ranges, i;
1280 
1281 	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1282 	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1283 	if (!voltage_ranges || !num_ranges) {
1284 		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
1285 		return -EINVAL;
1286 	}
1287 
1288 	for (i = 0; i < num_ranges; i++) {
1289 		const int j = i * 2;
1290 		u32 ocr_mask;
1291 
1292 		ocr_mask = mmc_vddrange_to_ocrmask(
1293 				be32_to_cpu(voltage_ranges[j]),
1294 				be32_to_cpu(voltage_ranges[j + 1]));
1295 		if (!ocr_mask) {
1296 			pr_err("%s: voltage-range #%d is invalid\n",
1297 				np->full_name, i);
1298 			return -EINVAL;
1299 		}
1300 		*mask |= ocr_mask;
1301 	}
1302 
1303 	return 0;
1304 }
1305 EXPORT_SYMBOL(mmc_of_parse_voltage);
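/*
 * Illustrative device tree fragment (an assumption, not taken from any
 * real board file) showing the property this parser consumes; each pair
 * of cells is one <min max> range in mV:
 *
 *	voltage-ranges = <3300 3300>, <1800 1800>;
 *
 * With the fragment above, *mask would end up with the 3.3 V OCR bits
 * (MMC_VDD_32_33 | MMC_VDD_33_34) and MMC_VDD_165_195 set.
 */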
1306 
1307 #endif /* CONFIG_OF */
1308 
1309 static int mmc_of_get_func_num(struct device_node *node)
1310 {
1311 	u32 reg;
1312 	int ret;
1313 
1314 	ret = of_property_read_u32(node, "reg", &reg);
1315 	if (ret < 0)
1316 		return ret;
1317 
1318 	return reg;
1319 }
1320 
1321 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1322 		unsigned func_num)
1323 {
1324 	struct device_node *node;
1325 
1326 	if (!host->parent || !host->parent->of_node)
1327 		return NULL;
1328 
1329 	for_each_child_of_node(host->parent->of_node, node) {
1330 		if (mmc_of_get_func_num(node) == func_num)
1331 			return node;
1332 	}
1333 
1334 	return NULL;
1335 }
1336 
1337 #ifdef CONFIG_REGULATOR
1338 
1339 /**
1340  * mmc_regulator_get_ocrmask - return mask of supported voltages
1341  * @supply: regulator to use
1342  *
1343  * This returns either a negative errno, or a mask of voltages that
1344  * can be provided to MMC/SD/SDIO devices using the specified voltage
1345  * regulator.  This would normally be called before registering the
1346  * MMC host adapter.
1347  */
1348 int mmc_regulator_get_ocrmask(struct regulator *supply)
1349 {
1350 	int			result = 0;
1351 	int			count;
1352 	int			i;
1353 	int			vdd_uV;
1354 	int			vdd_mV;
1355 
1356 	count = regulator_count_voltages(supply);
1357 	if (count < 0)
1358 		return count;
1359 
1360 	for (i = 0; i < count; i++) {
1361 		vdd_uV = regulator_list_voltage(supply, i);
1362 		if (vdd_uV <= 0)
1363 			continue;
1364 
1365 		vdd_mV = vdd_uV / 1000;
1366 		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1367 	}
1368 
1369 	if (!result) {
1370 		vdd_uV = regulator_get_voltage(supply);
1371 		if (vdd_uV <= 0)
1372 			return vdd_uV;
1373 
1374 		vdd_mV = vdd_uV / 1000;
1375 		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1376 	}
1377 
1378 	return result;
1379 }
1380 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1381 
1382 /**
1383  * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1384  * @mmc: the host to regulate
1385  * @supply: regulator to use
1386  * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1387  *
1388  * Returns zero on success, else negative errno.
1389  *
1390  * MMC host drivers may use this to enable or disable a regulator using
1391  * a particular supply voltage.  This would normally be called from the
1392  * set_ios() method.
1393  */
1394 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1395 			struct regulator *supply,
1396 			unsigned short vdd_bit)
1397 {
1398 	int			result = 0;
1399 	int			min_uV, max_uV;
1400 
1401 	if (vdd_bit) {
1402 		int		tmp;
1403 
1404 		/*
1405 		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1406 		 * bits this regulator doesn't quite support ... don't
1407 		 * be too picky, most cards and regulators are OK with
1408 		 * a 0.1V range goof (it's a small error percentage).
1409 		 */
1410 		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1411 		if (tmp == 0) {
1412 			min_uV = 1650 * 1000;
1413 			max_uV = 1950 * 1000;
1414 		} else {
1415 			min_uV = 1900 * 1000 + tmp * 100 * 1000;
1416 			max_uV = min_uV + 100 * 1000;
1417 		}
1418 
1419 		result = regulator_set_voltage(supply, min_uV, max_uV);
1420 		if (result == 0 && !mmc->regulator_enabled) {
1421 			result = regulator_enable(supply);
1422 			if (!result)
1423 				mmc->regulator_enabled = true;
1424 		}
1425 	} else if (mmc->regulator_enabled) {
1426 		result = regulator_disable(supply);
1427 		if (result == 0)
1428 			mmc->regulator_enabled = false;
1429 	}
1430 
1431 	if (result)
1432 		dev_err(mmc_dev(mmc),
1433 			"could not set regulator OCR (%d)\n", result);
1434 	return result;
1435 }
1436 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
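/*
 * Illustrative sketch (not part of this file): how a host driver's
 * ->set_ios() handler typically drives the vmmc supply through this
 * helper; the surrounding driver structure is assumed:
 *
 *	if (!IS_ERR(mmc->supply.vmmc))
 *		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 *
 * Passing ios->vdd == 0 (power off) disables the regulator again.
 */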
1437 
1438 #endif /* CONFIG_REGULATOR */
1439 
1440 int mmc_regulator_get_supply(struct mmc_host *mmc)
1441 {
1442 	struct device *dev = mmc_dev(mmc);
1443 	int ret;
1444 
1445 	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1446 	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1447 
1448 	if (IS_ERR(mmc->supply.vmmc)) {
1449 		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1450 			return -EPROBE_DEFER;
1451 		dev_info(dev, "No vmmc regulator found\n");
1452 	} else {
1453 		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1454 		if (ret > 0)
1455 			mmc->ocr_avail = ret;
1456 		else
1457 			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1458 	}
1459 
1460 	if (IS_ERR(mmc->supply.vqmmc)) {
1461 		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1462 			return -EPROBE_DEFER;
1463 		dev_info(dev, "No vqmmc regulator found\n");
1464 	}
1465 
1466 	return 0;
1467 }
1468 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1469 
1470 /*
1471  * Mask off any voltages we don't support and select
1472  * the lowest voltage
1473  */
1474 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1475 {
1476 	int bit;
1477 
1478 	/*
1479 	 * Sanity check the voltages that the card claims to
1480 	 * support.
1481 	 */
1482 	if (ocr & 0x7F) {
1483 		dev_warn(mmc_dev(host),
1484 		"card claims to support voltages below defined range\n");
1485 		ocr &= ~0x7F;
1486 	}
1487 
1488 	ocr &= host->ocr_avail;
1489 	if (!ocr) {
1490 		dev_warn(mmc_dev(host), "no support for card's volts\n");
1491 		return 0;
1492 	}
1493 
1494 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1495 		bit = ffs(ocr) - 1;
1496 		ocr &= 3 << bit;
1497 		mmc_power_cycle(host, ocr);
1498 	} else {
1499 		bit = fls(ocr) - 1;
1500 		ocr &= 3 << bit;
1501 		if (bit != host->ios.vdd)
1502 			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1503 	}
1504 
1505 	return ocr;
1506 }
1507 
1508 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1509 {
1510 	int err = 0;
1511 	int old_signal_voltage = host->ios.signal_voltage;
1512 
1513 	host->ios.signal_voltage = signal_voltage;
1514 	if (host->ops->start_signal_voltage_switch) {
1515 		mmc_host_clk_hold(host);
1516 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1517 		mmc_host_clk_release(host);
1518 	}
1519 
1520 	if (err)
1521 		host->ios.signal_voltage = old_signal_voltage;
1522 
1523 	return err;
1524 
1525 }
1526 
1527 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1528 {
1529 	struct mmc_command cmd = {0};
1530 	int err = 0;
1531 	u32 clock;
1532 
1533 	BUG_ON(!host);
1534 
1535 	/*
1536 	 * Send CMD11 only if the request is to switch the card to
1537 	 * 1.8V signalling.
1538 	 */
1539 	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1540 		return __mmc_set_signal_voltage(host, signal_voltage);
1541 
1542 	/*
1543 	 * If we cannot switch voltages, return failure so the caller
1544 	 * can continue without UHS mode
1545 	 */
1546 	if (!host->ops->start_signal_voltage_switch)
1547 		return -EPERM;
1548 	if (!host->ops->card_busy)
1549 		pr_warn("%s: cannot verify signal voltage switch\n",
1550 			mmc_hostname(host));
1551 
1552 	mmc_host_clk_hold(host);
1553 
1554 	cmd.opcode = SD_SWITCH_VOLTAGE;
1555 	cmd.arg = 0;
1556 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1557 
1558 	err = mmc_wait_for_cmd(host, &cmd, 0);
1559 	if (err)
1560 		goto err_command;
1561 
1562 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
1563 		err = -EIO;
1564 		goto err_command;
1565 	}
1566 	/*
1567 	 * The card should drive cmd and dat[0:3] low immediately
1568 	 * after the response of cmd11, but wait 1 ms to be sure
1569 	 */
1570 	mmc_delay(1);
1571 	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1572 		err = -EAGAIN;
1573 		goto power_cycle;
1574 	}
1575 	/*
1576 	 * During a signal voltage level switch, the clock must be gated
1577 	 * for 5 ms according to the SD spec
1578 	 */
1579 	clock = host->ios.clock;
1580 	host->ios.clock = 0;
1581 	mmc_set_ios(host);
1582 
1583 	if (__mmc_set_signal_voltage(host, signal_voltage)) {
1584 		/*
1585 		 * Voltages may not have been switched, but we've already
1586 		 * sent CMD11, so a power cycle is required anyway
1587 		 */
1588 		err = -EAGAIN;
1589 		goto power_cycle;
1590 	}
1591 
1592 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1593 	mmc_delay(10);
1594 	host->ios.clock = clock;
1595 	mmc_set_ios(host);
1596 
1597 	/* Wait for at least 1 ms according to spec */
1598 	mmc_delay(1);
1599 
1600 	/*
1601 	 * Failure to switch is indicated by the card holding
1602 	 * dat[0:3] low
1603 	 */
1604 	if (host->ops->card_busy && host->ops->card_busy(host))
1605 		err = -EAGAIN;
1606 
1607 power_cycle:
1608 	if (err) {
1609 		pr_debug("%s: Signal voltage switch failed, "
1610 			"power cycling card\n", mmc_hostname(host));
1611 		mmc_power_cycle(host, ocr);
1612 	}
1613 
1614 err_command:
1615 	mmc_host_clk_release(host);
1616 
1617 	return err;
1618 }
1619 
1620 /*
1621  * Select timing parameters for host.
1622  */
1623 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1624 {
1625 	mmc_host_clk_hold(host);
1626 	host->ios.timing = timing;
1627 	mmc_set_ios(host);
1628 	mmc_host_clk_release(host);
1629 }
1630 
1631 /*
1632  * Select appropriate driver type for host.
1633  */
1634 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1635 {
1636 	mmc_host_clk_hold(host);
1637 	host->ios.drv_type = drv_type;
1638 	mmc_set_ios(host);
1639 	mmc_host_clk_release(host);
1640 }
1641 
1642 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1643 			      int card_drv_type, int *drv_type)
1644 {
1645 	struct mmc_host *host = card->host;
1646 	int host_drv_type = SD_DRIVER_TYPE_B;
1647 	int drive_strength;
1648 
1649 	*drv_type = 0;
1650 
1651 	if (!host->ops->select_drive_strength)
1652 		return 0;
1653 
1654 	/* Use SD definition of driver strength for hosts */
1655 	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1656 		host_drv_type |= SD_DRIVER_TYPE_A;
1657 
1658 	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1659 		host_drv_type |= SD_DRIVER_TYPE_C;
1660 
1661 	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1662 		host_drv_type |= SD_DRIVER_TYPE_D;
1663 
1664 	/*
1665 	 * The drive strength that the hardware can support
1666 	 * depends on the board design.  Pass the appropriate
1667 	 * information and let the hardware specific code
1668 	 * return what is possible given the options
1669 	 */
1670 	mmc_host_clk_hold(host);
1671 	drive_strength = host->ops->select_drive_strength(card, max_dtr,
1672 							  host_drv_type,
1673 							  card_drv_type,
1674 							  drv_type);
1675 	mmc_host_clk_release(host);
1676 
1677 	return drive_strength;
1678 }
1679 
1680 /*
1681  * Apply power to the MMC stack.  This is a two-stage process.
1682  * First, we enable power to the card without the clock running.
1683  * We then wait a bit for the power to stabilise.  Finally,
1684  * enable the bus drivers and clock to the card.
1685  *
1686  * We must _NOT_ enable the clock prior to power stabilising.
1687  *
1688  * If a host does all the power sequencing itself, ignore the
1689  * initial MMC_POWER_UP stage.
1690  */
1691 void mmc_power_up(struct mmc_host *host, u32 ocr)
1692 {
1693 	if (host->ios.power_mode == MMC_POWER_ON)
1694 		return;
1695 
1696 	mmc_host_clk_hold(host);
1697 
1698 	mmc_pwrseq_pre_power_on(host);
1699 
1700 	host->ios.vdd = fls(ocr) - 1;
1701 	host->ios.power_mode = MMC_POWER_UP;
1702 	/* Set initial state and call mmc_set_ios */
1703 	mmc_set_initial_state(host);
1704 
1705 	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1706 	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1707 		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1708 	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1709 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1710 	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1711 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1712 
1713 	/*
1714 	 * This delay should be sufficient to allow the power supply
1715 	 * to reach the minimum voltage.
1716 	 */
1717 	mmc_delay(10);
1718 
1719 	mmc_pwrseq_post_power_on(host);
1720 
1721 	host->ios.clock = host->f_init;
1722 
1723 	host->ios.power_mode = MMC_POWER_ON;
1724 	mmc_set_ios(host);
1725 
1726 	/*
1727 	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1728 	 * time required to reach a stable voltage.
1729 	 */
1730 	mmc_delay(10);
1731 
1732 	mmc_host_clk_release(host);
1733 }
1734 
1735 void mmc_power_off(struct mmc_host *host)
1736 {
1737 	if (host->ios.power_mode == MMC_POWER_OFF)
1738 		return;
1739 
1740 	mmc_host_clk_hold(host);
1741 
1742 	mmc_pwrseq_power_off(host);
1743 
1744 	host->ios.clock = 0;
1745 	host->ios.vdd = 0;
1746 
1747 	host->ios.power_mode = MMC_POWER_OFF;
1748 	/* Set initial state and call mmc_set_ios */
1749 	mmc_set_initial_state(host);
1750 
1751 	/*
1752 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1753 	 * XO-1.5, require a short delay after poweroff before the card
1754 	 * can be successfully turned on again.
1755 	 */
1756 	mmc_delay(1);
1757 
1758 	mmc_host_clk_release(host);
1759 }
1760 
1761 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1762 {
1763 	mmc_power_off(host);
1764 	/* Wait at least 1 ms according to SD spec */
1765 	mmc_delay(1);
1766 	mmc_power_up(host, ocr);
1767 }
1768 
1769 /*
1770  * Cleanup when the last reference to the bus operator is dropped.
1771  */
1772 static void __mmc_release_bus(struct mmc_host *host)
1773 {
1774 	BUG_ON(!host);
1775 	BUG_ON(host->bus_refs);
1776 	BUG_ON(!host->bus_dead);
1777 
1778 	host->bus_ops = NULL;
1779 }
1780 
1781 /*
1782  * Increase reference count of bus operator
1783  */
1784 static inline void mmc_bus_get(struct mmc_host *host)
1785 {
1786 	unsigned long flags;
1787 
1788 	spin_lock_irqsave(&host->lock, flags);
1789 	host->bus_refs++;
1790 	spin_unlock_irqrestore(&host->lock, flags);
1791 }
1792 
1793 /*
1794  * Decrease reference count of bus operator and free it if
1795  * it is the last reference.
1796  */
1797 static inline void mmc_bus_put(struct mmc_host *host)
1798 {
1799 	unsigned long flags;
1800 
1801 	spin_lock_irqsave(&host->lock, flags);
1802 	host->bus_refs--;
1803 	if ((host->bus_refs == 0) && host->bus_ops)
1804 		__mmc_release_bus(host);
1805 	spin_unlock_irqrestore(&host->lock, flags);
1806 }
1807 
1808 /*
1809  * Assign a mmc bus handler to a host. Only one bus handler may control a
1810  * host at any given time.
1811  */
1812 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1813 {
1814 	unsigned long flags;
1815 
1816 	BUG_ON(!host);
1817 	BUG_ON(!ops);
1818 
1819 	WARN_ON(!host->claimed);
1820 
1821 	spin_lock_irqsave(&host->lock, flags);
1822 
1823 	BUG_ON(host->bus_ops);
1824 	BUG_ON(host->bus_refs);
1825 
1826 	host->bus_ops = ops;
1827 	host->bus_refs = 1;
1828 	host->bus_dead = 0;
1829 
1830 	spin_unlock_irqrestore(&host->lock, flags);
1831 }
1832 
1833 /*
1834  * Remove the current bus handler from a host.
1835  */
1836 void mmc_detach_bus(struct mmc_host *host)
1837 {
1838 	unsigned long flags;
1839 
1840 	BUG_ON(!host);
1841 
1842 	WARN_ON(!host->claimed);
1843 	WARN_ON(!host->bus_ops);
1844 
1845 	spin_lock_irqsave(&host->lock, flags);
1846 
1847 	host->bus_dead = 1;
1848 
1849 	spin_unlock_irqrestore(&host->lock, flags);
1850 
1851 	mmc_bus_put(host);
1852 }
1853 
1854 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1855 				bool cd_irq)
1856 {
1857 #ifdef CONFIG_MMC_DEBUG
1858 	unsigned long flags;
1859 	spin_lock_irqsave(&host->lock, flags);
1860 	WARN_ON(host->removed);
1861 	spin_unlock_irqrestore(&host->lock, flags);
1862 #endif
1863 
1864 	/*
1865 	 * If the device is configured as a wakeup source, we prevent a new
1866 	 * system suspend for 5 s to give user space time to consume the event.
1867 	 */
1868 	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1869 		device_can_wakeup(mmc_dev(host)))
1870 		pm_wakeup_event(mmc_dev(host), 5000);
1871 
1872 	host->detect_change = 1;
1873 	mmc_schedule_delayed_work(&host->detect, delay);
1874 }
1875 
1876 /**
1877  *	mmc_detect_change - process change of state on a MMC socket
1878  *	@host: host which changed state.
1879  *	@delay: optional delay to wait before detection (jiffies)
1880  *
1881  *	MMC drivers should call this when they detect a card has been
1882  *	inserted or removed. The MMC layer will confirm that any
1883  *	present card is still functional, and initialize any newly
1884  *	inserted cards.
1885  */
1886 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1887 {
1888 	_mmc_detect_change(host, delay, true);
1889 }
1890 EXPORT_SYMBOL(mmc_detect_change);
1891 
1892 void mmc_init_erase(struct mmc_card *card)
1893 {
1894 	unsigned int sz;
1895 
1896 	if (is_power_of_2(card->erase_size))
1897 		card->erase_shift = ffs(card->erase_size) - 1;
1898 	else
1899 		card->erase_shift = 0;
1900 
1901 	/*
1902 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1903 	 * card.  That is not desirable because it can take a long time
1904 	 * (minutes) potentially delaying more important I/O, and also the
1905 	 * timeout calculations become increasingly over-estimated.
1906 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1907 	 * to that size and alignment.
1908 	 *
1909 	 * For SD cards that define Allocation Unit size, limit erases to one
1910 	 * Allocation Unit at a time.  For MMC cards that define High Capacity
1911 	 * Erase Size, whether it is switched on or not, limit to that size.
1912 	 * Otherwise just have a stab at a good value.  For modern cards it
1913 	 * will end up being 4MiB.  Note that if the value is too small, it
1914 	 * can end up taking longer to erase.
1915 	 */
1916 	if (mmc_card_sd(card) && card->ssr.au) {
1917 		card->pref_erase = card->ssr.au;
1918 		card->erase_shift = ffs(card->ssr.au) - 1;
1919 	} else if (card->ext_csd.hc_erase_size) {
1920 		card->pref_erase = card->ext_csd.hc_erase_size;
1921 	} else if (card->erase_size) {
1922 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1923 		if (sz < 128)
1924 			card->pref_erase = 512 * 1024 / 512;
1925 		else if (sz < 512)
1926 			card->pref_erase = 1024 * 1024 / 512;
1927 		else if (sz < 1024)
1928 			card->pref_erase = 2 * 1024 * 1024 / 512;
1929 		else
1930 			card->pref_erase = 4 * 1024 * 1024 / 512;
1931 		if (card->pref_erase < card->erase_size)
1932 			card->pref_erase = card->erase_size;
1933 		else {
1934 			sz = card->pref_erase % card->erase_size;
1935 			if (sz)
1936 				card->pref_erase += card->erase_size - sz;
1937 		}
1938 	} else
1939 		card->pref_erase = 0;
1940 }
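
/*
 * Worked example (illustrative, assumed card parameters): for an MMC card
 * with no HC erase size, csd.capacity = 0xF424 (62500) and
 * csd.read_blkbits = 11 give sz = (62500 << 2) >> 11 = 122, roughly the
 * capacity in MiB.  That is below 128, so pref_erase starts at
 * 512 * 1024 / 512 = 1024 sectors.  With an assumed erase_size of 1008
 * sectors, pref_erase is then rounded up to the next erase-group multiple:
 * 1024 % 1008 = 16, so pref_erase = 1024 + (1008 - 16) = 2016 sectors.
 */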
1941 
1942 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1943 				          unsigned int arg, unsigned int qty)
1944 {
1945 	unsigned int erase_timeout;
1946 
1947 	if (arg == MMC_DISCARD_ARG ||
1948 	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1949 		erase_timeout = card->ext_csd.trim_timeout;
1950 	} else if (card->ext_csd.erase_group_def & 1) {
1951 		/* High Capacity Erase Group Size uses HC timeouts */
1952 		if (arg == MMC_TRIM_ARG)
1953 			erase_timeout = card->ext_csd.trim_timeout;
1954 		else
1955 			erase_timeout = card->ext_csd.hc_erase_timeout;
1956 	} else {
1957 		/* CSD Erase Group Size uses write timeout */
1958 		unsigned int mult = (10 << card->csd.r2w_factor);
1959 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
1960 		unsigned int timeout_us;
1961 
1962 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1963 		if (card->csd.tacc_ns < 1000000)
1964 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
1965 		else
1966 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
1967 
1968 		/*
1969 		 * ios.clock is only a target.  The real clock rate might be
1970 		 * less but not that much less, so fudge it by multiplying by 2.
1971 		 */
1972 		timeout_clks <<= 1;
1973 		timeout_us += (timeout_clks * 1000) /
1974 			      (mmc_host_clk_rate(card->host) / 1000);
1975 
1976 		erase_timeout = timeout_us / 1000;
1977 
1978 		/*
1979 		 * Theoretically, the calculation could underflow so round up
1980 		 * to 1ms in that case.
1981 		 */
1982 		if (!erase_timeout)
1983 			erase_timeout = 1;
1984 	}
1985 
1986 	/* Multiplier for secure operations */
1987 	if (arg & MMC_SECURE_ARGS) {
1988 		if (arg == MMC_SECURE_ERASE_ARG)
1989 			erase_timeout *= card->ext_csd.sec_erase_mult;
1990 		else
1991 			erase_timeout *= card->ext_csd.sec_trim_mult;
1992 	}
1993 
1994 	erase_timeout *= qty;
1995 
1996 	/*
1997 	 * Ensure at least a 1 second timeout for SPI as per
1998 	 * 'mmc_set_data_timeout()'
1999 	 */
2000 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2001 		erase_timeout = 1000;
2002 
2003 	return erase_timeout;
2004 }
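
/*
 * Worked example (illustrative, assumed card/host values) for the CSD path
 * above: with r2w_factor = 7, mult = 10 << 7 = 1280.  For tacc_ns = 80000000
 * (>= 1000000) and tacc_clks = 100: timeout_us = (80000000 / 1000) * 1280 =
 * 102400000, timeout_clks = 100 * 1280 * 2 = 256000, and at a 50 MHz clock
 * timeout_us += (256000 * 1000) / 50000 = 5120.  That yields an
 * erase_timeout of about 102405 ms per erase group, before the qty and
 * secure multipliers are applied.
 */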
2005 
2006 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2007 					 unsigned int arg,
2008 					 unsigned int qty)
2009 {
2010 	unsigned int erase_timeout;
2011 
2012 	if (card->ssr.erase_timeout) {
2013 		/* Erase timeout specified in SD Status Register (SSR) */
2014 		erase_timeout = card->ssr.erase_timeout * qty +
2015 				card->ssr.erase_offset;
2016 	} else {
2017 		/*
2018 		 * Erase timeout not specified in SD Status Register (SSR) so
2019 		 * use 250ms per write block.
2020 		 */
2021 		erase_timeout = 250 * qty;
2022 	}
2023 
2024 	/* Must not be less than 1 second */
2025 	if (erase_timeout < 1000)
2026 		erase_timeout = 1000;
2027 
2028 	return erase_timeout;
2029 }
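
/*
 * Worked example (illustrative, assumed SSR values): with
 * ssr.erase_timeout = 250, ssr.erase_offset = 1000 and qty = 4 allocation
 * units, erase_timeout = 250 * 4 + 1000 = 2000 ms.  Without SSR data, the
 * fallback for the same qty of write blocks is 250 * 4 = 1000 ms, which
 * already meets the 1000 ms minimum enforced at the end.
 */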
2030 
2031 static unsigned int mmc_erase_timeout(struct mmc_card *card,
2032 				      unsigned int arg,
2033 				      unsigned int qty)
2034 {
2035 	if (mmc_card_sd(card))
2036 		return mmc_sd_erase_timeout(card, arg, qty);
2037 	else
2038 		return mmc_mmc_erase_timeout(card, arg, qty);
2039 }
2040 
2041 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2042 			unsigned int to, unsigned int arg)
2043 {
2044 	struct mmc_command cmd = {0};
2045 	unsigned int qty = 0;
2046 	unsigned long timeout;
2047 	int err;
2048 
2049 	mmc_retune_hold(card->host);
2050 
2051 	/*
2052 	 * qty is used to calculate the erase timeout which depends on how many
2053 	 * erase groups (or allocation units in SD terminology) are affected.
2054 	 * We count erasing part of an erase group as one erase group.
2055 	 * For SD, the allocation units are always a power of 2.  For MMC, the
2056 	 * erase group size is almost certainly also a power of 2, but the JEDEC
2057 	 * standard does not seem to insist on that, so we fall back to
2058 	 * division in that case.  SD may not specify an allocation unit size,
2059 	 * in which case the timeout is based on the number of write blocks.
2060 	 *
2061 	 * Note that the timeout for secure trim 2 will only be correct if the
2062 	 * number of erase groups specified is the same as the total of all
2063 	 * preceding secure trim 1 commands.  Since the power may have been
2064 	 * lost since the secure trim 1 commands occurred, it is generally
2065 	 * impossible to calculate the secure trim 2 timeout correctly.
2066 	 */
2067 	if (card->erase_shift)
2068 		qty += ((to >> card->erase_shift) -
2069 			(from >> card->erase_shift)) + 1;
2070 	else if (mmc_card_sd(card))
2071 		qty += to - from + 1;
2072 	else
2073 		qty += ((to / card->erase_size) -
2074 			(from / card->erase_size)) + 1;
2075 
2076 	if (!mmc_card_blockaddr(card)) {
2077 		from <<= 9;
2078 		to <<= 9;
2079 	}
2080 
2081 	if (mmc_card_sd(card))
2082 		cmd.opcode = SD_ERASE_WR_BLK_START;
2083 	else
2084 		cmd.opcode = MMC_ERASE_GROUP_START;
2085 	cmd.arg = from;
2086 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2087 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2088 	if (err) {
2089 		pr_err("mmc_erase: group start error %d, status %#x\n",
2090 		       err, cmd.resp[0]);
2091 		err = -EIO;
2092 		goto out;
2093 	}
2094 
2095 	memset(&cmd, 0, sizeof(struct mmc_command));
2096 	if (mmc_card_sd(card))
2097 		cmd.opcode = SD_ERASE_WR_BLK_END;
2098 	else
2099 		cmd.opcode = MMC_ERASE_GROUP_END;
2100 	cmd.arg = to;
2101 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2102 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2103 	if (err) {
2104 		pr_err("mmc_erase: group end error %d, status %#x\n",
2105 		       err, cmd.resp[0]);
2106 		err = -EIO;
2107 		goto out;
2108 	}
2109 
2110 	memset(&cmd, 0, sizeof(struct mmc_command));
2111 	cmd.opcode = MMC_ERASE;
2112 	cmd.arg = arg;
2113 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2114 	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
2115 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2116 	if (err) {
2117 		pr_err("mmc_erase: erase error %d, status %#x\n",
2118 		       err, cmd.resp[0]);
2119 		err = -EIO;
2120 		goto out;
2121 	}
2122 
2123 	if (mmc_host_is_spi(card->host))
2124 		goto out;
2125 
2126 	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
2127 	do {
2128 		memset(&cmd, 0, sizeof(struct mmc_command));
2129 		cmd.opcode = MMC_SEND_STATUS;
2130 		cmd.arg = card->rca << 16;
2131 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2132 		/* Do not retry else we can't see errors */
2133 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2134 		if (err || (cmd.resp[0] & 0xFDF92000)) {
2135 			pr_err("error %d requesting status %#x\n",
2136 				err, cmd.resp[0]);
2137 			err = -EIO;
2138 			goto out;
2139 		}
2140 
2141 		/* Timeout if the device never becomes ready for data and
2142 		 * never leaves the program state.
2143 		 */
2144 		if (time_after(jiffies, timeout)) {
2145 			pr_err("%s: Card stuck in programming state! %s\n",
2146 				mmc_hostname(card->host), __func__);
2147 			err = -EIO;
2148 			goto out;
2149 		}
2150 
2151 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2152 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2153 out:
2154 	mmc_retune_release(card->host);
2155 	return err;
2156 }
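
/*
 * Worked example (illustrative) for the qty calculation above: with
 * erase_shift = 10 (a power-of-2 erase group of 1024 sectors), erasing from
 * from = 1000 to to = 5000 touches groups 0 through 4, so
 * qty = (5000 >> 10) - (1000 >> 10) + 1 = 5, even though only part of the
 * first and last group is covered.
 */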
2157 
2158 /**
2159  * mmc_erase - erase sectors.
2160  * @card: card to erase
2161  * @from: first sector to erase
2162  * @nr: number of sectors to erase
2163  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2164  *
2165  * Caller must claim host before calling this function.
2166  */
2167 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2168 	      unsigned int arg)
2169 {
2170 	unsigned int rem, to = from + nr;
2171 
2172 	if (!(card->host->caps & MMC_CAP_ERASE) ||
2173 	    !(card->csd.cmdclass & CCC_ERASE))
2174 		return -EOPNOTSUPP;
2175 
2176 	if (!card->erase_size)
2177 		return -EOPNOTSUPP;
2178 
2179 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2180 		return -EOPNOTSUPP;
2181 
2182 	if ((arg & MMC_SECURE_ARGS) &&
2183 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2184 		return -EOPNOTSUPP;
2185 
2186 	if ((arg & MMC_TRIM_ARGS) &&
2187 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2188 		return -EOPNOTSUPP;
2189 
2190 	if (arg == MMC_SECURE_ERASE_ARG) {
2191 		if (from % card->erase_size || nr % card->erase_size)
2192 			return -EINVAL;
2193 	}
2194 
2195 	if (arg == MMC_ERASE_ARG) {
2196 		rem = from % card->erase_size;
2197 		if (rem) {
2198 			rem = card->erase_size - rem;
2199 			from += rem;
2200 			if (nr > rem)
2201 				nr -= rem;
2202 			else
2203 				return 0;
2204 		}
2205 		rem = nr % card->erase_size;
2206 		if (rem)
2207 			nr -= rem;
2208 	}
2209 
2210 	if (nr == 0)
2211 		return 0;
2212 
2213 	to = from + nr;
2214 
2215 	if (to <= from)
2216 		return -EINVAL;
2217 
2218 	/* 'from' and 'to' are inclusive */
2219 	to -= 1;
2220 
2221 	return mmc_do_erase(card, from, to, arg);
2222 }
2223 EXPORT_SYMBOL(mmc_erase);
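
/*
 * Example (illustrative sketch): a caller erasing a 1 MiB region would claim
 * the host and check feature support and alignment first.  "card" is assumed
 * to be a valid, initialised struct mmc_card; the sector values are
 * illustrative.
 *
 *	unsigned int from = 0x10000;	// start sector (assumed)
 *	unsigned int nr = 2048;		// 1 MiB in 512-byte sectors
 *	int err = -EOPNOTSUPP;
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_can_erase(card) && mmc_erase_group_aligned(card, from, nr))
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */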
2224 
2225 int mmc_can_erase(struct mmc_card *card)
2226 {
2227 	if ((card->host->caps & MMC_CAP_ERASE) &&
2228 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2229 		return 1;
2230 	return 0;
2231 }
2232 EXPORT_SYMBOL(mmc_can_erase);
2233 
2234 int mmc_can_trim(struct mmc_card *card)
2235 {
2236 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
2237 		return 1;
2238 	return 0;
2239 }
2240 EXPORT_SYMBOL(mmc_can_trim);
2241 
2242 int mmc_can_discard(struct mmc_card *card)
2243 {
2244 	/*
2245 	 * As there's no way to detect the discard support bit at v4.5,
2246 	 * use the s/w feature support field.
2247 	 */
2248 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2249 		return 1;
2250 	return 0;
2251 }
2252 EXPORT_SYMBOL(mmc_can_discard);
2253 
2254 int mmc_can_sanitize(struct mmc_card *card)
2255 {
2256 	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2257 		return 0;
2258 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2259 		return 1;
2260 	return 0;
2261 }
2262 EXPORT_SYMBOL(mmc_can_sanitize);
2263 
2264 int mmc_can_secure_erase_trim(struct mmc_card *card)
2265 {
2266 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2267 	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2268 		return 1;
2269 	return 0;
2270 }
2271 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
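
/*
 * Example (illustrative sketch): choosing the cheapest suitable erase
 * argument from the feature helpers above, roughly as a discard path would.
 * The priority order shown here is an assumption for illustration, not a
 * statement of what the block driver does.
 *
 *	unsigned int arg;
 *
 *	if (mmc_can_discard(card))
 *		arg = MMC_DISCARD_ARG;
 *	else if (mmc_can_trim(card))
 *		arg = MMC_TRIM_ARG;
 *	else
 *		arg = MMC_ERASE_ARG;
 */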
2272 
2273 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2274 			    unsigned int nr)
2275 {
2276 	if (!card->erase_size)
2277 		return 0;
2278 	if (from % card->erase_size || nr % card->erase_size)
2279 		return 0;
2280 	return 1;
2281 }
2282 EXPORT_SYMBOL(mmc_erase_group_aligned);
2283 
2284 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2285 					    unsigned int arg)
2286 {
2287 	struct mmc_host *host = card->host;
2288 	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
2289 	unsigned int last_timeout = 0;
2290 
2291 	if (card->erase_shift)
2292 		max_qty = UINT_MAX >> card->erase_shift;
2293 	else if (mmc_card_sd(card))
2294 		max_qty = UINT_MAX;
2295 	else
2296 		max_qty = UINT_MAX / card->erase_size;
2297 
2298 	/* Find the largest qty with an OK timeout */
2299 	do {
2300 		y = 0;
2301 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2302 			timeout = mmc_erase_timeout(card, arg, qty + x);
2303 			if (timeout > host->max_busy_timeout)
2304 				break;
2305 			if (timeout < last_timeout)
2306 				break;
2307 			last_timeout = timeout;
2308 			y = x;
2309 		}
2310 		qty += y;
2311 	} while (y);
2312 
2313 	if (!qty)
2314 		return 0;
2315 
2316 	if (qty == 1)
2317 		return 1;
2318 
2319 	/* Convert qty to sectors */
2320 	if (card->erase_shift)
2321 		max_discard = --qty << card->erase_shift;
2322 	else if (mmc_card_sd(card))
2323 		max_discard = qty;
2324 	else
2325 		max_discard = --qty * card->erase_size;
2326 
2327 	return max_discard;
2328 }
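
/*
 * Worked trace (illustrative, assumed timeouts): if host->max_busy_timeout is
 * 1000 ms and mmc_erase_timeout() costs roughly 300 ms per erase group, the
 * first pass of the loop above accepts x = 1 (300 ms) and x = 2 (600 ms) but
 * stops at x = 4 (1200 ms), so qty becomes 2; the second pass accepts only
 * x = 1 (900 ms for qty + x = 3), and the third pass finds nothing that
 * fits, leaving qty = 3 erase groups before the conversion to sectors.
 */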
2329 
2330 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2331 {
2332 	struct mmc_host *host = card->host;
2333 	unsigned int max_discard, max_trim;
2334 
2335 	if (!host->max_busy_timeout)
2336 		return UINT_MAX;
2337 
2338 	/*
2339 	 * Without erase_group_def set, MMC erase timeout depends on clock
2340 	 * frequency, which can change.  In that case, the best choice is
2341 	 * just the preferred erase size.
2342 	 */
2343 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2344 		return card->pref_erase;
2345 
2346 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2347 	if (mmc_can_trim(card)) {
2348 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2349 		if (max_trim < max_discard)
2350 			max_discard = max_trim;
2351 	} else if (max_discard < card->erase_size) {
2352 		max_discard = 0;
2353 	}
2354 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2355 		 mmc_hostname(host), max_discard, host->max_busy_timeout);
2356 	return max_discard;
2357 }
2358 EXPORT_SYMBOL(mmc_calc_max_discard);
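
/*
 * Example (illustrative sketch): the result is typically fed into the block
 * layer's discard limit when a request queue is set up.  The queue "q" and
 * the surrounding setup code are assumptions for illustration.
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 */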
2359 
2360 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2361 {
2362 	struct mmc_command cmd = {0};
2363 
2364 	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
2365 		return 0;
2366 
2367 	cmd.opcode = MMC_SET_BLOCKLEN;
2368 	cmd.arg = blocklen;
2369 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2370 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2371 }
2372 EXPORT_SYMBOL(mmc_set_blocklen);
2373 
2374 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2375 			bool is_rel_write)
2376 {
2377 	struct mmc_command cmd = {0};
2378 
2379 	cmd.opcode = MMC_SET_BLOCK_COUNT;
2380 	cmd.arg = blockcount & 0x0000FFFF;
2381 	if (is_rel_write)
2382 		cmd.arg |= 1 << 31;
2383 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2384 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2385 }
2386 EXPORT_SYMBOL(mmc_set_blockcount);
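
/*
 * Example (illustrative sketch): issuing CMD23 ahead of a reliable write, as
 * a caller on a host with MMC_CAP_CMD23 might do.  The block count and the
 * reliable-write flag are illustrative values.
 *
 *	// Pre-define an 8-block transfer and mark it as a reliable write.
 *	err = mmc_set_blockcount(card, 8, true);
 *	if (err)
 *		return err;
 */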
2387 
2388 static void mmc_hw_reset_for_init(struct mmc_host *host)
2389 {
2390 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2391 		return;
2392 	mmc_host_clk_hold(host);
2393 	host->ops->hw_reset(host);
2394 	mmc_host_clk_release(host);
2395 }
2396 
2397 int mmc_hw_reset(struct mmc_host *host)
2398 {
2399 	int ret;
2400 
2401 	if (!host->card)
2402 		return -EINVAL;
2403 
2404 	mmc_bus_get(host);
2405 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2406 		mmc_bus_put(host);
2407 		return -EOPNOTSUPP;
2408 	}
2409 
2410 	ret = host->bus_ops->reset(host);
2411 	mmc_bus_put(host);
2412 
2413 	if (ret != -EOPNOTSUPP)
2414 		pr_warn("%s: tried to reset card\n", mmc_hostname(host));
2415 
2416 	return ret;
2417 }
2418 EXPORT_SYMBOL(mmc_hw_reset);
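
/*
 * Example (illustrative sketch): a caller that has detected an unrecoverable
 * card error can ask the core to reset and re-initialise the card.  Error
 * handling beyond the return code is omitted.
 *
 *	err = mmc_hw_reset(host);
 *	if (err == -EOPNOTSUPP)
 *		pr_debug("%s: hardware reset not supported\n",
 *			 mmc_hostname(host));
 */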
2419 
2420 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2421 {
2422 	host->f_init = freq;
2423 
2424 #ifdef CONFIG_MMC_DEBUG
2425 	pr_info("%s: %s: trying to init card at %u Hz\n",
2426 		mmc_hostname(host), __func__, host->f_init);
2427 #endif
2428 	mmc_power_up(host, host->ocr_avail);
2429 
2430 	/*
2431 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2432 	 * do a hardware reset if possible.
2433 	 */
2434 	mmc_hw_reset_for_init(host);
2435 
2436 	/*
2437 	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2438 	 * sdio_reset() sends CMD52 to reset the card.  Since we do not know
2439 	 * should be ignored by SD/eMMC cards.
2440 	 */
2441 	sdio_reset(host);
2442 	mmc_go_idle(host);
2443 
2444 	mmc_send_if_cond(host, host->ocr_avail);
2445 
2446 	/* Order's important: probe SDIO, then SD, then MMC */
2447 	if (!mmc_attach_sdio(host))
2448 		return 0;
2449 	if (!mmc_attach_sd(host))
2450 		return 0;
2451 	if (!mmc_attach_mmc(host))
2452 		return 0;
2453 
2454 	mmc_power_off(host);
2455 	return -EIO;
2456 }
2457 
2458 int _mmc_detect_card_removed(struct mmc_host *host)
2459 {
2460 	int ret;
2461 
2462 	if (host->caps & MMC_CAP_NONREMOVABLE)
2463 		return 0;
2464 
2465 	if (!host->card || mmc_card_removed(host->card))
2466 		return 1;
2467 
2468 	ret = host->bus_ops->alive(host);
2469 
2470 	/*
2471 	 * Card detect status and alive check may be out of sync if card is
2472 	 * removed slowly: the card detect switch changes while the card/slot
2473 	 * pads are still in contact (refer to "SD Card Mechanical
2474 	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2475 	 * detect work 200ms later for this case.
2476 	 */
2477 	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2478 		mmc_detect_change(host, msecs_to_jiffies(200));
2479 		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2480 	}
2481 
2482 	if (ret) {
2483 		mmc_card_set_removed(host->card);
2484 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2485 	}
2486 
2487 	return ret;
2488 }
2489 
2490 int mmc_detect_card_removed(struct mmc_host *host)
2491 {
2492 	struct mmc_card *card = host->card;
2493 	int ret;
2494 
2495 	WARN_ON(!host->claimed);
2496 
2497 	if (!card)
2498 		return 1;
2499 
2500 	ret = mmc_card_removed(card);
2501 	/*
2502 	 * The card will be considered unchanged unless we have been asked to
2503 	 * detect a change or host requires polling to provide card detection.
2504 	 */
2505 	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2506 		return ret;
2507 
2508 	host->detect_change = 0;
2509 	if (!ret) {
2510 		ret = _mmc_detect_card_removed(host);
2511 		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2512 			/*
2513 			 * Schedule a detect work as soon as possible to let a
2514 			 * rescan handle the card removal.
2515 			 */
2516 			cancel_delayed_work(&host->detect);
2517 			_mmc_detect_change(host, 0, false);
2518 		}
2519 	}
2520 
2521 	return ret;
2522 }
2523 EXPORT_SYMBOL(mmc_detect_card_removed);
2524 
2525 void mmc_rescan(struct work_struct *work)
2526 {
2527 	struct mmc_host *host =
2528 		container_of(work, struct mmc_host, detect.work);
2529 	int i;
2530 
2531 	if (host->trigger_card_event && host->ops->card_event) {
2532 		host->ops->card_event(host);
2533 		host->trigger_card_event = false;
2534 	}
2535 
2536 	if (host->rescan_disable)
2537 		return;
2538 
2539 	/* If there is a non-removable card registered, only scan once */
2540 	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
2541 		return;
2542 	host->rescan_entered = 1;
2543 
2544 	mmc_bus_get(host);
2545 
2546 	/*
2547 	 * if there is a _removable_ card registered, check whether it is
2548 	 * still present
2549 	 */
2550 	if (host->bus_ops && !host->bus_dead
2551 	    && !(host->caps & MMC_CAP_NONREMOVABLE))
2552 		host->bus_ops->detect(host);
2553 
2554 	host->detect_change = 0;
2555 
2556 	/*
2557 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2558 	 * the card is no longer present.
2559 	 */
2560 	mmc_bus_put(host);
2561 	mmc_bus_get(host);
2562 
2563 	/* if there still is a card present, stop here */
2564 	if (host->bus_ops != NULL) {
2565 		mmc_bus_put(host);
2566 		goto out;
2567 	}
2568 
2569 	/*
2570 	 * Only we can add a new handler, so it's safe to
2571 	 * release the lock here.
2572 	 */
2573 	mmc_bus_put(host);
2574 
2575 	if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
2576 			host->ops->get_cd(host) == 0) {
2577 		mmc_claim_host(host);
2578 		mmc_power_off(host);
2579 		mmc_release_host(host);
2580 		goto out;
2581 	}
2582 
2583 	mmc_claim_host(host);
2584 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2585 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2586 			break;
2587 		if (freqs[i] <= host->f_min)
2588 			break;
2589 	}
2590 	mmc_release_host(host);
2591 
2592  out:
2593 	if (host->caps & MMC_CAP_NEEDS_POLL)
2594 		mmc_schedule_delayed_work(&host->detect, HZ);
2595 }
2596 
2597 void mmc_start_host(struct mmc_host *host)
2598 {
2599 	host->f_init = max(freqs[0], host->f_min);
2600 	host->rescan_disable = 0;
2601 	host->ios.power_mode = MMC_POWER_UNDEFINED;
2602 	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2603 		mmc_power_off(host);
2604 	else
2605 		mmc_power_up(host, host->ocr_avail);
2606 	mmc_gpiod_request_cd_irq(host);
2607 	_mmc_detect_change(host, 0, false);
2608 }
2609 
2610 void mmc_stop_host(struct mmc_host *host)
2611 {
2612 #ifdef CONFIG_MMC_DEBUG
2613 	unsigned long flags;
2614 	spin_lock_irqsave(&host->lock, flags);
2615 	host->removed = 1;
2616 	spin_unlock_irqrestore(&host->lock, flags);
2617 #endif
2618 	if (host->slot.cd_irq >= 0)
2619 		disable_irq(host->slot.cd_irq);
2620 
2621 	host->rescan_disable = 1;
2622 	cancel_delayed_work_sync(&host->detect);
2623 	mmc_flush_scheduled_work();
2624 
2625 	/* clear pm flags now and let card drivers set them as needed */
2626 	host->pm_flags = 0;
2627 
2628 	mmc_bus_get(host);
2629 	if (host->bus_ops && !host->bus_dead) {
2630 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2631 		host->bus_ops->remove(host);
2632 		mmc_claim_host(host);
2633 		mmc_detach_bus(host);
2634 		mmc_power_off(host);
2635 		mmc_release_host(host);
2636 		mmc_bus_put(host);
2637 		return;
2638 	}
2639 	mmc_bus_put(host);
2640 
2641 	BUG_ON(host->card);
2642 
2643 	mmc_power_off(host);
2644 }
2645 
2646 int mmc_power_save_host(struct mmc_host *host)
2647 {
2648 	int ret = 0;
2649 
2650 #ifdef CONFIG_MMC_DEBUG
2651 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2652 #endif
2653 
2654 	mmc_bus_get(host);
2655 
2656 	if (!host->bus_ops || host->bus_dead) {
2657 		mmc_bus_put(host);
2658 		return -EINVAL;
2659 	}
2660 
2661 	if (host->bus_ops->power_save)
2662 		ret = host->bus_ops->power_save(host);
2663 
2664 	mmc_bus_put(host);
2665 
2666 	mmc_power_off(host);
2667 
2668 	return ret;
2669 }
2670 EXPORT_SYMBOL(mmc_power_save_host);
2671 
2672 int mmc_power_restore_host(struct mmc_host *host)
2673 {
2674 	int ret;
2675 
2676 #ifdef CONFIG_MMC_DEBUG
2677 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2678 #endif
2679 
2680 	mmc_bus_get(host);
2681 
2682 	if (!host->bus_ops || host->bus_dead) {
2683 		mmc_bus_put(host);
2684 		return -EINVAL;
2685 	}
2686 
2687 	mmc_power_up(host, host->card->ocr);
2688 	ret = host->bus_ops->power_restore(host);
2689 
2690 	mmc_bus_put(host);
2691 
2692 	return ret;
2693 }
2694 EXPORT_SYMBOL(mmc_power_restore_host);
2695 
2696 /*
2697  * Flush the cache to the non-volatile storage.
2698  */
2699 int mmc_flush_cache(struct mmc_card *card)
2700 {
2701 	int err = 0;
2702 
2703 	if (mmc_card_mmc(card) &&
2704 			(card->ext_csd.cache_size > 0) &&
2705 			(card->ext_csd.cache_ctrl & 1)) {
2706 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2707 				EXT_CSD_FLUSH_CACHE, 1, 0);
2708 		if (err)
2709 			pr_err("%s: cache flush error %d\n",
2710 					mmc_hostname(card->host), err);
2711 	}
2712 
2713 	return err;
2714 }
2715 EXPORT_SYMBOL(mmc_flush_cache);
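
/*
 * Example (illustrative sketch): callers typically flush the volatile cache
 * before power may be removed, for instance from a suspend path.
 *
 *	err = mmc_flush_cache(card);
 *	if (err)
 *		return err;
 */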
2716 
2717 #ifdef CONFIG_PM
2718 
2719 /* Do the card removal on suspend if the card is assumed removable.
2720  * Do that in the PM notifier while user space isn't yet frozen, so we
2721  * will be able to sync the card.
2722  */
2723 int mmc_pm_notify(struct notifier_block *notify_block,
2724 					unsigned long mode, void *unused)
2725 {
2726 	struct mmc_host *host = container_of(
2727 		notify_block, struct mmc_host, pm_notify);
2728 	unsigned long flags;
2729 	int err = 0;
2730 
2731 	switch (mode) {
2732 	case PM_HIBERNATION_PREPARE:
2733 	case PM_SUSPEND_PREPARE:
2734 	case PM_RESTORE_PREPARE:
2735 		spin_lock_irqsave(&host->lock, flags);
2736 		host->rescan_disable = 1;
2737 		spin_unlock_irqrestore(&host->lock, flags);
2738 		cancel_delayed_work_sync(&host->detect);
2739 
2740 		if (!host->bus_ops)
2741 			break;
2742 
2743 		/* Validate prerequisites for suspend */
2744 		if (host->bus_ops->pre_suspend)
2745 			err = host->bus_ops->pre_suspend(host);
2746 		if (!err)
2747 			break;
2748 
2749 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2750 		host->bus_ops->remove(host);
2751 		mmc_claim_host(host);
2752 		mmc_detach_bus(host);
2753 		mmc_power_off(host);
2754 		mmc_release_host(host);
2755 		host->pm_flags = 0;
2756 		break;
2757 
2758 	case PM_POST_SUSPEND:
2759 	case PM_POST_HIBERNATION:
2760 	case PM_POST_RESTORE:
2761 
2762 		spin_lock_irqsave(&host->lock, flags);
2763 		host->rescan_disable = 0;
2764 		spin_unlock_irqrestore(&host->lock, flags);
2765 		_mmc_detect_change(host, 0, false);
2766 
2767 	}
2768 
2769 	return 0;
2770 }
2771 #endif
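
/*
 * Example (illustrative sketch): mmc_pm_notify() is hooked up through a
 * standard PM notifier block.  The registration below is a sketch of how a
 * host allocation path might wire it up; where exactly this happens is
 * outside this file.
 *
 *	host->pm_notify.notifier_call = mmc_pm_notify;
 *	register_pm_notifier(&host->pm_notify);
 */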
2772 
2773 /**
2774  * mmc_init_context_info() - init synchronization context
2775  * @host: mmc host
2776  *
2777  * Init struct context_info needed to implement asynchronous
2778  * request mechanism, used by mmc core, host driver and mmc requests
2779  * supplier.
2780  */
2781 void mmc_init_context_info(struct mmc_host *host)
2782 {
2783 	spin_lock_init(&host->context_info.lock);
2784 	host->context_info.is_new_req = false;
2785 	host->context_info.is_done_rcv = false;
2786 	host->context_info.is_waiting_last_req = false;
2787 	init_waitqueue_head(&host->context_info.wait);
2788 }
2789 
2790 static int __init mmc_init(void)
2791 {
2792 	int ret;
2793 
2794 	workqueue = alloc_ordered_workqueue("kmmcd", 0);
2795 	if (!workqueue)
2796 		return -ENOMEM;
2797 
2798 	ret = mmc_register_bus();
2799 	if (ret)
2800 		goto destroy_workqueue;
2801 
2802 	ret = mmc_register_host_class();
2803 	if (ret)
2804 		goto unregister_bus;
2805 
2806 	ret = sdio_register_bus();
2807 	if (ret)
2808 		goto unregister_host_class;
2809 
2810 	return 0;
2811 
2812 unregister_host_class:
2813 	mmc_unregister_host_class();
2814 unregister_bus:
2815 	mmc_unregister_bus();
2816 destroy_workqueue:
2817 	destroy_workqueue(workqueue);
2818 
2819 	return ret;
2820 }
2821 
2822 static void __exit mmc_exit(void)
2823 {
2824 	sdio_unregister_bus();
2825 	mmc_unregister_host_class();
2826 	mmc_unregister_bus();
2827 	destroy_workqueue(workqueue);
2828 }
2829 
2830 subsys_initcall(mmc_init);
2831 module_exit(mmc_exit);
2832 
2833 MODULE_LICENSE("GPL");
2834