xref: /linux/drivers/mmc/core/core.c (revision 98838d95075a5295f3478ceba18bcccf472e30f4)
1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
31 #include <linux/of.h>
32 
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/mmc.h>
41 
42 #include "core.h"
43 #include "bus.h"
44 #include "host.h"
45 #include "sdio_bus.h"
46 #include "pwrseq.h"
47 
48 #include "mmc_ops.h"
49 #include "sd_ops.h"
50 #include "sdio_ops.h"
51 
52 /* If the device is not responding */
53 #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
54 
55 /*
56  * Background operations can take a long time, depending on the housekeeping
57  * operations the card has to perform.
58  */
59 #define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
60 
61 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
62 #define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
63 
64 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
65 
66 /*
67  * Enabling software CRCs on the data blocks can be a significant (30%)
68  * performance cost, and for other reasons may not always be desired.
69  * So we allow it to be disabled.
70  */
71 bool use_spi_crc = true;
72 module_param(use_spi_crc, bool, 0);
73 
74 static int mmc_schedule_delayed_work(struct delayed_work *work,
75 				     unsigned long delay)
76 {
77 	/*
78 	 * We use the system_freezable_wq for two reasons.
79 	 * First, it allows several work items (not the same work item) to be
80 	 * executed simultaneously. Second, the queue becomes frozen when
81 	 * userspace becomes frozen during system PM.
82 	 */
83 	return queue_delayed_work(system_freezable_wq, work, delay);
84 }
85 
86 #ifdef CONFIG_FAIL_MMC_REQUEST
87 
88 /*
89  * Internal function. Inject random data errors.
90  * If mmc_data is NULL no errors are injected.
91  */
92 static void mmc_should_fail_request(struct mmc_host *host,
93 				    struct mmc_request *mrq)
94 {
95 	struct mmc_command *cmd = mrq->cmd;
96 	struct mmc_data *data = mrq->data;
97 	static const int data_errors[] = {
98 		-ETIMEDOUT,
99 		-EILSEQ,
100 		-EIO,
101 	};
102 
103 	if (!data)
104 		return;
105 
106 	if (cmd->error || data->error ||
107 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
108 		return;
109 
110 	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
111 	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
112 }
113 
114 #else /* CONFIG_FAIL_MMC_REQUEST */
115 
116 static inline void mmc_should_fail_request(struct mmc_host *host,
117 					   struct mmc_request *mrq)
118 {
119 }
120 
121 #endif /* CONFIG_FAIL_MMC_REQUEST */
122 
123 static inline void mmc_complete_cmd(struct mmc_request *mrq)
124 {
125 	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
126 		complete_all(&mrq->cmd_completion);
127 }
128 
129 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
130 {
131 	if (!mrq->cap_cmd_during_tfr)
132 		return;
133 
134 	mmc_complete_cmd(mrq);
135 
136 	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
137 		 mmc_hostname(host), mrq->cmd->opcode);
138 }
139 EXPORT_SYMBOL(mmc_command_done);
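
/*
 * Illustrative sketch (not part of this file): a host controller driver that
 * advertises MMC_CAP_CMD_DURING_TFR might call mmc_command_done() from its
 * interrupt handler once the command/response phase has finished while the
 * data phase is still in flight. The foo_host structure is hypothetical.
 */
struct foo_host {
	struct mmc_host *mmc;		/* our mmc_host */
	struct mmc_request *mrq;	/* request currently in flight */
};

static void foo_handle_cmd_irq(struct foo_host *foo)
{
	/* The response has already been read into foo->mrq->cmd->resp[]. */
	mmc_command_done(foo->mmc, foo->mrq);
}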
140 
141 /**
142  *	mmc_request_done - finish processing an MMC request
143  *	@host: MMC host which completed the request
144  *	@mrq: MMC request which completed
145  *
146  *	MMC drivers should call this function when they have completed
147  *	their processing of a request.
148  */
149 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
150 {
151 	struct mmc_command *cmd = mrq->cmd;
152 	int err = cmd->error;
153 
154 	/* Flag re-tuning needed on CRC errors */
155 	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
156 	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
157 	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
158 	    (mrq->data && mrq->data->error == -EILSEQ) ||
159 	    (mrq->stop && mrq->stop->error == -EILSEQ)))
160 		mmc_retune_needed(host);
161 
162 	if (err && cmd->retries && mmc_host_is_spi(host)) {
163 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
164 			cmd->retries = 0;
165 	}
166 
167 	if (host->ongoing_mrq == mrq)
168 		host->ongoing_mrq = NULL;
169 
170 	mmc_complete_cmd(mrq);
171 
172 	trace_mmc_request_done(host, mrq);
173 
174 	if (err && cmd->retries && !mmc_card_removed(host->card)) {
175 		/*
176 		 * Request starter must handle retries - see
177 		 * mmc_wait_for_req_done().
178 		 */
179 		if (mrq->done)
180 			mrq->done(mrq);
181 	} else {
182 		mmc_should_fail_request(host, mrq);
183 
184 		if (!host->ongoing_mrq)
185 			led_trigger_event(host->led, LED_OFF);
186 
187 		if (mrq->sbc) {
188 			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
189 				mmc_hostname(host), mrq->sbc->opcode,
190 				mrq->sbc->error,
191 				mrq->sbc->resp[0], mrq->sbc->resp[1],
192 				mrq->sbc->resp[2], mrq->sbc->resp[3]);
193 		}
194 
195 		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
196 			mmc_hostname(host), cmd->opcode, err,
197 			cmd->resp[0], cmd->resp[1],
198 			cmd->resp[2], cmd->resp[3]);
199 
200 		if (mrq->data) {
201 			pr_debug("%s:     %d bytes transferred: %d\n",
202 				mmc_hostname(host),
203 				mrq->data->bytes_xfered, mrq->data->error);
204 		}
205 
206 		if (mrq->stop) {
207 			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
208 				mmc_hostname(host), mrq->stop->opcode,
209 				mrq->stop->error,
210 				mrq->stop->resp[0], mrq->stop->resp[1],
211 				mrq->stop->resp[2], mrq->stop->resp[3]);
212 		}
213 
214 		if (mrq->done)
215 			mrq->done(mrq);
216 	}
217 }
218 
219 EXPORT_SYMBOL(mmc_request_done);
220 
221 static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
222 {
223 	int err;
224 
225 	/* Assumes host controller has been runtime resumed by mmc_claim_host */
226 	err = mmc_retune(host);
227 	if (err) {
228 		mrq->cmd->error = err;
229 		mmc_request_done(host, mrq);
230 		return;
231 	}
232 
233 	/*
234 	 * For SDIO R/W commands, we must wait while the card is busy,
235 	 * otherwise some SDIO devices won't work properly.
236 	 */
237 	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
238 		int tries = 500; /* Wait approx 500ms at maximum */
239 
240 		while (host->ops->card_busy(host) && --tries)
241 			mmc_delay(1);
242 
243 		if (tries == 0) {
244 			mrq->cmd->error = -EBUSY;
245 			mmc_request_done(host, mrq);
246 			return;
247 		}
248 	}
249 
250 	if (mrq->cap_cmd_during_tfr) {
251 		host->ongoing_mrq = mrq;
252 		/*
253 		 * The retry path can come through here without having waited on
254 		 * cmd_completion, so ensure it is reinitialised.
255 		 */
256 		reinit_completion(&mrq->cmd_completion);
257 	}
258 
259 	trace_mmc_request_start(host, mrq);
260 
261 	host->ops->request(host, mrq);
262 }
263 
264 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
265 {
266 #ifdef CONFIG_MMC_DEBUG
267 	unsigned int i, sz;
268 	struct scatterlist *sg;
269 #endif
270 	mmc_retune_hold(host);
271 
272 	if (mmc_card_removed(host->card))
273 		return -ENOMEDIUM;
274 
275 	if (mrq->sbc) {
276 		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
277 			 mmc_hostname(host), mrq->sbc->opcode,
278 			 mrq->sbc->arg, mrq->sbc->flags);
279 	}
280 
281 	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
282 		 mmc_hostname(host), mrq->cmd->opcode,
283 		 mrq->cmd->arg, mrq->cmd->flags);
284 
285 	if (mrq->data) {
286 		pr_debug("%s:     blksz %d blocks %d flags %08x "
287 			"tsac %d ms nsac %d\n",
288 			mmc_hostname(host), mrq->data->blksz,
289 			mrq->data->blocks, mrq->data->flags,
290 			mrq->data->timeout_ns / 1000000,
291 			mrq->data->timeout_clks);
292 	}
293 
294 	if (mrq->stop) {
295 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
296 			 mmc_hostname(host), mrq->stop->opcode,
297 			 mrq->stop->arg, mrq->stop->flags);
298 	}
299 
300 	WARN_ON(!host->claimed);
301 
302 	mrq->cmd->error = 0;
303 	mrq->cmd->mrq = mrq;
304 	if (mrq->sbc) {
305 		mrq->sbc->error = 0;
306 		mrq->sbc->mrq = mrq;
307 	}
308 	if (mrq->data) {
309 		BUG_ON(mrq->data->blksz > host->max_blk_size);
310 		BUG_ON(mrq->data->blocks > host->max_blk_count);
311 		BUG_ON(mrq->data->blocks * mrq->data->blksz >
312 			host->max_req_size);
313 
314 #ifdef CONFIG_MMC_DEBUG
315 		sz = 0;
316 		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
317 			sz += sg->length;
318 		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
319 #endif
320 
321 		mrq->cmd->data = mrq->data;
322 		mrq->data->error = 0;
323 		mrq->data->mrq = mrq;
324 		if (mrq->stop) {
325 			mrq->data->stop = mrq->stop;
326 			mrq->stop->error = 0;
327 			mrq->stop->mrq = mrq;
328 		}
329 	}
330 	led_trigger_event(host->led, LED_FULL);
331 	__mmc_start_request(host, mrq);
332 
333 	return 0;
334 }
335 
336 /**
337  *	mmc_start_bkops - start BKOPS for supported cards
338  *	@card: MMC card to start BKOPS
339  *	@from_exception: A flag to indicate if this function was
340  *			 called due to an exception raised by the card
341  *
342  *	Start background operations whenever requested.
343  *	When the urgent BKOPS bit is set in a R1 command response
344  *	then background operations should be started immediately.
345  */
346 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
347 {
348 	int err;
349 	int timeout;
350 	bool use_busy_signal;
351 
352 	BUG_ON(!card);
353 
354 	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
355 		return;
356 
357 	err = mmc_read_bkops_status(card);
358 	if (err) {
359 		pr_err("%s: Failed to read bkops status: %d\n",
360 		       mmc_hostname(card->host), err);
361 		return;
362 	}
363 
364 	if (!card->ext_csd.raw_bkops_status)
365 		return;
366 
367 	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
368 	    from_exception)
369 		return;
370 
371 	mmc_claim_host(card->host);
372 	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
373 		timeout = MMC_BKOPS_MAX_TIMEOUT;
374 		use_busy_signal = true;
375 	} else {
376 		timeout = 0;
377 		use_busy_signal = false;
378 	}
379 
380 	mmc_retune_hold(card->host);
381 
382 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
383 			EXT_CSD_BKOPS_START, 1, timeout,
384 			use_busy_signal, true, false);
385 	if (err) {
386 		pr_warn("%s: Error %d starting bkops\n",
387 			mmc_hostname(card->host), err);
388 		mmc_retune_release(card->host);
389 		goto out;
390 	}
391 
392 	/*
393 	 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is
394 	 * executed synchronously; otherwise the operation
395 	 * runs in the background.
396 	 */
397 	if (!use_busy_signal)
398 		mmc_card_set_doing_bkops(card);
399 	else
400 		mmc_retune_release(card->host);
401 out:
402 	mmc_release_host(card->host);
403 }
404 EXPORT_SYMBOL(mmc_start_bkops);
405 
406 /*
407  * mmc_wait_data_done() - done callback for data request
408  * @mrq: done data request
409  *
410  * Wakes up mmc context, passed as a callback to host controller driver
411  */
412 static void mmc_wait_data_done(struct mmc_request *mrq)
413 {
414 	struct mmc_context_info *context_info = &mrq->host->context_info;
415 
416 	context_info->is_done_rcv = true;
417 	wake_up_interruptible(&context_info->wait);
418 }
419 
420 static void mmc_wait_done(struct mmc_request *mrq)
421 {
422 	complete(&mrq->completion);
423 }
424 
425 static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
426 {
427 	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
428 
429 	/*
430 	 * If there is an ongoing transfer, wait for the command line to become
431 	 * available.
432 	 */
433 	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
434 		wait_for_completion(&ongoing_mrq->cmd_completion);
435 }
436 
437 /*
438  * __mmc_start_data_req() - start a data request
439  * @host: MMC host to start the request
440  * @mrq: data request to start
441  *
442  * Sets the done callback to be called when the request is completed by the
443  * card, and starts the MMC data request execution.
444  * If an ongoing transfer is already in progress, wait for the command line
445  * to become available before sending another command.
446  */
447 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
448 {
449 	int err;
450 
451 	mmc_wait_ongoing_tfr_cmd(host);
452 
453 	mrq->done = mmc_wait_data_done;
454 	mrq->host = host;
455 
456 	init_completion(&mrq->cmd_completion);
457 
458 	err = mmc_start_request(host, mrq);
459 	if (err) {
460 		mrq->cmd->error = err;
461 		mmc_complete_cmd(mrq);
462 		mmc_wait_data_done(mrq);
463 	}
464 
465 	return err;
466 }
467 
468 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
469 {
470 	int err;
471 
472 	mmc_wait_ongoing_tfr_cmd(host);
473 
474 	init_completion(&mrq->completion);
475 	mrq->done = mmc_wait_done;
476 
477 	init_completion(&mrq->cmd_completion);
478 
479 	err = mmc_start_request(host, mrq);
480 	if (err) {
481 		mrq->cmd->error = err;
482 		mmc_complete_cmd(mrq);
483 		complete(&mrq->completion);
484 	}
485 
486 	return err;
487 }
488 
489 /*
490  * mmc_wait_for_data_req_done() - wait for request completed
491  * @host: MMC host to prepare the command.
492  * @mrq: MMC request to wait for
493  *
494  * Blocks the MMC context until the host controller acks the end of the
495  * request, or until a new request notification arrives from the block layer.
496  * Handles command retries.
497  *
498  * Returns enum mmc_blk_status after checking errors.
499  */
500 static int mmc_wait_for_data_req_done(struct mmc_host *host,
501 				      struct mmc_request *mrq,
502 				      struct mmc_async_req *next_req)
503 {
504 	struct mmc_command *cmd;
505 	struct mmc_context_info *context_info = &host->context_info;
506 	int err;
507 	unsigned long flags;
508 
509 	while (1) {
510 		wait_event_interruptible(context_info->wait,
511 				(context_info->is_done_rcv ||
512 				 context_info->is_new_req));
513 		spin_lock_irqsave(&context_info->lock, flags);
514 		context_info->is_waiting_last_req = false;
515 		spin_unlock_irqrestore(&context_info->lock, flags);
516 		if (context_info->is_done_rcv) {
517 			context_info->is_done_rcv = false;
518 			context_info->is_new_req = false;
519 			cmd = mrq->cmd;
520 
521 			if (!cmd->error || !cmd->retries ||
522 			    mmc_card_removed(host->card)) {
523 				err = host->areq->err_check(host->card,
524 							    host->areq);
525 				break; /* return err */
526 			} else {
527 				mmc_retune_recheck(host);
528 				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
529 					mmc_hostname(host),
530 					cmd->opcode, cmd->error);
531 				cmd->retries--;
532 				cmd->error = 0;
533 				__mmc_start_request(host, mrq);
534 				continue; /* wait for done/new event again */
535 			}
536 		} else if (context_info->is_new_req) {
537 			context_info->is_new_req = false;
538 			if (!next_req)
539 				return MMC_BLK_NEW_REQUEST;
540 		}
541 	}
542 	mmc_retune_release(host);
543 	return err;
544 }
545 
546 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
547 {
548 	struct mmc_command *cmd;
549 
550 	while (1) {
551 		wait_for_completion(&mrq->completion);
552 
553 		cmd = mrq->cmd;
554 
555 		/*
556 		 * If the host has timed out waiting for the sanitize
557 		 * to complete, the card might still be in the programming
558 		 * state, so let's try to bring it out of that
559 		 * state.
560 		 */
561 		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
562 			if (!mmc_interrupt_hpi(host->card)) {
563 				pr_warn("%s: %s: Interrupted sanitize\n",
564 					mmc_hostname(host), __func__);
565 				cmd->error = 0;
566 				break;
567 			} else {
568 				pr_err("%s: %s: Failed to interrupt sanitize\n",
569 				       mmc_hostname(host), __func__);
570 			}
571 		}
572 		if (!cmd->error || !cmd->retries ||
573 		    mmc_card_removed(host->card))
574 			break;
575 
576 		mmc_retune_recheck(host);
577 
578 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
579 			 mmc_hostname(host), cmd->opcode, cmd->error);
580 		cmd->retries--;
581 		cmd->error = 0;
582 		__mmc_start_request(host, mrq);
583 	}
584 
585 	mmc_retune_release(host);
586 }
587 EXPORT_SYMBOL(mmc_wait_for_req_done);
588 
589 /**
590  *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
591  *	@host: MMC host
592  *	@mrq: MMC request
593  *
594  *	mmc_is_req_done() is used with requests that have
595  *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
596  *	starting a request and before waiting for it to complete. That is,
597  *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
598  *	and before mmc_wait_for_req_done(). If it is called at other times the
599  *	result is not meaningful.
600  */
601 bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
602 {
603 	if (host->areq)
604 		return host->context_info.is_done_rcv;
605 	else
606 		return completion_done(&mrq->completion);
607 }
608 EXPORT_SYMBOL(mmc_is_req_done);
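
/*
 * Illustrative sketch (not part of this file): the caller-side flow for a
 * 'cap_cmd_during_tfr' request, following the rules documented above. Any
 * commands issued in the middle must not use the data lines.
 */
static void example_cmd_during_tfr(struct mmc_host *host,
				   struct mmc_request *mrq)
{
	mrq->cap_cmd_during_tfr = true;

	/* Returns once the command line is free; the transfer continues. */
	mmc_wait_for_req(host, mrq);

	/*
	 * Commands that do not use the data lines, e.g. CMD13 via
	 * mmc_wait_for_cmd(), may be issued here.
	 */

	if (!mmc_is_req_done(host, mrq))
		mmc_wait_for_req_done(host, mrq);
}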
609 
610 /**
611  *	mmc_pre_req - Prepare for a new request
612  *	@host: MMC host to prepare command
613  *	@mrq: MMC request to prepare for
614  *	@is_first_req: true if there is no previously started request
615  *	               that may run in parallel to this call, otherwise false
616  *
617  *	mmc_pre_req() is called prior to mmc_start_req() to let the
618  *	host prepare for the new request. Preparation of a request may be
619  *	performed while another request is running on the host.
620  */
621 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
622 		 bool is_first_req)
623 {
624 	if (host->ops->pre_req)
625 		host->ops->pre_req(host, mrq, is_first_req);
626 }
627 
628 /**
629  *	mmc_post_req - Post process a completed request
630  *	@host: MMC host to post process command
631  *	@mrq: MMC request to post process for
632  *	@err: Error, if non zero, clean up any resources made in pre_req
633  *
634  *	Let the host post process a completed request. Post processing of
635  *	a request may be performed while another request is running.
636  */
637 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
638 			 int err)
639 {
640 	if (host->ops->post_req)
641 		host->ops->post_req(host, mrq, err);
642 }
643 
644 /**
645  *	mmc_start_req - start a non-blocking request
646  *	@host: MMC host to start command
647  *	@areq: async request to start
648  *	@error: out parameter, returns 0 for success, otherwise non-zero
649  *
650  *	Start a new MMC custom command request for a host.
651  *	If there is on ongoing async request wait for completion
652  *	If there is an ongoing async request, wait for its completion,
653  *	then start the new one and return.
654  *	Does not wait for the new request to complete.
655  *
656  *	Returns the completed request, or NULL if none completed.
657  *	Waits for an ongoing request (previously started) to complete and
658  *	returns the completed request. If there is no ongoing request, NULL
659  *	is returned without waiting. NULL is not an error condition.
660 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
661 				    struct mmc_async_req *areq, int *error)
662 {
663 	int err = 0;
664 	int start_err = 0;
665 	struct mmc_async_req *data = host->areq;
666 
667 	/* Prepare a new request */
668 	if (areq)
669 		mmc_pre_req(host, areq->mrq, !host->areq);
670 
671 	if (host->areq) {
672 		err = mmc_wait_for_data_req_done(host, host->areq->mrq,	areq);
673 		if (err == MMC_BLK_NEW_REQUEST) {
674 			if (error)
675 				*error = err;
676 			/*
677 			 * The previous request was not completed,
678 			 * nothing to return
679 			 */
680 			return NULL;
681 		}
682 		/*
683 		 * Check BKOPS urgency for each R1 response
684 		 */
685 		if (host->card && mmc_card_mmc(host->card) &&
686 		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
687 		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
688 		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
689 
690 			/* Cancel the prepared request */
691 			if (areq)
692 				mmc_post_req(host, areq->mrq, -EINVAL);
693 
694 			mmc_start_bkops(host->card, true);
695 
696 			/* prepare the request again */
697 			if (areq)
698 				mmc_pre_req(host, areq->mrq, !host->areq);
699 		}
700 	}
701 
702 	if (!err && areq)
703 		start_err = __mmc_start_data_req(host, areq->mrq);
704 
705 	if (host->areq)
706 		mmc_post_req(host, host->areq->mrq, 0);
707 
708 	 /* Cancel a prepared request if it was not started. */
709 	if ((err || start_err) && areq)
710 		mmc_post_req(host, areq->mrq, -EINVAL);
711 
712 	if (err)
713 		host->areq = NULL;
714 	else
715 		host->areq = areq;
716 
717 	if (error)
718 		*error = err;
719 	return data;
720 }
721 EXPORT_SYMBOL(mmc_start_req);
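
/*
 * Illustrative sketch (not part of this file): how a caller such as the
 * block driver pipelines requests through mmc_start_req(). Starting the next
 * request hands back the previously completed one, so preparation of one
 * request overlaps execution of another. prepare_areq() is a hypothetical
 * helper that returns the next prepared async request, or NULL when done.
 */
struct mmc_async_req *prepare_areq(void);	/* hypothetical */

static void example_async_pipeline(struct mmc_host *host)
{
	struct mmc_async_req *prev, *cur;
	int err;

	while ((cur = prepare_areq()) != NULL) {
		/* Starts 'cur' and returns the request that just finished. */
		prev = mmc_start_req(host, cur, &err);
		if (err)
			return;
		/* post-process 'prev' here; it is NULL on the first pass */
	}

	/* Flush: wait for the last outstanding request to complete. */
	prev = mmc_start_req(host, NULL, &err);
}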
722 
723 /**
724  *	mmc_wait_for_req - start a request and wait for completion
725  *	@host: MMC host to start command
726  *	@mrq: MMC request to start
727  *
728  *	Start a new MMC custom command request for a host, and wait
729  *	for the command to complete. In the case of 'cap_cmd_during_tfr'
730  *	requests, the transfer is ongoing and the caller can issue further
731  *	commands that do not use the data lines, and then wait by calling
732  *	mmc_wait_for_req_done().
733  *	Does not attempt to parse the response.
734  */
735 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
736 {
737 	__mmc_start_req(host, mrq);
738 
739 	if (!mrq->cap_cmd_during_tfr)
740 		mmc_wait_for_req_done(host, mrq);
741 }
742 EXPORT_SYMBOL(mmc_wait_for_req);
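
/*
 * Illustrative sketch (not part of this file): a synchronous single-block
 * read (CMD17) built in the style of mmc_test.c. The caller must have
 * claimed the host, 'buf' must be a DMA-able 512-byte buffer, and for
 * byte-addressed cards the argument would be a byte offset rather than a
 * block number.
 */
static int example_read_block(struct mmc_card *card, void *buf, u32 blkno)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	cmd.opcode = MMC_READ_SINGLE_BLOCK;
	cmd.arg = blkno;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, buf, 512);

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_set_data_timeout(&data, card);
	mmc_wait_for_req(card->host, &mrq);

	return cmd.error ? cmd.error : data.error;
}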
743 
744 /**
745  *	mmc_interrupt_hpi - issue a High Priority Interrupt
746  *	@card: the MMC card associated with the HPI transfer
747  *
748  *	Issues a High Priority Interrupt, then polls the card status
749  *	until the card is out of the programming state.
750  */
751 int mmc_interrupt_hpi(struct mmc_card *card)
752 {
753 	int err;
754 	u32 status;
755 	unsigned long prg_wait;
756 
757 	BUG_ON(!card);
758 
759 	if (!card->ext_csd.hpi_en) {
760 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
761 		return 1;
762 	}
763 
764 	mmc_claim_host(card->host);
765 	err = mmc_send_status(card, &status);
766 	if (err) {
767 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
768 		goto out;
769 	}
770 
771 	switch (R1_CURRENT_STATE(status)) {
772 	case R1_STATE_IDLE:
773 	case R1_STATE_READY:
774 	case R1_STATE_STBY:
775 	case R1_STATE_TRAN:
776 		/*
777 		 * In idle and transfer states, HPI is not needed and the caller
778 		 * can issue the next intended command immediately
779 		 */
780 		goto out;
781 	case R1_STATE_PRG:
782 		break;
783 	default:
784 		/* In all other states, it's illegal to issue HPI */
785 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
786 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
787 		err = -EINVAL;
788 		goto out;
789 	}
790 
791 	err = mmc_send_hpi_cmd(card, &status);
792 	if (err)
793 		goto out;
794 
795 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
796 	do {
797 		err = mmc_send_status(card, &status);
798 
799 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
800 			break;
801 		if (time_after(jiffies, prg_wait))
802 			err = -ETIMEDOUT;
803 	} while (!err);
804 
805 out:
806 	mmc_release_host(card->host);
807 	return err;
808 }
809 EXPORT_SYMBOL(mmc_interrupt_hpi);
810 
811 /**
812  *	mmc_wait_for_cmd - start a command and wait for completion
813  *	@host: MMC host to start command
814  *	@cmd: MMC command to start
815  *	@retries: maximum number of retries
816  *
817  *	Start a new MMC command for a host, and wait for the command
818  *	to complete.  Return any error that occurred while the command
819  *	was executing.  Do not attempt to parse the response.
820  */
821 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
822 {
823 	struct mmc_request mrq = {NULL};
824 
825 	WARN_ON(!host->claimed);
826 
827 	memset(cmd->resp, 0, sizeof(cmd->resp));
828 	cmd->retries = retries;
829 
830 	mrq.cmd = cmd;
831 	cmd->data = NULL;
832 
833 	mmc_wait_for_req(host, &mrq);
834 
835 	return cmd->error;
836 }
837 
838 EXPORT_SYMBOL(mmc_wait_for_cmd);
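
/*
 * Illustrative sketch (not part of this file): issuing CMD13 (SEND_STATUS)
 * through mmc_wait_for_cmd(), essentially what mmc_send_status() in
 * mmc_ops.c does, with the host claimed around the call.
 */
static int example_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	mmc_claim_host(card->host);
	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	mmc_release_host(card->host);
	if (err)
		return err;

	if (status)
		*status = cmd.resp[0];

	return 0;
}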
839 
840 /**
841  *	mmc_stop_bkops - stop ongoing BKOPS
842  *	@card: MMC card to check BKOPS
843  *
844  *	Send HPI command to stop ongoing background operations to
845  *	allow rapid servicing of foreground operations, e.g. read/
846  *	writes. Wait until the card comes out of the programming state
847  *	to avoid errors in servicing read/write requests.
848  */
849 int mmc_stop_bkops(struct mmc_card *card)
850 {
851 	int err = 0;
852 
853 	BUG_ON(!card);
854 	err = mmc_interrupt_hpi(card);
855 
856 	/*
857 	 * If err is -EINVAL, an HPI cannot be issued in the card's current
858 	 * state; the card will complete the BKOPS on its own.
859 	 */
860 	if (!err || (err == -EINVAL)) {
861 		mmc_card_clr_doing_bkops(card);
862 		mmc_retune_release(card->host);
863 		err = 0;
864 	}
865 
866 	return err;
867 }
868 EXPORT_SYMBOL(mmc_stop_bkops);
869 
870 int mmc_read_bkops_status(struct mmc_card *card)
871 {
872 	int err;
873 	u8 *ext_csd;
874 
875 	mmc_claim_host(card->host);
876 	err = mmc_get_ext_csd(card, &ext_csd);
877 	mmc_release_host(card->host);
878 	if (err)
879 		return err;
880 
881 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
882 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
883 	kfree(ext_csd);
884 	return 0;
885 }
886 EXPORT_SYMBOL(mmc_read_bkops_status);
887 
888 /**
889  *	mmc_set_data_timeout - set the timeout for a data command
890  *	@data: data phase for command
891  *	@card: the MMC card associated with the data transfer
892  *
893  *	Computes the data timeout parameters according to the
894  *	correct algorithm given the card type.
895  */
896 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
897 {
898 	unsigned int mult;
899 
900 	/*
901 	 * SDIO cards only define an upper 1 s limit on access.
902 	 */
903 	if (mmc_card_sdio(card)) {
904 		data->timeout_ns = 1000000000;
905 		data->timeout_clks = 0;
906 		return;
907 	}
908 
909 	/*
910 	 * SD cards use a 100 multiplier rather than 10
911 	 */
912 	mult = mmc_card_sd(card) ? 100 : 10;
913 
914 	/*
915 	 * Scale up the multiplier (and therefore the timeout) by
916 	 * the r2w factor for writes.
917 	 */
918 	if (data->flags & MMC_DATA_WRITE)
919 		mult <<= card->csd.r2w_factor;
920 
921 	data->timeout_ns = card->csd.tacc_ns * mult;
922 	data->timeout_clks = card->csd.tacc_clks * mult;
923 
924 	/*
925 	 * SD cards also have an upper limit on the timeout.
926 	 */
927 	if (mmc_card_sd(card)) {
928 		unsigned int timeout_us, limit_us;
929 
930 		timeout_us = data->timeout_ns / 1000;
931 		if (card->host->ios.clock)
932 			timeout_us += data->timeout_clks * 1000 /
933 				(card->host->ios.clock / 1000);
934 
935 		if (data->flags & MMC_DATA_WRITE)
936 			/*
937 			 * The MMC spec says: "It is strongly recommended
938 			 * for hosts to implement more than 500ms
939 			 * timeout value even if the card indicates
940 			 * the 250ms maximum busy length."  Even the
941 			 * previous value of 300ms is known to be
942 			 * insufficient for some cards.
943 			 */
944 			limit_us = 3000000;
945 		else
946 			limit_us = 100000;
947 
948 		/*
949 		 * SDHC cards always use these fixed values.
950 		 */
951 		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
952 			data->timeout_ns = limit_us * 1000;
953 			data->timeout_clks = 0;
954 		}
955 
956 		/* assign limit value if invalid */
957 		if (timeout_us == 0)
958 			data->timeout_ns = limit_us * 1000;
959 	}
960 
961 	/*
962 	 * Some cards require longer data read timeout than indicated in CSD.
963 	 * Address this by setting the read timeout to a "reasonably high"
964 	 * value. For the cards tested, 600ms has proven enough. If necessary,
965 	 * this value can be increased if other problematic cards require this.
966 	 */
967 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
968 		data->timeout_ns = 600000000;
969 		data->timeout_clks = 0;
970 	}
971 
972 	/*
973 	 * Some cards need very high timeouts if driven in SPI mode.
974 	 * The worst observed timeout was 900ms after writing a
975 	 * continuous stream of data until the internal logic
976 	 * overflowed.
977 	 */
978 	if (mmc_host_is_spi(card->host)) {
979 		if (data->flags & MMC_DATA_WRITE) {
980 			if (data->timeout_ns < 1000000000)
981 				data->timeout_ns = 1000000000;	/* 1s */
982 		} else {
983 			if (data->timeout_ns < 100000000)
984 				data->timeout_ns =  100000000;	/* 100ms */
985 		}
986 	}
987 }
988 EXPORT_SYMBOL(mmc_set_data_timeout);
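
/*
 * Worked example: for a byte-addressed SD card with tacc_ns = 1500000
 * (TAAC = 1.5 ms), tacc_clks = 0 and r2w_factor = 2, a write uses
 * mult = 100 << 2 = 400, giving timeout_ns = 600 ms. That is below the
 * 3000000 us write limit, so the value stands; an SDHC card
 * (mmc_card_blockaddr()) would get the fixed 3 s limit instead.
 */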
989 
990 /**
991  *	mmc_align_data_size - pads a transfer size to a more optimal value
992  *	@card: the MMC card associated with the data transfer
993  *	@sz: original transfer size
994  *
995  *	Pads the original data size with a number of extra bytes in
996  *	order to avoid controller bugs and/or performance hits
997  *	(e.g. some controllers revert to PIO for certain sizes).
998  *
999  *	Returns the improved size, which might be unmodified.
1000  *
1001  *	Note that this function is only relevant when issuing a
1002  *	single scatter gather entry.
1003  */
1004 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
1005 {
1006 	/*
1007 	 * FIXME: We don't have a system for the controller to tell
1008 	 * the core about its problems yet, so for now we just 32-bit
1009 	 * align the size.
1010 	 */
1011 	sz = ((sz + 3) / 4) * 4;
1012 
1013 	return sz;
1014 }
1015 EXPORT_SYMBOL(mmc_align_data_size);
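
/*
 * Example: mmc_align_data_size(card, 13) returns 16, while an already
 * aligned size such as 512 comes back unchanged.
 */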
1016 
1017 /**
1018  *	__mmc_claim_host - exclusively claim a host
1019  *	@host: mmc host to claim
1020  *	@abort: whether or not the operation should be aborted
1021  *
1022  *	Claim a host for a set of operations.  If @abort is non-NULL and
1023  *	dereferences to a non-zero value, then this will return prematurely with
1024  *	that non-zero value without acquiring the lock.  Returns zero
1025  *	with the lock held otherwise.
1026  */
1027 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
1028 {
1029 	DECLARE_WAITQUEUE(wait, current);
1030 	unsigned long flags;
1031 	int stop;
1032 	bool pm = false;
1033 
1034 	might_sleep();
1035 
1036 	add_wait_queue(&host->wq, &wait);
1037 	spin_lock_irqsave(&host->lock, flags);
1038 	while (1) {
1039 		set_current_state(TASK_UNINTERRUPTIBLE);
1040 		stop = abort ? atomic_read(abort) : 0;
1041 		if (stop || !host->claimed || host->claimer == current)
1042 			break;
1043 		spin_unlock_irqrestore(&host->lock, flags);
1044 		schedule();
1045 		spin_lock_irqsave(&host->lock, flags);
1046 	}
1047 	set_current_state(TASK_RUNNING);
1048 	if (!stop) {
1049 		host->claimed = 1;
1050 		host->claimer = current;
1051 		host->claim_cnt += 1;
1052 		if (host->claim_cnt == 1)
1053 			pm = true;
1054 	} else
1055 		wake_up(&host->wq);
1056 	spin_unlock_irqrestore(&host->lock, flags);
1057 	remove_wait_queue(&host->wq, &wait);
1058 
1059 	if (pm)
1060 		pm_runtime_get_sync(mmc_dev(host));
1061 
1062 	return stop;
1063 }
1064 EXPORT_SYMBOL(__mmc_claim_host);
1065 
1066 /**
1067  *	mmc_release_host - release a host
1068  *	@host: mmc host to release
1069  *
1070  *	Release a MMC host, allowing others to claim the host
1071  *	for their operations.
1072  */
1073 void mmc_release_host(struct mmc_host *host)
1074 {
1075 	unsigned long flags;
1076 
1077 	WARN_ON(!host->claimed);
1078 
1079 	spin_lock_irqsave(&host->lock, flags);
1080 	if (--host->claim_cnt) {
1081 		/* Release for nested claim */
1082 		spin_unlock_irqrestore(&host->lock, flags);
1083 	} else {
1084 		host->claimed = 0;
1085 		host->claimer = NULL;
1086 		spin_unlock_irqrestore(&host->lock, flags);
1087 		wake_up(&host->wq);
1088 		pm_runtime_mark_last_busy(mmc_dev(host));
1089 		pm_runtime_put_autosuspend(mmc_dev(host));
1090 	}
1091 }
1092 EXPORT_SYMBOL(mmc_release_host);
1093 
1094 /*
1095  * This is a helper function, which fetches a runtime pm reference for the
1096  * card device and also claims the host.
1097  */
1098 void mmc_get_card(struct mmc_card *card)
1099 {
1100 	pm_runtime_get_sync(&card->dev);
1101 	mmc_claim_host(card->host);
1102 }
1103 EXPORT_SYMBOL(mmc_get_card);
1104 
1105 /*
1106  * This is a helper function, which releases the host and drops the runtime
1107  * pm reference for the card device.
1108  */
1109 void mmc_put_card(struct mmc_card *card)
1110 {
1111 	mmc_release_host(card->host);
1112 	pm_runtime_mark_last_busy(&card->dev);
1113 	pm_runtime_put_autosuspend(&card->dev);
1114 }
1115 EXPORT_SYMBOL(mmc_put_card);
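
/*
 * Illustrative sketch (not part of this file): the usual access pattern
 * built on the two helpers above, keeping both the card and the host
 * runtime-resumed for the duration of the operation. do_transfer() is a
 * hypothetical helper.
 */
int do_transfer(struct mmc_card *card);	/* hypothetical */

static int example_access_card(struct mmc_card *card)
{
	int err;

	mmc_get_card(card);	/* runtime PM reference + host claim */
	err = do_transfer(card);
	mmc_put_card(card);	/* release host, drop PM reference */

	return err;
}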
1116 
1117 /*
1118  * Internal function that does the actual ios call to the host driver,
1119  * optionally printing some debug output.
1120  */
1121 static inline void mmc_set_ios(struct mmc_host *host)
1122 {
1123 	struct mmc_ios *ios = &host->ios;
1124 
1125 	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1126 		"width %u timing %u\n",
1127 		 mmc_hostname(host), ios->clock, ios->bus_mode,
1128 		 ios->power_mode, ios->chip_select, ios->vdd,
1129 		 1 << ios->bus_width, ios->timing);
1130 
1131 	host->ops->set_ios(host, ios);
1132 }
1133 
1134 /*
1135  * Control chip select pin on a host.
1136  */
1137 void mmc_set_chip_select(struct mmc_host *host, int mode)
1138 {
1139 	host->ios.chip_select = mode;
1140 	mmc_set_ios(host);
1141 }
1142 
1143 /*
1144  * Sets the host clock to the highest possible frequency that
1145  * is at or below "hz".
1146  */
1147 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1148 {
1149 	WARN_ON(hz && hz < host->f_min);
1150 
1151 	if (hz > host->f_max)
1152 		hz = host->f_max;
1153 
1154 	host->ios.clock = hz;
1155 	mmc_set_ios(host);
1156 }
1157 
1158 int mmc_execute_tuning(struct mmc_card *card)
1159 {
1160 	struct mmc_host *host = card->host;
1161 	u32 opcode;
1162 	int err;
1163 
1164 	if (!host->ops->execute_tuning)
1165 		return 0;
1166 
1167 	if (mmc_card_mmc(card))
1168 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1169 	else
1170 		opcode = MMC_SEND_TUNING_BLOCK;
1171 
1172 	err = host->ops->execute_tuning(host, opcode);
1173 
1174 	if (err)
1175 		pr_err("%s: tuning execution failed: %d\n",
1176 			mmc_hostname(host), err);
1177 	else
1178 		mmc_retune_enable(host);
1179 
1180 	return err;
1181 }
1182 
1183 /*
1184  * Change the bus mode (open drain/push-pull) of a host.
1185  */
1186 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1187 {
1188 	host->ios.bus_mode = mode;
1189 	mmc_set_ios(host);
1190 }
1191 
1192 /*
1193  * Change data bus width of a host.
1194  */
1195 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1196 {
1197 	host->ios.bus_width = width;
1198 	mmc_set_ios(host);
1199 }
1200 
1201 /*
1202  * Set initial state after a power cycle or a hw_reset.
1203  */
1204 void mmc_set_initial_state(struct mmc_host *host)
1205 {
1206 	mmc_retune_disable(host);
1207 
1208 	if (mmc_host_is_spi(host))
1209 		host->ios.chip_select = MMC_CS_HIGH;
1210 	else
1211 		host->ios.chip_select = MMC_CS_DONTCARE;
1212 	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1213 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1214 	host->ios.timing = MMC_TIMING_LEGACY;
1215 	host->ios.drv_type = 0;
1216 	host->ios.enhanced_strobe = false;
1217 
1218 	/*
1219 	 * Make sure we are in non-enhanced strobe mode before we
1220 	 * actually enable it in ext_csd.
1221 	 */
1222 	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1223 	     host->ops->hs400_enhanced_strobe)
1224 		host->ops->hs400_enhanced_strobe(host, &host->ios);
1225 
1226 	mmc_set_ios(host);
1227 }
1228 
1229 /**
1230  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1231  * @vdd:	voltage (mV)
1232  * @low_bits:	prefer low bits in boundary cases
1233  *
1234  * This function returns the OCR bit number according to the provided @vdd
1235  * value. If conversion is not possible a negative errno value is returned.
1236  *
1237  * Depending on the @low_bits flag the function prefers low or high OCR bits
1238  * on boundary voltages. For example,
1239  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1240  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1241  *
1242  * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1243  */
1244 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1245 {
1246 	const int max_bit = ilog2(MMC_VDD_35_36);
1247 	int bit;
1248 
1249 	if (vdd < 1650 || vdd > 3600)
1250 		return -EINVAL;
1251 
1252 	if (vdd >= 1650 && vdd <= 1950)
1253 		return ilog2(MMC_VDD_165_195);
1254 
1255 	if (low_bits)
1256 		vdd -= 1;
1257 
1258 	/* Base 2000 mV, step 100 mV, bit's base 8. */
1259 	bit = (vdd - 2000) / 100 + 8;
1260 	if (bit > max_bit)
1261 		return max_bit;
1262 	return bit;
1263 }
1264 
1265 /**
1266  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1267  * @vdd_min:	minimum voltage value (mV)
1268  * @vdd_max:	maximum voltage value (mV)
1269  *
1270  * This function returns the OCR mask bits according to the provided @vdd_min
1271  * and @vdd_max values. If conversion is not possible the function returns 0.
1272  *
1273  * Notes wrt boundary cases:
1274  * This function sets the OCR bits for all boundary voltages, for example
1275  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1276  * MMC_VDD_34_35 mask.
1277  */
1278 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1279 {
1280 	u32 mask = 0;
1281 
1282 	if (vdd_max < vdd_min)
1283 		return 0;
1284 
1285 	/* Prefer high bits for the boundary vdd_max values. */
1286 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1287 	if (vdd_max < 0)
1288 		return 0;
1289 
1290 	/* Prefer low bits for the boundary vdd_min values. */
1291 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1292 	if (vdd_min < 0)
1293 		return 0;
1294 
1295 	/* Fill the mask, from max bit to min bit. */
1296 	while (vdd_max >= vdd_min)
1297 		mask |= 1 << vdd_max--;
1298 
1299 	return mask;
1300 }
1301 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
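
/*
 * Worked example: mmc_vddrange_to_ocrmask(3300, 3400). For vdd_min the low
 * bit is preferred, so 3300 - 1 = 3299 mV maps to bit (3299 - 2000) / 100 +
 * 8 = 20, i.e. MMC_VDD_32_33. For vdd_max the high bit is preferred, so
 * 3400 mV maps to bit 22, i.e. MMC_VDD_34_35. The result is
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */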
1302 
1303 #ifdef CONFIG_OF
1304 
1305 /**
1306  * mmc_of_parse_voltage - return mask of supported voltages
1307  * @np: The device node need to be parsed.
1308  * @mask: mask of voltages available for MMC/SD/SDIO
1309  *
1310  * Parse the "voltage-ranges" DT property, returning zero if it is not
1311  * found, negative errno if the voltage-range specification is invalid,
1312  * or one if the voltage-range is specified and successfully parsed.
1313  */
1314 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1315 {
1316 	const u32 *voltage_ranges;
1317 	int num_ranges, i;
1318 
1319 	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1320 	if (!voltage_ranges) {
1321 		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1322 		return 0;
1323 	}
1324 	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1325 	if (!num_ranges) {
1326 		pr_err("%s: voltage-ranges empty\n", np->full_name);
1327 		return -EINVAL;
1328 	}
1329 
1330 	for (i = 0; i < num_ranges; i++) {
1331 		const int j = i * 2;
1332 		u32 ocr_mask;
1333 
1334 		ocr_mask = mmc_vddrange_to_ocrmask(
1335 				be32_to_cpu(voltage_ranges[j]),
1336 				be32_to_cpu(voltage_ranges[j + 1]));
1337 		if (!ocr_mask) {
1338 			pr_err("%s: voltage-range #%d is invalid\n",
1339 				np->full_name, i);
1340 			return -EINVAL;
1341 		}
1342 		*mask |= ocr_mask;
1343 	}
1344 
1345 	return 1;
1346 }
1347 EXPORT_SYMBOL(mmc_of_parse_voltage);
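
/*
 * Illustrative sketch (not part of this file): a device tree fragment and a
 * matching call site. The node name and the range are examples only:
 *
 *	mmc@10010000 {
 *		voltage-ranges = <3300 3400>;
 *	};
 */
static int example_parse_ocr(struct mmc_host *mmc, struct device_node *np)
{
	u32 ocr_mask = 0;
	int ret;

	ret = mmc_of_parse_voltage(np, &ocr_mask);
	if (ret < 0)
		return ret;			/* malformed voltage-ranges */
	if (ret == 1)
		mmc->ocr_avail = ocr_mask;	/* property parsed OK */

	return 0;
}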
1348 
1349 #endif /* CONFIG_OF */
1350 
1351 static int mmc_of_get_func_num(struct device_node *node)
1352 {
1353 	u32 reg;
1354 	int ret;
1355 
1356 	ret = of_property_read_u32(node, "reg", &reg);
1357 	if (ret < 0)
1358 		return ret;
1359 
1360 	return reg;
1361 }
1362 
1363 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1364 		unsigned func_num)
1365 {
1366 	struct device_node *node;
1367 
1368 	if (!host->parent || !host->parent->of_node)
1369 		return NULL;
1370 
1371 	for_each_child_of_node(host->parent->of_node, node) {
1372 		if (mmc_of_get_func_num(node) == func_num)
1373 			return node;
1374 	}
1375 
1376 	return NULL;
1377 }
1378 
1379 #ifdef CONFIG_REGULATOR
1380 
1381 /**
1382  * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1383  * @vdd_bit:	OCR bit number
1384  * @min_uV:	minimum voltage value (uV)
1385  * @max_uV:	maximum voltage value (uV)
1386  *
1387  * This function returns the voltage range according to the provided OCR
1388  * bit number. If conversion is not possible a negative errno value is returned.
1389  */
1390 static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1391 {
1392 	int		tmp;
1393 
1394 	if (!vdd_bit)
1395 		return -EINVAL;
1396 
1397 	/*
1398 	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1399 	 * bits this regulator doesn't quite support ... don't
1400 	 * be too picky, most cards and regulators are OK with
1401 	 * a 0.1V range goof (it's a small error percentage).
1402 	 */
1403 	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1404 	if (tmp == 0) {
1405 		*min_uV = 1650 * 1000;
1406 		*max_uV = 1950 * 1000;
1407 	} else {
1408 		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
1409 		*max_uV = *min_uV + 100 * 1000;
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 /**
1416  * mmc_regulator_get_ocrmask - return mask of supported voltages
1417  * @supply: regulator to use
1418  *
1419  * This returns either a negative errno, or a mask of voltages that
1420  * can be provided to MMC/SD/SDIO devices using the specified voltage
1421  * regulator.  This would normally be called before registering the
1422  * MMC host adapter.
1423  */
1424 int mmc_regulator_get_ocrmask(struct regulator *supply)
1425 {
1426 	int			result = 0;
1427 	int			count;
1428 	int			i;
1429 	int			vdd_uV;
1430 	int			vdd_mV;
1431 
1432 	count = regulator_count_voltages(supply);
1433 	if (count < 0)
1434 		return count;
1435 
1436 	for (i = 0; i < count; i++) {
1437 		vdd_uV = regulator_list_voltage(supply, i);
1438 		if (vdd_uV <= 0)
1439 			continue;
1440 
1441 		vdd_mV = vdd_uV / 1000;
1442 		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1443 	}
1444 
1445 	if (!result) {
1446 		vdd_uV = regulator_get_voltage(supply);
1447 		if (vdd_uV <= 0)
1448 			return vdd_uV;
1449 
1450 		vdd_mV = vdd_uV / 1000;
1451 		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1452 	}
1453 
1454 	return result;
1455 }
1456 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1457 
1458 /**
1459  * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1460  * @mmc: the host to regulate
1461  * @supply: regulator to use
1462  * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1463  *
1464  * Returns zero on success, else negative errno.
1465  *
1466  * MMC host drivers may use this to enable or disable a regulator using
1467  * a particular supply voltage.  This would normally be called from the
1468  * set_ios() method.
1469  */
1470 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1471 			struct regulator *supply,
1472 			unsigned short vdd_bit)
1473 {
1474 	int			result = 0;
1475 	int			min_uV, max_uV;
1476 
1477 	if (vdd_bit) {
1478 		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1479 
1480 		result = regulator_set_voltage(supply, min_uV, max_uV);
1481 		if (result == 0 && !mmc->regulator_enabled) {
1482 			result = regulator_enable(supply);
1483 			if (!result)
1484 				mmc->regulator_enabled = true;
1485 		}
1486 	} else if (mmc->regulator_enabled) {
1487 		result = regulator_disable(supply);
1488 		if (result == 0)
1489 			mmc->regulator_enabled = false;
1490 	}
1491 
1492 	if (result)
1493 		dev_err(mmc_dev(mmc),
1494 			"could not set regulator OCR (%d)\n", result);
1495 	return result;
1496 }
1497 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
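
/*
 * Illustrative sketch (not part of this file): a host driver's set_ios()
 * handler driving vmmc through the helper above, with vdd_bit = 0 cutting
 * power. foo_set_ios() is hypothetical; real drivers such as sdhci follow
 * this shape.
 */
static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode == MMC_POWER_OFF ?
				      0 : ios->vdd);

	/* clock, bus width and timing handling elided */
}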
1498 
1499 static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1500 						  int min_uV, int target_uV,
1501 						  int max_uV)
1502 {
1503 	/*
1504 	 * Check if supported first to avoid errors since we may try several
1505 	 * signal levels during power up and don't want to show errors.
1506 	 */
1507 	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1508 		return -EINVAL;
1509 
1510 	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1511 					     max_uV);
1512 }
1513 
1514 /**
1515  * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1516  *
1517  * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1518  * That will match the behavior of old boards where VQMMC and VMMC were supplied
1519  * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1520  * SD card spec also define VQMMC in terms of VMMC.
1521  * If this is not possible we'll try the full 2.7-3.6V of the spec.
1522  *
1523  * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1524  * requested voltage.  This is definitely a good idea for UHS where there's a
1525  * separate regulator on the card that's trying to make 1.8V and it's best if
1526  * we match.
1527  *
1528  * This function is expected to be used by a controller's
1529  * start_signal_voltage_switch() function.
1530  */
1531 int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1532 {
1533 	struct device *dev = mmc_dev(mmc);
1534 	int ret, volt, min_uV, max_uV;
1535 
1536 	/* If no vqmmc supply then we can't change the voltage */
1537 	if (IS_ERR(mmc->supply.vqmmc))
1538 		return -EINVAL;
1539 
1540 	switch (ios->signal_voltage) {
1541 	case MMC_SIGNAL_VOLTAGE_120:
1542 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1543 						1100000, 1200000, 1300000);
1544 	case MMC_SIGNAL_VOLTAGE_180:
1545 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1546 						1700000, 1800000, 1950000);
1547 	case MMC_SIGNAL_VOLTAGE_330:
1548 		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1549 		if (ret < 0)
1550 			return ret;
1551 
1552 		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1553 			__func__, volt, max_uV);
1554 
1555 		min_uV = max(volt - 300000, 2700000);
1556 		max_uV = min(max_uV + 200000, 3600000);
1557 
1558 		/*
1559 		 * Due to a limitation in the current implementation of
1560 		 * regulator_set_voltage_triplet() which is taking the lowest
1561 		 * voltage possible if below the target, search for a suitable
1562 		 * voltage in two steps and try to stay close to vmmc
1563 		 * with a 0.3V tolerance at first.
1564 		 */
1565 		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1566 						min_uV, volt, max_uV))
1567 			return 0;
1568 
1569 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1570 						2700000, volt, 3600000);
1571 	default:
1572 		return -EINVAL;
1573 	}
1574 }
1575 EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1576 
1577 #endif /* CONFIG_REGULATOR */
1578 
1579 int mmc_regulator_get_supply(struct mmc_host *mmc)
1580 {
1581 	struct device *dev = mmc_dev(mmc);
1582 	int ret;
1583 
1584 	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1585 	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1586 
1587 	if (IS_ERR(mmc->supply.vmmc)) {
1588 		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1589 			return -EPROBE_DEFER;
1590 		dev_dbg(dev, "No vmmc regulator found\n");
1591 	} else {
1592 		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1593 		if (ret > 0)
1594 			mmc->ocr_avail = ret;
1595 		else
1596 			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1597 	}
1598 
1599 	if (IS_ERR(mmc->supply.vqmmc)) {
1600 		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1601 			return -EPROBE_DEFER;
1602 		dev_dbg(dev, "No vqmmc regulator found\n");
1603 	}
1604 
1605 	return 0;
1606 }
1607 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
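
/*
 * Illustrative sketch (not part of this file): calling the helper above from
 * a host driver's probe path, deferring until the regulators appear and
 * falling back to a fixed OCR mask when no vmmc regulator is described.
 */
static int example_probe_regulators(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;	/* -EPROBE_DEFER: try again later */

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	return 0;
}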
1608 
1609 /*
1610  * Mask off any voltages we don't support and select
1611  * the lowest voltage
1612  */
1613 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1614 {
1615 	int bit;
1616 
1617 	/*
1618 	 * Sanity check the voltages that the card claims to
1619 	 * support.
1620 	 */
1621 	if (ocr & 0x7F) {
1622 		dev_warn(mmc_dev(host),
1623 		"card claims to support voltages below defined range\n");
1624 		ocr &= ~0x7F;
1625 	}
1626 
1627 	ocr &= host->ocr_avail;
1628 	if (!ocr) {
1629 		dev_warn(mmc_dev(host), "no support for card's volts\n");
1630 		return 0;
1631 	}
1632 
1633 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1634 		bit = ffs(ocr) - 1;
1635 		ocr &= 3 << bit;
1636 		mmc_power_cycle(host, ocr);
1637 	} else {
1638 		bit = fls(ocr) - 1;
1639 		ocr &= 3 << bit;
1640 		if (bit != host->ios.vdd)
1641 			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1642 	}
1643 
1644 	return ocr;
1645 }
1646 
1647 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1648 {
1649 	int err = 0;
1650 	int old_signal_voltage = host->ios.signal_voltage;
1651 
1652 	host->ios.signal_voltage = signal_voltage;
1653 	if (host->ops->start_signal_voltage_switch)
1654 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1655 
1656 	if (err)
1657 		host->ios.signal_voltage = old_signal_voltage;
1658 
1659 	return err;
1660 
1661 }
1662 
1663 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1664 {
1665 	struct mmc_command cmd = {0};
1666 	int err = 0;
1667 	u32 clock;
1668 
1669 	BUG_ON(!host);
1670 
1671 	/*
1672 	 * Send CMD11 only if the request is to switch the card to
1673 	 * 1.8V signalling.
1674 	 */
1675 	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1676 		return __mmc_set_signal_voltage(host, signal_voltage);
1677 
1678 	/*
1679 	 * If we cannot switch voltages, return failure so the caller
1680 	 * can continue without UHS mode
1681 	 */
1682 	if (!host->ops->start_signal_voltage_switch)
1683 		return -EPERM;
1684 	if (!host->ops->card_busy)
1685 		pr_warn("%s: cannot verify signal voltage switch\n",
1686 			mmc_hostname(host));
1687 
1688 	cmd.opcode = SD_SWITCH_VOLTAGE;
1689 	cmd.arg = 0;
1690 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1691 
1692 	err = mmc_wait_for_cmd(host, &cmd, 0);
1693 	if (err)
1694 		return err;
1695 
1696 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1697 		return -EIO;
1698 
1699 	/*
1700 	 * The card should drive cmd and dat[0:3] low immediately
1701 	 * after the response of cmd11, but wait 1 ms to be sure
1702 	 */
1703 	mmc_delay(1);
1704 	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1705 		err = -EAGAIN;
1706 		goto power_cycle;
1707 	}
1708 	/*
1709 	 * During a signal voltage level switch, the clock must be gated
1710 	 * for 5 ms according to the SD spec
1711 	 */
1712 	clock = host->ios.clock;
1713 	host->ios.clock = 0;
1714 	mmc_set_ios(host);
1715 
1716 	if (__mmc_set_signal_voltage(host, signal_voltage)) {
1717 		/*
1718 		 * Voltages may not have been switched, but we've already
1719 		 * sent CMD11, so a power cycle is required anyway
1720 		 */
1721 		err = -EAGAIN;
1722 		goto power_cycle;
1723 	}
1724 
1725 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1726 	mmc_delay(10);
1727 	host->ios.clock = clock;
1728 	mmc_set_ios(host);
1729 
1730 	/* Wait for at least 1 ms according to spec */
1731 	mmc_delay(1);
1732 
1733 	/*
1734 	 * Failure to switch is indicated by the card holding
1735 	 * dat[0:3] low
1736 	 */
1737 	if (host->ops->card_busy && host->ops->card_busy(host))
1738 		err = -EAGAIN;
1739 
1740 power_cycle:
1741 	if (err) {
1742 		pr_debug("%s: Signal voltage switch failed, "
1743 			"power cycling card\n", mmc_hostname(host));
1744 		mmc_power_cycle(host, ocr);
1745 	}
1746 
1747 	return err;
1748 }
1749 
1750 /*
1751  * Select timing parameters for host.
1752  */
1753 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1754 {
1755 	host->ios.timing = timing;
1756 	mmc_set_ios(host);
1757 }
1758 
1759 /*
1760  * Select appropriate driver type for host.
1761  */
1762 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1763 {
1764 	host->ios.drv_type = drv_type;
1765 	mmc_set_ios(host);
1766 }
1767 
1768 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1769 			      int card_drv_type, int *drv_type)
1770 {
1771 	struct mmc_host *host = card->host;
1772 	int host_drv_type = SD_DRIVER_TYPE_B;
1773 
1774 	*drv_type = 0;
1775 
1776 	if (!host->ops->select_drive_strength)
1777 		return 0;
1778 
1779 	/* Use SD definition of driver strength for hosts */
1780 	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1781 		host_drv_type |= SD_DRIVER_TYPE_A;
1782 
1783 	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1784 		host_drv_type |= SD_DRIVER_TYPE_C;
1785 
1786 	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1787 		host_drv_type |= SD_DRIVER_TYPE_D;
1788 
1789 	/*
1790 	 * The drive strength that the hardware can support
1791 	 * depends on the board design.  Pass the appropriate
1792 	 * information and let the hardware specific code
1793 	 * return what is possible given the options
1794 	 */
1795 	return host->ops->select_drive_strength(card, max_dtr,
1796 						host_drv_type,
1797 						card_drv_type,
1798 						drv_type);
1799 }
1800 
1801 /*
1802  * Apply power to the MMC stack.  This is a two-stage process.
1803  * First, we enable power to the card without the clock running.
1804  * We then wait a bit for the power to stabilise.  Finally,
1805  * enable the bus drivers and clock to the card.
1806  *
1807  * We must _NOT_ enable the clock prior to power stabilising.
1808  *
1809  * If a host does all the power sequencing itself, ignore the
1810  * initial MMC_POWER_UP stage.
1811  */
1812 void mmc_power_up(struct mmc_host *host, u32 ocr)
1813 {
1814 	if (host->ios.power_mode == MMC_POWER_ON)
1815 		return;
1816 
1817 	mmc_pwrseq_pre_power_on(host);
1818 
1819 	host->ios.vdd = fls(ocr) - 1;
1820 	host->ios.power_mode = MMC_POWER_UP;
1821 	/* Set initial state and call mmc_set_ios */
1822 	mmc_set_initial_state(host);
1823 
1824 	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1825 	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1826 		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1827 	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1828 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1829 	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1830 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1831 
1832 	/*
1833 	 * This delay should be sufficient to allow the power supply
1834 	 * to reach the minimum voltage.
1835 	 */
1836 	mmc_delay(10);
1837 
1838 	mmc_pwrseq_post_power_on(host);
1839 
1840 	host->ios.clock = host->f_init;
1841 
1842 	host->ios.power_mode = MMC_POWER_ON;
1843 	mmc_set_ios(host);
1844 
1845 	/*
1846 	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1847 	 * time required to reach a stable voltage.
1848 	 */
1849 	mmc_delay(10);
1850 }
1851 
1852 void mmc_power_off(struct mmc_host *host)
1853 {
1854 	if (host->ios.power_mode == MMC_POWER_OFF)
1855 		return;
1856 
1857 	mmc_pwrseq_power_off(host);
1858 
1859 	host->ios.clock = 0;
1860 	host->ios.vdd = 0;
1861 
1862 	host->ios.power_mode = MMC_POWER_OFF;
1863 	/* Set initial state and call mmc_set_ios */
1864 	mmc_set_initial_state(host);
1865 
1866 	/*
1867 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1868 	 * XO-1.5, require a short delay after poweroff before the card
1869 	 * can be successfully turned on again.
1870 	 */
1871 	mmc_delay(1);
1872 }
1873 
1874 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1875 {
1876 	mmc_power_off(host);
1877 	/* Wait at least 1 ms according to SD spec */
1878 	mmc_delay(1);
1879 	mmc_power_up(host, ocr);
1880 }
1881 
1882 /*
1883  * Cleanup when the last reference to the bus operator is dropped.
1884  */
1885 static void __mmc_release_bus(struct mmc_host *host)
1886 {
1887 	BUG_ON(!host);
1888 	BUG_ON(host->bus_refs);
1889 	BUG_ON(!host->bus_dead);
1890 
1891 	host->bus_ops = NULL;
1892 }
1893 
1894 /*
1895  * Increase reference count of bus operator
1896  */
1897 static inline void mmc_bus_get(struct mmc_host *host)
1898 {
1899 	unsigned long flags;
1900 
1901 	spin_lock_irqsave(&host->lock, flags);
1902 	host->bus_refs++;
1903 	spin_unlock_irqrestore(&host->lock, flags);
1904 }
1905 
1906 /*
1907  * Decrease reference count of bus operator and free it if
1908  * it is the last reference.
1909  */
1910 static inline void mmc_bus_put(struct mmc_host *host)
1911 {
1912 	unsigned long flags;
1913 
1914 	spin_lock_irqsave(&host->lock, flags);
1915 	host->bus_refs--;
1916 	if ((host->bus_refs == 0) && host->bus_ops)
1917 		__mmc_release_bus(host);
1918 	spin_unlock_irqrestore(&host->lock, flags);
1919 }
1920 
1921 /*
1922  * Assign a mmc bus handler to a host. Only one bus handler may control a
1923  * host at any given time.
1924  */
1925 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1926 {
1927 	unsigned long flags;
1928 
1929 	BUG_ON(!host);
1930 	BUG_ON(!ops);
1931 
1932 	WARN_ON(!host->claimed);
1933 
1934 	spin_lock_irqsave(&host->lock, flags);
1935 
1936 	BUG_ON(host->bus_ops);
1937 	BUG_ON(host->bus_refs);
1938 
1939 	host->bus_ops = ops;
1940 	host->bus_refs = 1;
1941 	host->bus_dead = 0;
1942 
1943 	spin_unlock_irqrestore(&host->lock, flags);
1944 }
1945 
1946 /*
1947  * Remove the current bus handler from a host.
1948  */
1949 void mmc_detach_bus(struct mmc_host *host)
1950 {
1951 	unsigned long flags;
1952 
1953 	BUG_ON(!host);
1954 
1955 	WARN_ON(!host->claimed);
1956 	WARN_ON(!host->bus_ops);
1957 
1958 	spin_lock_irqsave(&host->lock, flags);
1959 
1960 	host->bus_dead = 1;
1961 
1962 	spin_unlock_irqrestore(&host->lock, flags);
1963 
1964 	mmc_bus_put(host);
1965 }
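
/*
 * Illustrative sketch of the bus handler life cycle as driven by the
 * attach and detect paths ('my_bus_ops' is a hypothetical instance).
 * mmc_attach_bus() takes the initial reference; mmc_detach_bus() marks
 * the bus dead and drops it, letting __mmc_release_bus() run once the
 * last user calls mmc_bus_put().
 */
static void example_bus_handler_lifecycle(struct mmc_host *host,
					  const struct mmc_bus_ops *my_bus_ops)
{
	mmc_claim_host(host);
	mmc_attach_bus(host, my_bus_ops);	/* bus_refs becomes 1 */

	/* ... card initialization and normal operation ... */

	mmc_detach_bus(host);			/* drops the last reference */
	mmc_release_host(host);
}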
1966 
1967 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1968 				bool cd_irq)
1969 {
1970 #ifdef CONFIG_MMC_DEBUG
1971 	unsigned long flags;
1972 	spin_lock_irqsave(&host->lock, flags);
1973 	WARN_ON(host->removed);
1974 	spin_unlock_irqrestore(&host->lock, flags);
1975 #endif
1976 
1977 	/*
1978 	 * If the device is configured as a wakeup source, prevent a new
1979 	 * sleep for 5 s to give user space time to consume the event.
1980 	 */
1981 	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1982 		device_can_wakeup(mmc_dev(host)))
1983 		pm_wakeup_event(mmc_dev(host), 5000);
1984 
1985 	host->detect_change = 1;
1986 	mmc_schedule_delayed_work(&host->detect, delay);
1987 }
1988 
1989 /**
1990  *	mmc_detect_change - process change of state on an MMC socket
1991  *	@host: host which changed state.
1992  *	@delay: optional delay to wait before detection (jiffies)
1993  *
1994  *	MMC drivers should call this when they detect a card has been
1995  *	inserted or removed. The MMC layer will confirm that any
1996  *	present card is still functional, and initialize any newly
1997  *	inserted card.
1998  */
1999 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
2000 {
2001 	_mmc_detect_change(host, delay, true);
2002 }
2003 EXPORT_SYMBOL(mmc_detect_change);
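
/*
 * Illustrative sketch (hypothetical handler): a host driver's card detect
 * interrupt usually just kicks the core with a debounce delay and lets
 * mmc_rescan() do the real work.
 */
static irqreturn_t example_cd_irq_handler(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;

	/* Debounce for 200 ms before the core re-scans the slot. */
	mmc_detect_change(host, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}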
2004 
2005 void mmc_init_erase(struct mmc_card *card)
2006 {
2007 	unsigned int sz;
2008 
2009 	if (is_power_of_2(card->erase_size))
2010 		card->erase_shift = ffs(card->erase_size) - 1;
2011 	else
2012 		card->erase_shift = 0;
2013 
2014 	/*
2015 	 * It is possible to erase an arbitrarily large area of an SD or MMC
2016 	 * card.  That is not desirable because it can take a long time
2017 	 * (minutes), potentially delaying more important I/O, and also the
2018 	 * timeout calculations become grossly over-estimated.
2019 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
2020 	 * to that size and alignment.
2021 	 *
2022 	 * For SD cards that define Allocation Unit size, limit erases to one
2023 	 * Allocation Unit at a time.
2024 	 * For MMC, have a stab at a good value; for modern cards it will
2025 	 * end up being 4MiB. Note that if the value is too small, it can end
2026 	 * up taking longer to erase. Also note, erase_size is already set to
2027 	 * High Capacity Erase Size if available when this function is called.
2028 	 */
2029 	if (mmc_card_sd(card) && card->ssr.au) {
2030 		card->pref_erase = card->ssr.au;
2031 		card->erase_shift = ffs(card->ssr.au) - 1;
2032 	} else if (card->erase_size) {
2033 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
2034 		if (sz < 128)
2035 			card->pref_erase = 512 * 1024 / 512;
2036 		else if (sz < 512)
2037 			card->pref_erase = 1024 * 1024 / 512;
2038 		else if (sz < 1024)
2039 			card->pref_erase = 2 * 1024 * 1024 / 512;
2040 		else
2041 			card->pref_erase = 4 * 1024 * 1024 / 512;
2042 		if (card->pref_erase < card->erase_size)
2043 			card->pref_erase = card->erase_size;
2044 		else {
2045 			sz = card->pref_erase % card->erase_size;
2046 			if (sz)
2047 				card->pref_erase += card->erase_size - sz;
2048 		}
2049 	} else
2050 		card->pref_erase = 0;
2051 }
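
/*
 * Worked example with illustrative numbers: a 4 GiB eMMC reporting
 * erase_size = 1024 sectors (a 512 KiB high-capacity erase group) has
 * 8388608 sectors, so sz = 8388608 >> 11 = 4096 (MiB).  That is >= 1024,
 * giving pref_erase = 4 * 1024 * 1024 / 512 = 8192 sectors, which is
 * larger than erase_size and already a multiple of it, so no rounding
 * is needed.
 */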
2052 
2053 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
2054 					  unsigned int arg, unsigned int qty)
2055 {
2056 	unsigned int erase_timeout;
2057 
2058 	if (arg == MMC_DISCARD_ARG ||
2059 	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
2060 		erase_timeout = card->ext_csd.trim_timeout;
2061 	} else if (card->ext_csd.erase_group_def & 1) {
2062 		/* High Capacity Erase Group Size uses HC timeouts */
2063 		if (arg == MMC_TRIM_ARG)
2064 			erase_timeout = card->ext_csd.trim_timeout;
2065 		else
2066 			erase_timeout = card->ext_csd.hc_erase_timeout;
2067 	} else {
2068 		/* CSD Erase Group Size uses write timeout */
2069 		unsigned int mult = (10 << card->csd.r2w_factor);
2070 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
2071 		unsigned int timeout_us;
2072 
2073 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
2074 		if (card->csd.tacc_ns < 1000000)
2075 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
2076 		else
2077 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
2078 
2079 		/*
2080 		 * ios.clock is only a target.  The real clock rate might be
2081 		 * less but not that much less, so fudge it by multiplying by 2.
2082 		 */
2083 		timeout_clks <<= 1;
2084 		timeout_us += (timeout_clks * 1000) /
2085 			      (card->host->ios.clock / 1000);
2086 
2087 		erase_timeout = timeout_us / 1000;
2088 
2089 		/*
2090 		 * Theoretically, the calculation could underflow, so round up
2091 		 * to 1 ms in that case.
2092 		 */
2093 		if (!erase_timeout)
2094 			erase_timeout = 1;
2095 	}
2096 
2097 	/* Multiplier for secure operations */
2098 	if (arg & MMC_SECURE_ARGS) {
2099 		if (arg == MMC_SECURE_ERASE_ARG)
2100 			erase_timeout *= card->ext_csd.sec_erase_mult;
2101 		else
2102 			erase_timeout *= card->ext_csd.sec_trim_mult;
2103 	}
2104 
2105 	erase_timeout *= qty;
2106 
2107 	/*
2108 	 * Ensure at least a 1 second timeout for SPI as per
2109 	 * 'mmc_set_data_timeout()'.
2110 	 */
2111 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2112 		erase_timeout = 1000;
2113 
2114 	return erase_timeout;
2115 }
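
/*
 * Worked example for the CSD path above, with illustrative numbers:
 * r2w_factor = 4 gives mult = 10 << 4 = 160; tacc_clks = 100 gives
 * timeout_clks = 16000, doubled to 32000 by the clock fudge.  With
 * tacc_ns = 500000 (< 1000000), timeout_us = 500000 * 160 / 1000 = 80000.
 * At ios.clock = 26 MHz the clock term adds 32000 * 1000 / 26000 =
 * 1230 us, so erase_timeout = 81230 / 1000 = 81 ms per erase group,
 * before the qty and secure multipliers are applied.
 */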
2116 
2117 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2118 					 unsigned int arg,
2119 					 unsigned int qty)
2120 {
2121 	unsigned int erase_timeout;
2122 
2123 	if (card->ssr.erase_timeout) {
2124 		/* Erase timeout specified in SD Status Register (SSR) */
2125 		erase_timeout = card->ssr.erase_timeout * qty +
2126 				card->ssr.erase_offset;
2127 	} else {
2128 		/*
2129 		 * Erase timeout not specified in SD Status Register (SSR) so
2130 		 * Erase timeout is not specified in SD Status Register (SSR),
2131 		 * so use 250 ms per write block.
2132 		erase_timeout = 250 * qty;
2133 	}
2134 
2135 	/* Must not be less than 1 second */
2136 	if (erase_timeout < 1000)
2137 		erase_timeout = 1000;
2138 
2139 	return erase_timeout;
2140 }
2141 
2142 static unsigned int mmc_erase_timeout(struct mmc_card *card,
2143 				      unsigned int arg,
2144 				      unsigned int qty)
2145 {
2146 	if (mmc_card_sd(card))
2147 		return mmc_sd_erase_timeout(card, arg, qty);
2148 	else
2149 		return mmc_mmc_erase_timeout(card, arg, qty);
2150 }
2151 
2152 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2153 			unsigned int to, unsigned int arg)
2154 {
2155 	struct mmc_command cmd = {0};
2156 	unsigned int qty = 0, busy_timeout = 0;
2157 	bool use_r1b_resp = false;
2158 	unsigned long timeout;
2159 	int err;
2160 
2161 	mmc_retune_hold(card->host);
2162 
2163 	/*
2164 	 * qty is used to calculate the erase timeout which depends on how many
2165 	 * erase groups (or allocation units in SD terminology) are affected.
2166 	 * We count erasing part of an erase group as one erase group.
2167 	 * For SD, the allocation units are always a power of 2.  For MMC, the
2168 	 * erase group size is almost certainly also a power of 2, but the
2169 	 * JEDEC standard does not seem to insist on that, so we fall back to
2170 	 * division in that case.  SD may not specify an allocation unit size,
2171 	 * in which case the timeout is based on the number of write blocks.
2172 	 *
2173 	 * Note that the timeout for secure trim 2 will only be correct if the
2174 	 * number of erase groups specified is the same as the total of all
2175 	 * preceding secure trim 1 commands.  Since the power may have been
2176 	 * lost since the secure trim 1 commands occurred, it is generally
2177 	 * impossible to calculate the secure trim 2 timeout correctly.
2178 	 */
2179 	if (card->erase_shift)
2180 		qty += ((to >> card->erase_shift) -
2181 			(from >> card->erase_shift)) + 1;
2182 	else if (mmc_card_sd(card))
2183 		qty += to - from + 1;
2184 	else
2185 		qty += ((to / card->erase_size) -
2186 			(from / card->erase_size)) + 1;
2187 
2188 	if (!mmc_card_blockaddr(card)) {
2189 		from <<= 9;
2190 		to <<= 9;
2191 	}
2192 
2193 	if (mmc_card_sd(card))
2194 		cmd.opcode = SD_ERASE_WR_BLK_START;
2195 	else
2196 		cmd.opcode = MMC_ERASE_GROUP_START;
2197 	cmd.arg = from;
2198 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2199 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2200 	if (err) {
2201 		pr_err("mmc_erase: group start error %d, status %#x\n",
2202 		       err, cmd.resp[0]);
2203 		err = -EIO;
2204 		goto out;
2205 	}
2206 
2207 	memset(&cmd, 0, sizeof(struct mmc_command));
2208 	if (mmc_card_sd(card))
2209 		cmd.opcode = SD_ERASE_WR_BLK_END;
2210 	else
2211 		cmd.opcode = MMC_ERASE_GROUP_END;
2212 	cmd.arg = to;
2213 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2214 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2215 	if (err) {
2216 		pr_err("mmc_erase: group end error %d, status %#x\n",
2217 		       err, cmd.resp[0]);
2218 		err = -EIO;
2219 		goto out;
2220 	}
2221 
2222 	memset(&cmd, 0, sizeof(struct mmc_command));
2223 	cmd.opcode = MMC_ERASE;
2224 	cmd.arg = arg;
2225 	busy_timeout = mmc_erase_timeout(card, arg, qty);
2226 	/*
2227 	 * If the host controller supports busy signalling and the timeout for
2228 	 * the erase operation does not exceed the max_busy_timeout, we should
2229 	 * use an R1B response. Otherwise we need to prevent the host from doing
2230 	 * hw busy detection, which is done by converting to an R1 response.
2231 	 */
2232 	if (card->host->max_busy_timeout &&
2233 	    busy_timeout > card->host->max_busy_timeout) {
2234 		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2235 	} else {
2236 		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2237 		cmd.busy_timeout = busy_timeout;
2238 		use_r1b_resp = true;
2239 	}
2240 
2241 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2242 	if (err) {
2243 		pr_err("mmc_erase: erase error %d, status %#x\n",
2244 		       err, cmd.resp[0]);
2245 		err = -EIO;
2246 		goto out;
2247 	}
2248 
2249 	if (mmc_host_is_spi(card->host))
2250 		goto out;
2251 
2252 	/*
2253 	 * When an R1B response is used together with MMC_CAP_WAIT_WHILE_BUSY,
2254 	 * polling must be avoided.
2255 	 */
2256 	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2257 		goto out;
2258 
2259 	timeout = jiffies + msecs_to_jiffies(busy_timeout);
2260 	do {
2261 		memset(&cmd, 0, sizeof(struct mmc_command));
2262 		cmd.opcode = MMC_SEND_STATUS;
2263 		cmd.arg = card->rca << 16;
2264 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2265 		/* Do not retry else we can't see errors */
2266 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2267 		if (err || (cmd.resp[0] & 0xFDF92000)) {
2268 			pr_err("error %d requesting status %#x\n",
2269 				err, cmd.resp[0]);
2270 			err = -EIO;
2271 			goto out;
2272 		}
2273 
2274 		/* Timeout if the device never becomes ready for data and
2275 		 * never leaves the programming state.
2276 		 */
2277 		if (time_after(jiffies, timeout)) {
2278 			pr_err("%s: Card stuck in programming state! %s\n",
2279 				mmc_hostname(card->host), __func__);
2280 			err = -EIO;
2281 			goto out;
2282 		}
2283 
2284 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2285 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2286 out:
2287 	mmc_retune_release(card->host);
2288 	return err;
2289 }
2290 
2291 static unsigned int mmc_align_erase_size(struct mmc_card *card,
2292 					 unsigned int *from,
2293 					 unsigned int *to,
2294 					 unsigned int nr)
2295 {
2296 	unsigned int from_new = *from, nr_new = nr, rem;
2297 
2298 	/*
2299 	 * When 'card->erase_size' is a power of 2, we can use round_up/down()
2300 	 * to align the erase size efficiently.
2301 	 */
2302 	if (is_power_of_2(card->erase_size)) {
2303 		unsigned int temp = from_new;
2304 
2305 		from_new = round_up(temp, card->erase_size);
2306 		rem = from_new - temp;
2307 
2308 		if (nr_new > rem)
2309 			nr_new -= rem;
2310 		else
2311 			return 0;
2312 
2313 		nr_new = round_down(nr_new, card->erase_size);
2314 	} else {
2315 		rem = from_new % card->erase_size;
2316 		if (rem) {
2317 			rem = card->erase_size - rem;
2318 			from_new += rem;
2319 			if (nr_new > rem)
2320 				nr_new -= rem;
2321 			else
2322 				return 0;
2323 		}
2324 
2325 		rem = nr_new % card->erase_size;
2326 		if (rem)
2327 			nr_new -= rem;
2328 	}
2329 
2330 	if (nr_new == 0)
2331 		return 0;
2332 
2333 	*to = from_new + nr_new;
2334 	*from = from_new;
2335 
2336 	return nr_new;
2337 }
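
/*
 * Worked example with illustrative numbers: with erase_size = 1024
 * sectors (a power of 2), *from = 1000 and nr = 5000, round_up() moves
 * from_new to 1024 (rem = 24), leaving nr_new = 4976, which round_down()
 * trims to 4096.  The aligned request is 4096 sectors at [1024, 5120).
 */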
2338 
2339 /**
2340  * mmc_erase - erase sectors.
2341  * @card: card to erase
2342  * @from: first sector to erase
2343  * @nr: number of sectors to erase
2344  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2345  *
2346  * Caller must claim host before calling this function.
2347  */
2348 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2349 	      unsigned int arg)
2350 {
2351 	unsigned int rem, to = from + nr;
2352 	int err;
2353 
2354 	if (!(card->host->caps & MMC_CAP_ERASE) ||
2355 	    !(card->csd.cmdclass & CCC_ERASE))
2356 		return -EOPNOTSUPP;
2357 
2358 	if (!card->erase_size)
2359 		return -EOPNOTSUPP;
2360 
2361 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2362 		return -EOPNOTSUPP;
2363 
2364 	if ((arg & MMC_SECURE_ARGS) &&
2365 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2366 		return -EOPNOTSUPP;
2367 
2368 	if ((arg & MMC_TRIM_ARGS) &&
2369 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2370 		return -EOPNOTSUPP;
2371 
2372 	if (arg == MMC_SECURE_ERASE_ARG) {
2373 		if (from % card->erase_size || nr % card->erase_size)
2374 			return -EINVAL;
2375 	}
2376 
2377 	if (arg == MMC_ERASE_ARG)
2378 		nr = mmc_align_erase_size(card, &from, &to, nr);
2379 
2380 	if (nr == 0)
2381 		return 0;
2382 
2383 	if (to <= from)
2384 		return -EINVAL;
2385 
2386 	/* 'from' and 'to' are inclusive */
2387 	to -= 1;
2388 
2389 	/*
2390 	 * Special case where only one erase-group fits in the timeout budget:
2391 	 * If the region crosses an erase-group boundary in this particular
2392 	 * case, we will be trimming more than one erase-group, which does not
2393 	 * fit in the timeout budget of the controller, so we need to split it
2394 	 * and call mmc_do_erase() twice if necessary. This special case is
2395 	 * identified by the card->eg_boundary flag.
2396 	 */
2397 	rem = card->erase_size - (from % card->erase_size);
2398 	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2399 		err = mmc_do_erase(card, from, from + rem - 1, arg);
2400 		from += rem;
2401 		if ((err) || (to <= from))
2402 			return err;
2403 	}
2404 
2405 	return mmc_do_erase(card, from, to, arg);
2406 }
2407 EXPORT_SYMBOL(mmc_erase);
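
/*
 * Illustrative usage sketch (hypothetical caller): discard a sector
 * range, preferring TRIM when the card supports it.  The host must be
 * claimed around the call, as mmc_erase() requires.
 */
static int example_discard_range(struct mmc_card *card, unsigned int from,
				 unsigned int nr)
{
	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
	int err;

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, nr, arg);
	mmc_release_host(card->host);

	return err;
}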
2408 
2409 int mmc_can_erase(struct mmc_card *card)
2410 {
2411 	if ((card->host->caps & MMC_CAP_ERASE) &&
2412 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2413 		return 1;
2414 	return 0;
2415 }
2416 EXPORT_SYMBOL(mmc_can_erase);
2417 
2418 int mmc_can_trim(struct mmc_card *card)
2419 {
2420 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2421 	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2422 		return 1;
2423 	return 0;
2424 }
2425 EXPORT_SYMBOL(mmc_can_trim);
2426 
2427 int mmc_can_discard(struct mmc_card *card)
2428 {
2429 	/*
2430 	 * As there's no way to detect the discard support bit at v4.5,
2431 	 * use the s/w feature support field.
2432 	 */
2433 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2434 		return 1;
2435 	return 0;
2436 }
2437 EXPORT_SYMBOL(mmc_can_discard);
2438 
2439 int mmc_can_sanitize(struct mmc_card *card)
2440 {
2441 	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2442 		return 0;
2443 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2444 		return 1;
2445 	return 0;
2446 }
2447 EXPORT_SYMBOL(mmc_can_sanitize);
2448 
2449 int mmc_can_secure_erase_trim(struct mmc_card *card)
2450 {
2451 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2452 	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2453 		return 1;
2454 	return 0;
2455 }
2456 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2457 
2458 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2459 			    unsigned int nr)
2460 {
2461 	if (!card->erase_size)
2462 		return 0;
2463 	if (from % card->erase_size || nr % card->erase_size)
2464 		return 0;
2465 	return 1;
2466 }
2467 EXPORT_SYMBOL(mmc_erase_group_aligned);
2468 
2469 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2470 					    unsigned int arg)
2471 {
2472 	struct mmc_host *host = card->host;
2473 	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2474 	unsigned int last_timeout = 0;
2475 	unsigned int max_busy_timeout = host->max_busy_timeout ?
2476 			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2477 
2478 	if (card->erase_shift) {
2479 		max_qty = UINT_MAX >> card->erase_shift;
2480 		min_qty = card->pref_erase >> card->erase_shift;
2481 	} else if (mmc_card_sd(card)) {
2482 		max_qty = UINT_MAX;
2483 		min_qty = card->pref_erase;
2484 	} else {
2485 		max_qty = UINT_MAX / card->erase_size;
2486 		min_qty = card->pref_erase / card->erase_size;
2487 	}
2488 
2489 	/*
2490 	 * We should not use 'host->max_busy_timeout' as the only limit when
2491 	 * deciding the max discard sectors. We should strike a balance that
2492 	 * improves erase speed without letting any single timeout become
2493 	 * excessively long.
2494 	 *
2495 	 * Here we treat 'card->pref_erase' as the minimum number of discard
2496 	 * sectors, no matter the size of 'host->max_busy_timeout', but if
2497 	 * 'host->max_busy_timeout' is large enough for more discard sectors,
2498 	 * then we can continue to increase the max discard sectors until we
2499 	 * reach a balanced value. In cases when 'host->max_busy_timeout'
2500 	 * isn't specified, use the default max erase timeout.
2501 	 */
2502 	do {
2503 		y = 0;
2504 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2505 			timeout = mmc_erase_timeout(card, arg, qty + x);
2506 
2507 			if (qty + x > min_qty && timeout > max_busy_timeout)
2508 				break;
2509 
2510 			if (timeout < last_timeout)
2511 				break;
2512 			last_timeout = timeout;
2513 			y = x;
2514 		}
2515 		qty += y;
2516 	} while (y);
2517 
2518 	if (!qty)
2519 		return 0;
2520 
2521 	/*
2522 	 * When specifying a sector range to trim, chances are we might cross
2523 	 * an erase-group boundary even if the amount of sectors is less than
2524 	 * one erase-group.
2525 	 * If we can only fit one erase-group in the controller timeout budget,
2526 	 * we have to care that erase-group boundaries are not crossed by a
2527 	 * single trim operation. We flag that special case with "eg_boundary".
2528 	 * In all other cases we can just decrement qty and pretend that we
2529 	 * always touch (qty + 1) erase-groups as a simple optimization.
2530 	 */
2531 	if (qty == 1)
2532 		card->eg_boundary = 1;
2533 	else
2534 		qty--;
2535 
2536 	/* Convert qty to sectors */
2537 	if (card->erase_shift)
2538 		max_discard = qty << card->erase_shift;
2539 	else if (mmc_card_sd(card))
2540 		max_discard = qty + 1;
2541 	else
2542 		max_discard = qty * card->erase_size;
2543 
2544 	return max_discard;
2545 }
2546 
2547 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2548 {
2549 	struct mmc_host *host = card->host;
2550 	unsigned int max_discard, max_trim;
2551 
2552 	/*
2553 	 * Without erase_group_def set, MMC erase timeout depends on clock
2554 	 * frequency, which can change.  In that case, the best choice is
2555 	 * just the preferred erase size.
2556 	 */
2557 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2558 		return card->pref_erase;
2559 
2560 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2561 	if (mmc_can_trim(card)) {
2562 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2563 		if (max_trim < max_discard)
2564 			max_discard = max_trim;
2565 	} else if (max_discard < card->erase_size) {
2566 		max_discard = 0;
2567 	}
2568 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2569 		mmc_hostname(host), max_discard, host->max_busy_timeout ?
2570 		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2571 	return max_discard;
2572 }
2573 EXPORT_SYMBOL(mmc_calc_max_discard);
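
/*
 * Illustrative sketch (simplified; assumes <linux/blkdev.h>): the block
 * driver's queue setup uses this result to bound the discard requests
 * that the block layer may send down, so that each one fits within the
 * host's busy timeout.
 */
static void example_setup_discard_limit(struct request_queue *q,
					struct mmc_card *card)
{
	unsigned int max_discard = mmc_calc_max_discard(card);

	if (max_discard)
		blk_queue_max_discard_sectors(q, max_discard);
}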
2574 
2575 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2576 {
2577 	struct mmc_command cmd = {0};
2578 
2579 	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2580 	    mmc_card_hs400(card) || mmc_card_hs400es(card))
2581 		return 0;
2582 
2583 	cmd.opcode = MMC_SET_BLOCKLEN;
2584 	cmd.arg = blocklen;
2585 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2586 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2587 }
2588 EXPORT_SYMBOL(mmc_set_blocklen);
2589 
2590 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2591 			bool is_rel_write)
2592 {
2593 	struct mmc_command cmd = {0};
2594 
2595 	cmd.opcode = MMC_SET_BLOCK_COUNT;
2596 	cmd.arg = blockcount & 0x0000FFFF;
2597 	if (is_rel_write)
2598 		cmd.arg |= 1 << 31;
2599 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2600 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2601 }
2602 EXPORT_SYMBOL(mmc_set_blockcount);
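
/*
 * Illustrative sketch (hypothetical caller): CMD23 must immediately
 * precede the CMD18/CMD25 data command whose length it predefines, and
 * bit 31 of the argument tags the transfer as a reliable write.
 */
static int example_predefine_transfer_length(struct mmc_card *card,
					     unsigned int nr_blocks,
					     bool reliable)
{
	return mmc_set_blockcount(card, nr_blocks, reliable);
}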
2603 
2604 static void mmc_hw_reset_for_init(struct mmc_host *host)
2605 {
2606 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2607 		return;
2608 	host->ops->hw_reset(host);
2609 }
2610 
2611 int mmc_hw_reset(struct mmc_host *host)
2612 {
2613 	int ret;
2614 
2615 	if (!host->card)
2616 		return -EINVAL;
2617 
2618 	mmc_bus_get(host);
2619 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2620 		mmc_bus_put(host);
2621 		return -EOPNOTSUPP;
2622 	}
2623 
2624 	ret = host->bus_ops->reset(host);
2625 	mmc_bus_put(host);
2626 
2627 	if (ret)
2628 		pr_warn("%s: tried to reset card, got error %d\n",
2629 			mmc_hostname(host), ret);
2630 
2631 	return ret;
2632 }
2633 EXPORT_SYMBOL(mmc_hw_reset);
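
/*
 * Illustrative usage sketch (hypothetical recovery path): a card driver
 * that has exhausted its command retries can request a full card reset
 * and re-initialization through the bus handler.
 */
static int example_recover_card(struct mmc_host *host)
{
	int err = mmc_hw_reset(host);

	if (err == -EOPNOTSUPP)
		pr_warn("%s: card reset not supported\n", mmc_hostname(host));

	return err;
}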
2634 
2635 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2636 {
2637 	host->f_init = freq;
2638 
2639 #ifdef CONFIG_MMC_DEBUG
2640 	pr_info("%s: %s: trying to init card at %u Hz\n",
2641 		mmc_hostname(host), __func__, host->f_init);
2642 #endif
2643 	mmc_power_up(host, host->ocr_avail);
2644 
2645 	/*
2646 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2647 	 * do a hardware reset if possible.
2648 	 */
2649 	mmc_hw_reset_for_init(host);
2650 
2651 	/*
2652 	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2653 	 * if the card is being re-initialized, just send it.  CMD52
2654 	 * should be ignored by SD/eMMC cards.
2655 	 * Skip it if we already know that we do not support SDIO commands.
2656 	 */
2657 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2658 		sdio_reset(host);
2659 
2660 	mmc_go_idle(host);
2661 
2662 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2663 		mmc_send_if_cond(host, host->ocr_avail);
2664 
2665 	/* Order's important: probe SDIO, then SD, then MMC */
2666 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2667 		if (!mmc_attach_sdio(host))
2668 			return 0;
2669 
2670 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2671 		if (!mmc_attach_sd(host))
2672 			return 0;
2673 
2674 	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2675 		if (!mmc_attach_mmc(host))
2676 			return 0;
2677 
2678 	mmc_power_off(host);
2679 	return -EIO;
2680 }
2681 
2682 int _mmc_detect_card_removed(struct mmc_host *host)
2683 {
2684 	int ret;
2685 
2686 	if (!host->card || mmc_card_removed(host->card))
2687 		return 1;
2688 
2689 	ret = host->bus_ops->alive(host);
2690 
2691 	/*
2692 	 * Card detect status and alive check may be out of sync if card is
2693 	 * removed slowly, when card detect switch changes while card/slot
2694 	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2695 	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2696 	 * detect work 200ms later for this case.
2697 	 */
2698 	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2699 		mmc_detect_change(host, msecs_to_jiffies(200));
2700 		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2701 	}
2702 
2703 	if (ret) {
2704 		mmc_card_set_removed(host->card);
2705 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2706 	}
2707 
2708 	return ret;
2709 }
2710 
2711 int mmc_detect_card_removed(struct mmc_host *host)
2712 {
2713 	struct mmc_card *card = host->card;
2714 	int ret;
2715 
2716 	WARN_ON(!host->claimed);
2717 
2718 	if (!card)
2719 		return 1;
2720 
2721 	if (!mmc_card_is_removable(host))
2722 		return 0;
2723 
2724 	ret = mmc_card_removed(card);
2725 	/*
2726 	 * The card will be considered unchanged unless we have been asked to
2727 	 * detect a change or host requires polling to provide card detection.
2728 	 */
2729 	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2730 		return ret;
2731 
2732 	host->detect_change = 0;
2733 	if (!ret) {
2734 		ret = _mmc_detect_card_removed(host);
2735 		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2736 			/*
2737 			 * Schedule a detect work as soon as possible to let a
2738 			 * rescan handle the card removal.
2739 			 */
2740 			cancel_delayed_work(&host->detect);
2741 			_mmc_detect_change(host, 0, false);
2742 		}
2743 	}
2744 
2745 	return ret;
2746 }
2747 EXPORT_SYMBOL(mmc_detect_card_removed);
2748 
2749 void mmc_rescan(struct work_struct *work)
2750 {
2751 	struct mmc_host *host =
2752 		container_of(work, struct mmc_host, detect.work);
2753 	int i;
2754 
2755 	if (host->rescan_disable)
2756 		return;
2757 
2758 	/* If there is a non-removable card registered, only scan once */
2759 	if (!mmc_card_is_removable(host) && host->rescan_entered)
2760 		return;
2761 	host->rescan_entered = 1;
2762 
2763 	if (host->trigger_card_event && host->ops->card_event) {
2764 		mmc_claim_host(host);
2765 		host->ops->card_event(host);
2766 		mmc_release_host(host);
2767 		host->trigger_card_event = false;
2768 	}
2769 
2770 	mmc_bus_get(host);
2771 
2772 	/*
2773 	 * if there is a _removable_ card registered, check whether it is
2774 	 * still present
2775 	 */
2776 	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2777 		host->bus_ops->detect(host);
2778 
2779 	host->detect_change = 0;
2780 
2781 	/*
2782 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2783 	 * the card is no longer present.
2784 	 */
2785 	mmc_bus_put(host);
2786 	mmc_bus_get(host);
2787 
2788 	/* if there still is a card present, stop here */
2789 	if (host->bus_ops != NULL) {
2790 		mmc_bus_put(host);
2791 		goto out;
2792 	}
2793 
2794 	/*
2795 	 * Only we can add a new handler, so it's safe to
2796 	 * release the lock here.
2797 	 */
2798 	mmc_bus_put(host);
2799 
2800 	mmc_claim_host(host);
2801 	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2802 			host->ops->get_cd(host) == 0) {
2803 		mmc_power_off(host);
2804 		mmc_release_host(host);
2805 		goto out;
2806 	}
2807 
2808 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2809 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2810 			break;
2811 		if (freqs[i] <= host->f_min)
2812 			break;
2813 	}
2814 	mmc_release_host(host);
2815 
2816  out:
2817 	if (host->caps & MMC_CAP_NEEDS_POLL)
2818 		mmc_schedule_delayed_work(&host->detect, HZ);
2819 }
2820 
2821 void mmc_start_host(struct mmc_host *host)
2822 {
2823 	host->f_init = max(freqs[0], host->f_min);
2824 	host->rescan_disable = 0;
2825 	host->ios.power_mode = MMC_POWER_UNDEFINED;
2826 
2827 	mmc_claim_host(host);
2828 	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2829 		mmc_power_off(host);
2830 	else
2831 		mmc_power_up(host, host->ocr_avail);
2832 	mmc_release_host(host);
2833 
2834 	mmc_gpiod_request_cd_irq(host);
2835 	_mmc_detect_change(host, 0, false);
2836 }
2837 
2838 void mmc_stop_host(struct mmc_host *host)
2839 {
2840 #ifdef CONFIG_MMC_DEBUG
2841 	unsigned long flags;
2842 	spin_lock_irqsave(&host->lock, flags);
2843 	host->removed = 1;
2844 	spin_unlock_irqrestore(&host->lock, flags);
2845 #endif
2846 	if (host->slot.cd_irq >= 0)
2847 		disable_irq(host->slot.cd_irq);
2848 
2849 	host->rescan_disable = 1;
2850 	cancel_delayed_work_sync(&host->detect);
2851 
2852 	/* clear pm flags now and let card drivers set them as needed */
2853 	host->pm_flags = 0;
2854 
2855 	mmc_bus_get(host);
2856 	if (host->bus_ops && !host->bus_dead) {
2857 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2858 		host->bus_ops->remove(host);
2859 		mmc_claim_host(host);
2860 		mmc_detach_bus(host);
2861 		mmc_power_off(host);
2862 		mmc_release_host(host);
2863 		mmc_bus_put(host);
2864 		return;
2865 	}
2866 	mmc_bus_put(host);
2867 
2868 	BUG_ON(host->card);
2869 
2870 	mmc_claim_host(host);
2871 	mmc_power_off(host);
2872 	mmc_release_host(host);
2873 }
2874 
2875 int mmc_power_save_host(struct mmc_host *host)
2876 {
2877 	int ret = 0;
2878 
2879 #ifdef CONFIG_MMC_DEBUG
2880 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2881 #endif
2882 
2883 	mmc_bus_get(host);
2884 
2885 	if (!host->bus_ops || host->bus_dead) {
2886 		mmc_bus_put(host);
2887 		return -EINVAL;
2888 	}
2889 
2890 	if (host->bus_ops->power_save)
2891 		ret = host->bus_ops->power_save(host);
2892 
2893 	mmc_bus_put(host);
2894 
2895 	mmc_power_off(host);
2896 
2897 	return ret;
2898 }
2899 EXPORT_SYMBOL(mmc_power_save_host);
2900 
2901 int mmc_power_restore_host(struct mmc_host *host)
2902 {
2903 	int ret;
2904 
2905 #ifdef CONFIG_MMC_DEBUG
2906 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2907 #endif
2908 
2909 	mmc_bus_get(host);
2910 
2911 	if (!host->bus_ops || host->bus_dead) {
2912 		mmc_bus_put(host);
2913 		return -EINVAL;
2914 	}
2915 
2916 	mmc_power_up(host, host->card->ocr);
2917 	ret = host->bus_ops->power_restore(host);
2918 
2919 	mmc_bus_put(host);
2920 
2921 	return ret;
2922 }
2923 EXPORT_SYMBOL(mmc_power_restore_host);
2924 
2925 /*
2926  * Flush the cache to the non-volatile storage.
2927  */
2928 int mmc_flush_cache(struct mmc_card *card)
2929 {
2930 	int err = 0;
2931 
2932 	if (mmc_card_mmc(card) &&
2933 			(card->ext_csd.cache_size > 0) &&
2934 			(card->ext_csd.cache_ctrl & 1)) {
2935 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2936 				EXT_CSD_FLUSH_CACHE, 1, 0);
2937 		if (err)
2938 			pr_err("%s: cache flush error %d\n",
2939 					mmc_hostname(card->host), err);
2940 	}
2941 
2942 	return err;
2943 }
2944 EXPORT_SYMBOL(mmc_flush_cache);
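
/*
 * Illustrative usage sketch (hypothetical helper): callers such as the
 * block driver's flush handling invoke this before the card may lose
 * power, with the host claimed.
 */
static int example_flush_before_poweroff(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_flush_cache(card);
	mmc_release_host(card->host);

	return err;
}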
2945 
2946 #ifdef CONFIG_PM_SLEEP
2947 /*
2948  * Do the card removal on suspend if the card is assumed removable.  Do it
2949  * in the PM notifier while userspace isn't yet frozen, so the card can sync.
2950  */
2951 static int mmc_pm_notify(struct notifier_block *notify_block,
2952 			unsigned long mode, void *unused)
2953 {
2954 	struct mmc_host *host = container_of(
2955 		notify_block, struct mmc_host, pm_notify);
2956 	unsigned long flags;
2957 	int err = 0;
2958 
2959 	switch (mode) {
2960 	case PM_HIBERNATION_PREPARE:
2961 	case PM_SUSPEND_PREPARE:
2962 	case PM_RESTORE_PREPARE:
2963 		spin_lock_irqsave(&host->lock, flags);
2964 		host->rescan_disable = 1;
2965 		spin_unlock_irqrestore(&host->lock, flags);
2966 		cancel_delayed_work_sync(&host->detect);
2967 
2968 		if (!host->bus_ops)
2969 			break;
2970 
2971 		/* Validate prerequisites for suspend */
2972 		if (host->bus_ops->pre_suspend)
2973 			err = host->bus_ops->pre_suspend(host);
2974 		if (!err)
2975 			break;
2976 
2977 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2978 		host->bus_ops->remove(host);
2979 		mmc_claim_host(host);
2980 		mmc_detach_bus(host);
2981 		mmc_power_off(host);
2982 		mmc_release_host(host);
2983 		host->pm_flags = 0;
2984 		break;
2985 
2986 	case PM_POST_SUSPEND:
2987 	case PM_POST_HIBERNATION:
2988 	case PM_POST_RESTORE:
2989 
2990 		spin_lock_irqsave(&host->lock, flags);
2991 		host->rescan_disable = 0;
2992 		spin_unlock_irqrestore(&host->lock, flags);
2993 		_mmc_detect_change(host, 0, false);
2994 
2995 	}
2996 
2997 	return 0;
2998 }
2999 
3000 void mmc_register_pm_notifier(struct mmc_host *host)
3001 {
3002 	host->pm_notify.notifier_call = mmc_pm_notify;
3003 	register_pm_notifier(&host->pm_notify);
3004 }
3005 
3006 void mmc_unregister_pm_notifier(struct mmc_host *host)
3007 {
3008 	unregister_pm_notifier(&host->pm_notify);
3009 }
3010 #endif
3011 
3012 /**
3013  * mmc_init_context_info() - init synchronization context
3014  * @host: mmc host
3015  *
3016  * Init the struct context_info needed to implement the asynchronous
3017  * request mechanism, used by the mmc core, host drivers and mmc
3018  * request suppliers.
3019  */
3020 void mmc_init_context_info(struct mmc_host *host)
3021 {
3022 	spin_lock_init(&host->context_info.lock);
3023 	host->context_info.is_new_req = false;
3024 	host->context_info.is_done_rcv = false;
3025 	host->context_info.is_waiting_last_req = false;
3026 	init_waitqueue_head(&host->context_info.wait);
3027 }
3028 
3029 static int __init mmc_init(void)
3030 {
3031 	int ret;
3032 
3033 	ret = mmc_register_bus();
3034 	if (ret)
3035 		return ret;
3036 
3037 	ret = mmc_register_host_class();
3038 	if (ret)
3039 		goto unregister_bus;
3040 
3041 	ret = sdio_register_bus();
3042 	if (ret)
3043 		goto unregister_host_class;
3044 
3045 	return 0;
3046 
3047 unregister_host_class:
3048 	mmc_unregister_host_class();
3049 unregister_bus:
3050 	mmc_unregister_bus();
3051 	return ret;
3052 }
3053 
3054 static void __exit mmc_exit(void)
3055 {
3056 	sdio_unregister_bus();
3057 	mmc_unregister_host_class();
3058 	mmc_unregister_bus();
3059 }
3060 
3061 subsys_initcall(mmc_init);
3062 module_exit(mmc_exit);
3063 
3064 MODULE_LICENSE("GPL");
3065