xref: /linux/drivers/mmc/core/core.c (revision 32786fdc9506aeba98278c1844d4bfb766863832)
1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
31 #include <linux/of.h>
32 
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/mmc.h>
41 
42 #include "core.h"
43 #include "bus.h"
44 #include "host.h"
45 #include "sdio_bus.h"
46 #include "pwrseq.h"
47 
48 #include "mmc_ops.h"
49 #include "sd_ops.h"
50 #include "sdio_ops.h"
51 
52 /* If the device is not responding */
53 #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
54 
55 /*
56  * Background operations can take a long time, depending on the housekeeping
57  * operations the card has to perform.
58  */
59 #define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
60 
61 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
62 #define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
63 
64 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
65 
66 /*
67  * Enabling software CRCs on the data blocks can be a significant (30%)
68  * performance cost, and for other reasons may not always be desired.
69  * So we allow it it to be disabled.
70  */
71 bool use_spi_crc = 1;
72 module_param(use_spi_crc, bool, 0);
73 
74 static int mmc_schedule_delayed_work(struct delayed_work *work,
75 				     unsigned long delay)
76 {
77 	/*
78 	 * We use the system_freezable_wq, because of two reasons.
79 	 * First, it allows several works (not the same work item) to be
80 	 * executed simultaneously. Second, the queue becomes frozen when
81 	 * userspace becomes frozen during system PM.
82 	 */
83 	return queue_delayed_work(system_freezable_wq, work, delay);
84 }
85 
86 #ifdef CONFIG_FAIL_MMC_REQUEST
87 
88 /*
89  * Internal function. Inject random data errors.
90  * If mmc_data is NULL no errors are injected.
91  */
92 static void mmc_should_fail_request(struct mmc_host *host,
93 				    struct mmc_request *mrq)
94 {
95 	struct mmc_command *cmd = mrq->cmd;
96 	struct mmc_data *data = mrq->data;
97 	static const int data_errors[] = {
98 		-ETIMEDOUT,
99 		-EILSEQ,
100 		-EIO,
101 	};
102 
103 	if (!data)
104 		return;
105 
106 	if (cmd->error || data->error ||
107 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
108 		return;
109 
110 	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
111 	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
112 }
113 
114 #else /* CONFIG_FAIL_MMC_REQUEST */
115 
116 static inline void mmc_should_fail_request(struct mmc_host *host,
117 					   struct mmc_request *mrq)
118 {
119 }
120 
121 #endif /* CONFIG_FAIL_MMC_REQUEST */
122 
123 static inline void mmc_complete_cmd(struct mmc_request *mrq)
124 {
125 	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
126 		complete_all(&mrq->cmd_completion);
127 }
128 
129 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
130 {
131 	if (!mrq->cap_cmd_during_tfr)
132 		return;
133 
134 	mmc_complete_cmd(mrq);
135 
136 	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
137 		 mmc_hostname(host), mrq->cmd->opcode);
138 }
139 EXPORT_SYMBOL(mmc_command_done);
140 
141 /**
142  *	mmc_request_done - finish processing an MMC request
143  *	@host: MMC host which completed request
144  *	@mrq: MMC request which request
145  *
146  *	MMC drivers should call this function when they have completed
147  *	their processing of a request.
148  */
149 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
150 {
151 	struct mmc_command *cmd = mrq->cmd;
152 	int err = cmd->error;
153 
154 	/* Flag re-tuning needed on CRC errors */
155 	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
156 	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
157 	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
158 	    (mrq->data && mrq->data->error == -EILSEQ) ||
159 	    (mrq->stop && mrq->stop->error == -EILSEQ)))
160 		mmc_retune_needed(host);
161 
162 	if (err && cmd->retries && mmc_host_is_spi(host)) {
163 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
164 			cmd->retries = 0;
165 	}
166 
167 	if (host->ongoing_mrq == mrq)
168 		host->ongoing_mrq = NULL;
169 
170 	mmc_complete_cmd(mrq);
171 
172 	trace_mmc_request_done(host, mrq);
173 
174 	if (err && cmd->retries && !mmc_card_removed(host->card)) {
175 		/*
176 		 * Request starter must handle retries - see
177 		 * mmc_wait_for_req_done().
178 		 */
179 		if (mrq->done)
180 			mrq->done(mrq);
181 	} else {
182 		mmc_should_fail_request(host, mrq);
183 
184 		if (!host->ongoing_mrq)
185 			led_trigger_event(host->led, LED_OFF);
186 
187 		if (mrq->sbc) {
188 			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
189 				mmc_hostname(host), mrq->sbc->opcode,
190 				mrq->sbc->error,
191 				mrq->sbc->resp[0], mrq->sbc->resp[1],
192 				mrq->sbc->resp[2], mrq->sbc->resp[3]);
193 		}
194 
195 		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
196 			mmc_hostname(host), cmd->opcode, err,
197 			cmd->resp[0], cmd->resp[1],
198 			cmd->resp[2], cmd->resp[3]);
199 
200 		if (mrq->data) {
201 			pr_debug("%s:     %d bytes transferred: %d\n",
202 				mmc_hostname(host),
203 				mrq->data->bytes_xfered, mrq->data->error);
204 		}
205 
206 		if (mrq->stop) {
207 			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
208 				mmc_hostname(host), mrq->stop->opcode,
209 				mrq->stop->error,
210 				mrq->stop->resp[0], mrq->stop->resp[1],
211 				mrq->stop->resp[2], mrq->stop->resp[3]);
212 		}
213 
214 		if (mrq->done)
215 			mrq->done(mrq);
216 	}
217 }
218 
219 EXPORT_SYMBOL(mmc_request_done);
220 
221 static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
222 {
223 	int err;
224 
225 	/* Assumes host controller has been runtime resumed by mmc_claim_host */
226 	err = mmc_retune(host);
227 	if (err) {
228 		mrq->cmd->error = err;
229 		mmc_request_done(host, mrq);
230 		return;
231 	}
232 
233 	/*
234 	 * For sdio rw commands we must wait for card busy otherwise some
235 	 * sdio devices won't work properly.
236 	 */
237 	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
238 		int tries = 500; /* Wait aprox 500ms at maximum */
239 
240 		while (host->ops->card_busy(host) && --tries)
241 			mmc_delay(1);
242 
243 		if (tries == 0) {
244 			mrq->cmd->error = -EBUSY;
245 			mmc_request_done(host, mrq);
246 			return;
247 		}
248 	}
249 
250 	if (mrq->cap_cmd_during_tfr) {
251 		host->ongoing_mrq = mrq;
252 		/*
253 		 * Retry path could come through here without having waiting on
254 		 * cmd_completion, so ensure it is reinitialised.
255 		 */
256 		reinit_completion(&mrq->cmd_completion);
257 	}
258 
259 	trace_mmc_request_start(host, mrq);
260 
261 	host->ops->request(host, mrq);
262 }
263 
264 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
265 {
266 #ifdef CONFIG_MMC_DEBUG
267 	unsigned int i, sz;
268 	struct scatterlist *sg;
269 #endif
270 	mmc_retune_hold(host);
271 
272 	if (mmc_card_removed(host->card))
273 		return -ENOMEDIUM;
274 
275 	if (mrq->sbc) {
276 		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
277 			 mmc_hostname(host), mrq->sbc->opcode,
278 			 mrq->sbc->arg, mrq->sbc->flags);
279 	}
280 
281 	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
282 		 mmc_hostname(host), mrq->cmd->opcode,
283 		 mrq->cmd->arg, mrq->cmd->flags);
284 
285 	if (mrq->data) {
286 		pr_debug("%s:     blksz %d blocks %d flags %08x "
287 			"tsac %d ms nsac %d\n",
288 			mmc_hostname(host), mrq->data->blksz,
289 			mrq->data->blocks, mrq->data->flags,
290 			mrq->data->timeout_ns / 1000000,
291 			mrq->data->timeout_clks);
292 	}
293 
294 	if (mrq->stop) {
295 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
296 			 mmc_hostname(host), mrq->stop->opcode,
297 			 mrq->stop->arg, mrq->stop->flags);
298 	}
299 
300 	WARN_ON(!host->claimed);
301 
302 	mrq->cmd->error = 0;
303 	mrq->cmd->mrq = mrq;
304 	if (mrq->sbc) {
305 		mrq->sbc->error = 0;
306 		mrq->sbc->mrq = mrq;
307 	}
308 	if (mrq->data) {
309 		if (mrq->data->blksz > host->max_blk_size ||
310 		    mrq->data->blocks > host->max_blk_count ||
311 		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
312 			return -EINVAL;
313 #ifdef CONFIG_MMC_DEBUG
314 		sz = 0;
315 		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
316 			sz += sg->length;
317 		if (sz != mrq->data->blocks * mrq->data->blksz)
318 			return -EINVAL;
319 #endif
320 
321 		mrq->cmd->data = mrq->data;
322 		mrq->data->error = 0;
323 		mrq->data->mrq = mrq;
324 		if (mrq->stop) {
325 			mrq->data->stop = mrq->stop;
326 			mrq->stop->error = 0;
327 			mrq->stop->mrq = mrq;
328 		}
329 	}
330 	led_trigger_event(host->led, LED_FULL);
331 	__mmc_start_request(host, mrq);
332 
333 	return 0;
334 }
335 
336 /**
337  *	mmc_start_bkops - start BKOPS for supported cards
338  *	@card: MMC card to start BKOPS
339  *	@form_exception: A flag to indicate if this function was
340  *			 called due to an exception raised by the card
341  *
342  *	Start background operations whenever requested.
343  *	When the urgent BKOPS bit is set in a R1 command response
344  *	then background operations should be started immediately.
345 */
346 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
347 {
348 	int err;
349 	int timeout;
350 	bool use_busy_signal;
351 
352 	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
353 		return;
354 
355 	err = mmc_read_bkops_status(card);
356 	if (err) {
357 		pr_err("%s: Failed to read bkops status: %d\n",
358 		       mmc_hostname(card->host), err);
359 		return;
360 	}
361 
362 	if (!card->ext_csd.raw_bkops_status)
363 		return;
364 
365 	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
366 	    from_exception)
367 		return;
368 
369 	mmc_claim_host(card->host);
370 	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
371 		timeout = MMC_BKOPS_MAX_TIMEOUT;
372 		use_busy_signal = true;
373 	} else {
374 		timeout = 0;
375 		use_busy_signal = false;
376 	}
377 
378 	mmc_retune_hold(card->host);
379 
380 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
381 			EXT_CSD_BKOPS_START, 1, timeout, 0,
382 			use_busy_signal, true, false);
383 	if (err) {
384 		pr_warn("%s: Error %d starting bkops\n",
385 			mmc_hostname(card->host), err);
386 		mmc_retune_release(card->host);
387 		goto out;
388 	}
389 
390 	/*
391 	 * For urgent bkops status (LEVEL_2 and more)
392 	 * bkops executed synchronously, otherwise
393 	 * the operation is in progress
394 	 */
395 	if (!use_busy_signal)
396 		mmc_card_set_doing_bkops(card);
397 	else
398 		mmc_retune_release(card->host);
399 out:
400 	mmc_release_host(card->host);
401 }
402 EXPORT_SYMBOL(mmc_start_bkops);
403 
404 /*
405  * mmc_wait_data_done() - done callback for data request
406  * @mrq: done data request
407  *
408  * Wakes up mmc context, passed as a callback to host controller driver
409  */
410 static void mmc_wait_data_done(struct mmc_request *mrq)
411 {
412 	struct mmc_context_info *context_info = &mrq->host->context_info;
413 
414 	context_info->is_done_rcv = true;
415 	wake_up_interruptible(&context_info->wait);
416 }
417 
418 static void mmc_wait_done(struct mmc_request *mrq)
419 {
420 	complete(&mrq->completion);
421 }
422 
423 static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
424 {
425 	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
426 
427 	/*
428 	 * If there is an ongoing transfer, wait for the command line to become
429 	 * available.
430 	 */
431 	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
432 		wait_for_completion(&ongoing_mrq->cmd_completion);
433 }
434 
435 /*
436  *__mmc_start_data_req() - starts data request
437  * @host: MMC host to start the request
438  * @mrq: data request to start
439  *
440  * Sets the done callback to be called when request is completed by the card.
441  * Starts data mmc request execution
442  * If an ongoing transfer is already in progress, wait for the command line
443  * to become available before sending another command.
444  */
445 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
446 {
447 	int err;
448 
449 	mmc_wait_ongoing_tfr_cmd(host);
450 
451 	mrq->done = mmc_wait_data_done;
452 	mrq->host = host;
453 
454 	init_completion(&mrq->cmd_completion);
455 
456 	err = mmc_start_request(host, mrq);
457 	if (err) {
458 		mrq->cmd->error = err;
459 		mmc_complete_cmd(mrq);
460 		mmc_wait_data_done(mrq);
461 	}
462 
463 	return err;
464 }
465 
466 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
467 {
468 	int err;
469 
470 	mmc_wait_ongoing_tfr_cmd(host);
471 
472 	init_completion(&mrq->completion);
473 	mrq->done = mmc_wait_done;
474 
475 	init_completion(&mrq->cmd_completion);
476 
477 	err = mmc_start_request(host, mrq);
478 	if (err) {
479 		mrq->cmd->error = err;
480 		mmc_complete_cmd(mrq);
481 		complete(&mrq->completion);
482 	}
483 
484 	return err;
485 }
486 
487 /*
488  * mmc_wait_for_data_req_done() - wait for request completed
489  * @host: MMC host to prepare the command.
490  * @mrq: MMC request to wait for
491  *
492  * Blocks MMC context till host controller will ack end of data request
493  * execution or new request notification arrives from the block layer.
494  * Handles command retries.
495  *
496  * Returns enum mmc_blk_status after checking errors.
497  */
498 static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
499 				      struct mmc_request *mrq,
500 				      struct mmc_async_req *next_req)
501 {
502 	struct mmc_command *cmd;
503 	struct mmc_context_info *context_info = &host->context_info;
504 	enum mmc_blk_status status;
505 
506 	while (1) {
507 		wait_event_interruptible(context_info->wait,
508 				(context_info->is_done_rcv ||
509 				 context_info->is_new_req));
510 		context_info->is_waiting_last_req = false;
511 		if (context_info->is_done_rcv) {
512 			context_info->is_done_rcv = false;
513 			cmd = mrq->cmd;
514 
515 			if (!cmd->error || !cmd->retries ||
516 			    mmc_card_removed(host->card)) {
517 				status = host->areq->err_check(host->card,
518 							       host->areq);
519 				break; /* return status */
520 			} else {
521 				mmc_retune_recheck(host);
522 				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
523 					mmc_hostname(host),
524 					cmd->opcode, cmd->error);
525 				cmd->retries--;
526 				cmd->error = 0;
527 				__mmc_start_request(host, mrq);
528 				continue; /* wait for done/new event again */
529 			}
530 		} else if (context_info->is_new_req) {
531 			if (!next_req)
532 				return MMC_BLK_NEW_REQUEST;
533 		}
534 	}
535 	mmc_retune_release(host);
536 	return status;
537 }
538 
539 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
540 {
541 	struct mmc_command *cmd;
542 
543 	while (1) {
544 		wait_for_completion(&mrq->completion);
545 
546 		cmd = mrq->cmd;
547 
548 		/*
549 		 * If host has timed out waiting for the sanitize
550 		 * to complete, card might be still in programming state
551 		 * so let's try to bring the card out of programming
552 		 * state.
553 		 */
554 		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
555 			if (!mmc_interrupt_hpi(host->card)) {
556 				pr_warn("%s: %s: Interrupted sanitize\n",
557 					mmc_hostname(host), __func__);
558 				cmd->error = 0;
559 				break;
560 			} else {
561 				pr_err("%s: %s: Failed to interrupt sanitize\n",
562 				       mmc_hostname(host), __func__);
563 			}
564 		}
565 		if (!cmd->error || !cmd->retries ||
566 		    mmc_card_removed(host->card))
567 			break;
568 
569 		mmc_retune_recheck(host);
570 
571 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
572 			 mmc_hostname(host), cmd->opcode, cmd->error);
573 		cmd->retries--;
574 		cmd->error = 0;
575 		__mmc_start_request(host, mrq);
576 	}
577 
578 	mmc_retune_release(host);
579 }
580 EXPORT_SYMBOL(mmc_wait_for_req_done);
581 
582 /**
583  *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
584  *	@host: MMC host
585  *	@mrq: MMC request
586  *
587  *	mmc_is_req_done() is used with requests that have
588  *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
589  *	starting a request and before waiting for it to complete. That is,
590  *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
591  *	and before mmc_wait_for_req_done(). If it is called at other times the
592  *	result is not meaningful.
593  */
594 bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
595 {
596 	if (host->areq)
597 		return host->context_info.is_done_rcv;
598 	else
599 		return completion_done(&mrq->completion);
600 }
601 EXPORT_SYMBOL(mmc_is_req_done);
602 
603 /**
604  *	mmc_pre_req - Prepare for a new request
605  *	@host: MMC host to prepare command
606  *	@mrq: MMC request to prepare for
607  *
608  *	mmc_pre_req() is called in prior to mmc_start_req() to let
609  *	host prepare for the new request. Preparation of a request may be
610  *	performed while another request is running on the host.
611  */
612 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
613 {
614 	if (host->ops->pre_req)
615 		host->ops->pre_req(host, mrq);
616 }
617 
618 /**
619  *	mmc_post_req - Post process a completed request
620  *	@host: MMC host to post process command
621  *	@mrq: MMC request to post process for
622  *	@err: Error, if non zero, clean up any resources made in pre_req
623  *
624  *	Let the host post process a completed request. Post processing of
625  *	a request may be performed while another reuqest is running.
626  */
627 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
628 			 int err)
629 {
630 	if (host->ops->post_req)
631 		host->ops->post_req(host, mrq, err);
632 }
633 
634 /**
635  *	mmc_start_req - start a non-blocking request
636  *	@host: MMC host to start command
637  *	@areq: async request to start
638  *	@error: out parameter returns 0 for success, otherwise non zero
639  *
640  *	Start a new MMC custom command request for a host.
641  *	If there is on ongoing async request wait for completion
642  *	of that request and start the new one and return.
643  *	Does not wait for the new request to complete.
644  *
645  *      Returns the completed request, NULL in case of none completed.
646  *	Wait for the an ongoing request (previoulsy started) to complete and
647  *	return the completed request. If there is no ongoing request, NULL
648  *	is returned without waiting. NULL is not an error condition.
649  */
650 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
651 				    struct mmc_async_req *areq,
652 				    enum mmc_blk_status *ret_stat)
653 {
654 	enum mmc_blk_status status = MMC_BLK_SUCCESS;
655 	int start_err = 0;
656 	struct mmc_async_req *data = host->areq;
657 
658 	/* Prepare a new request */
659 	if (areq)
660 		mmc_pre_req(host, areq->mrq);
661 
662 	if (host->areq) {
663 		status = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
664 		if (status == MMC_BLK_NEW_REQUEST) {
665 			if (ret_stat)
666 				*ret_stat = status;
667 			/*
668 			 * The previous request was not completed,
669 			 * nothing to return
670 			 */
671 			return NULL;
672 		}
673 		/*
674 		 * Check BKOPS urgency for each R1 response
675 		 */
676 		if (host->card && mmc_card_mmc(host->card) &&
677 		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
678 		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
679 		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
680 
681 			/* Cancel the prepared request */
682 			if (areq)
683 				mmc_post_req(host, areq->mrq, -EINVAL);
684 
685 			mmc_start_bkops(host->card, true);
686 
687 			/* prepare the request again */
688 			if (areq)
689 				mmc_pre_req(host, areq->mrq);
690 		}
691 	}
692 
693 	if (status == MMC_BLK_SUCCESS && areq)
694 		start_err = __mmc_start_data_req(host, areq->mrq);
695 
696 	if (host->areq)
697 		mmc_post_req(host, host->areq->mrq, 0);
698 
699 	 /* Cancel a prepared request if it was not started. */
700 	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
701 		mmc_post_req(host, areq->mrq, -EINVAL);
702 
703 	if (status != MMC_BLK_SUCCESS)
704 		host->areq = NULL;
705 	else
706 		host->areq = areq;
707 
708 	if (ret_stat)
709 		*ret_stat = status;
710 	return data;
711 }
712 EXPORT_SYMBOL(mmc_start_req);
713 
714 /**
715  *	mmc_wait_for_req - start a request and wait for completion
716  *	@host: MMC host to start command
717  *	@mrq: MMC request to start
718  *
719  *	Start a new MMC custom command request for a host, and wait
720  *	for the command to complete. In the case of 'cap_cmd_during_tfr'
721  *	requests, the transfer is ongoing and the caller can issue further
722  *	commands that do not use the data lines, and then wait by calling
723  *	mmc_wait_for_req_done().
724  *	Does not attempt to parse the response.
725  */
726 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
727 {
728 	__mmc_start_req(host, mrq);
729 
730 	if (!mrq->cap_cmd_during_tfr)
731 		mmc_wait_for_req_done(host, mrq);
732 }
733 EXPORT_SYMBOL(mmc_wait_for_req);
734 
735 /**
736  *	mmc_interrupt_hpi - Issue for High priority Interrupt
737  *	@card: the MMC card associated with the HPI transfer
738  *
739  *	Issued High Priority Interrupt, and check for card status
740  *	until out-of prg-state.
741  */
742 int mmc_interrupt_hpi(struct mmc_card *card)
743 {
744 	int err;
745 	u32 status;
746 	unsigned long prg_wait;
747 
748 	if (!card->ext_csd.hpi_en) {
749 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
750 		return 1;
751 	}
752 
753 	mmc_claim_host(card->host);
754 	err = mmc_send_status(card, &status);
755 	if (err) {
756 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
757 		goto out;
758 	}
759 
760 	switch (R1_CURRENT_STATE(status)) {
761 	case R1_STATE_IDLE:
762 	case R1_STATE_READY:
763 	case R1_STATE_STBY:
764 	case R1_STATE_TRAN:
765 		/*
766 		 * In idle and transfer states, HPI is not needed and the caller
767 		 * can issue the next intended command immediately
768 		 */
769 		goto out;
770 	case R1_STATE_PRG:
771 		break;
772 	default:
773 		/* In all other states, it's illegal to issue HPI */
774 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
775 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
776 		err = -EINVAL;
777 		goto out;
778 	}
779 
780 	err = mmc_send_hpi_cmd(card, &status);
781 	if (err)
782 		goto out;
783 
784 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
785 	do {
786 		err = mmc_send_status(card, &status);
787 
788 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
789 			break;
790 		if (time_after(jiffies, prg_wait))
791 			err = -ETIMEDOUT;
792 	} while (!err);
793 
794 out:
795 	mmc_release_host(card->host);
796 	return err;
797 }
798 EXPORT_SYMBOL(mmc_interrupt_hpi);
799 
800 /**
801  *	mmc_wait_for_cmd - start a command and wait for completion
802  *	@host: MMC host to start command
803  *	@cmd: MMC command to start
804  *	@retries: maximum number of retries
805  *
806  *	Start a new MMC command for a host, and wait for the command
807  *	to complete.  Return any error that occurred while the command
808  *	was executing.  Do not attempt to parse the response.
809  */
810 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
811 {
812 	struct mmc_request mrq = {NULL};
813 
814 	WARN_ON(!host->claimed);
815 
816 	memset(cmd->resp, 0, sizeof(cmd->resp));
817 	cmd->retries = retries;
818 
819 	mrq.cmd = cmd;
820 	cmd->data = NULL;
821 
822 	mmc_wait_for_req(host, &mrq);
823 
824 	return cmd->error;
825 }
826 
827 EXPORT_SYMBOL(mmc_wait_for_cmd);
828 
829 /**
830  *	mmc_stop_bkops - stop ongoing BKOPS
831  *	@card: MMC card to check BKOPS
832  *
833  *	Send HPI command to stop ongoing background operations to
834  *	allow rapid servicing of foreground operations, e.g. read/
835  *	writes. Wait until the card comes out of the programming state
836  *	to avoid errors in servicing read/write requests.
837  */
838 int mmc_stop_bkops(struct mmc_card *card)
839 {
840 	int err = 0;
841 
842 	err = mmc_interrupt_hpi(card);
843 
844 	/*
845 	 * If err is EINVAL, we can't issue an HPI.
846 	 * It should complete the BKOPS.
847 	 */
848 	if (!err || (err == -EINVAL)) {
849 		mmc_card_clr_doing_bkops(card);
850 		mmc_retune_release(card->host);
851 		err = 0;
852 	}
853 
854 	return err;
855 }
856 EXPORT_SYMBOL(mmc_stop_bkops);
857 
858 int mmc_read_bkops_status(struct mmc_card *card)
859 {
860 	int err;
861 	u8 *ext_csd;
862 
863 	mmc_claim_host(card->host);
864 	err = mmc_get_ext_csd(card, &ext_csd);
865 	mmc_release_host(card->host);
866 	if (err)
867 		return err;
868 
869 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
870 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
871 	kfree(ext_csd);
872 	return 0;
873 }
874 EXPORT_SYMBOL(mmc_read_bkops_status);
875 
876 /**
877  *	mmc_set_data_timeout - set the timeout for a data command
878  *	@data: data phase for command
879  *	@card: the MMC card associated with the data transfer
880  *
881  *	Computes the data timeout parameters according to the
882  *	correct algorithm given the card type.
883  */
884 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
885 {
886 	unsigned int mult;
887 
888 	/*
889 	 * SDIO cards only define an upper 1 s limit on access.
890 	 */
891 	if (mmc_card_sdio(card)) {
892 		data->timeout_ns = 1000000000;
893 		data->timeout_clks = 0;
894 		return;
895 	}
896 
897 	/*
898 	 * SD cards use a 100 multiplier rather than 10
899 	 */
900 	mult = mmc_card_sd(card) ? 100 : 10;
901 
902 	/*
903 	 * Scale up the multiplier (and therefore the timeout) by
904 	 * the r2w factor for writes.
905 	 */
906 	if (data->flags & MMC_DATA_WRITE)
907 		mult <<= card->csd.r2w_factor;
908 
909 	data->timeout_ns = card->csd.tacc_ns * mult;
910 	data->timeout_clks = card->csd.tacc_clks * mult;
911 
912 	/*
913 	 * SD cards also have an upper limit on the timeout.
914 	 */
915 	if (mmc_card_sd(card)) {
916 		unsigned int timeout_us, limit_us;
917 
918 		timeout_us = data->timeout_ns / 1000;
919 		if (card->host->ios.clock)
920 			timeout_us += data->timeout_clks * 1000 /
921 				(card->host->ios.clock / 1000);
922 
923 		if (data->flags & MMC_DATA_WRITE)
924 			/*
925 			 * The MMC spec "It is strongly recommended
926 			 * for hosts to implement more than 500ms
927 			 * timeout value even if the card indicates
928 			 * the 250ms maximum busy length."  Even the
929 			 * previous value of 300ms is known to be
930 			 * insufficient for some cards.
931 			 */
932 			limit_us = 3000000;
933 		else
934 			limit_us = 100000;
935 
936 		/*
937 		 * SDHC cards always use these fixed values.
938 		 */
939 		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
940 			data->timeout_ns = limit_us * 1000;
941 			data->timeout_clks = 0;
942 		}
943 
944 		/* assign limit value if invalid */
945 		if (timeout_us == 0)
946 			data->timeout_ns = limit_us * 1000;
947 	}
948 
949 	/*
950 	 * Some cards require longer data read timeout than indicated in CSD.
951 	 * Address this by setting the read timeout to a "reasonably high"
952 	 * value. For the cards tested, 600ms has proven enough. If necessary,
953 	 * this value can be increased if other problematic cards require this.
954 	 */
955 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
956 		data->timeout_ns = 600000000;
957 		data->timeout_clks = 0;
958 	}
959 
960 	/*
961 	 * Some cards need very high timeouts if driven in SPI mode.
962 	 * The worst observed timeout was 900ms after writing a
963 	 * continuous stream of data until the internal logic
964 	 * overflowed.
965 	 */
966 	if (mmc_host_is_spi(card->host)) {
967 		if (data->flags & MMC_DATA_WRITE) {
968 			if (data->timeout_ns < 1000000000)
969 				data->timeout_ns = 1000000000;	/* 1s */
970 		} else {
971 			if (data->timeout_ns < 100000000)
972 				data->timeout_ns =  100000000;	/* 100ms */
973 		}
974 	}
975 }
976 EXPORT_SYMBOL(mmc_set_data_timeout);
977 
978 /**
979  *	mmc_align_data_size - pads a transfer size to a more optimal value
980  *	@card: the MMC card associated with the data transfer
981  *	@sz: original transfer size
982  *
983  *	Pads the original data size with a number of extra bytes in
984  *	order to avoid controller bugs and/or performance hits
985  *	(e.g. some controllers revert to PIO for certain sizes).
986  *
987  *	Returns the improved size, which might be unmodified.
988  *
989  *	Note that this function is only relevant when issuing a
990  *	single scatter gather entry.
991  */
992 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
993 {
994 	/*
995 	 * FIXME: We don't have a system for the controller to tell
996 	 * the core about its problems yet, so for now we just 32-bit
997 	 * align the size.
998 	 */
999 	sz = ((sz + 3) / 4) * 4;
1000 
1001 	return sz;
1002 }
1003 EXPORT_SYMBOL(mmc_align_data_size);
1004 
1005 /**
1006  *	__mmc_claim_host - exclusively claim a host
1007  *	@host: mmc host to claim
1008  *	@abort: whether or not the operation should be aborted
1009  *
1010  *	Claim a host for a set of operations.  If @abort is non null and
1011  *	dereference a non-zero value then this will return prematurely with
1012  *	that non-zero value without acquiring the lock.  Returns zero
1013  *	with the lock held otherwise.
1014  */
1015 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
1016 {
1017 	DECLARE_WAITQUEUE(wait, current);
1018 	unsigned long flags;
1019 	int stop;
1020 	bool pm = false;
1021 
1022 	might_sleep();
1023 
1024 	add_wait_queue(&host->wq, &wait);
1025 	spin_lock_irqsave(&host->lock, flags);
1026 	while (1) {
1027 		set_current_state(TASK_UNINTERRUPTIBLE);
1028 		stop = abort ? atomic_read(abort) : 0;
1029 		if (stop || !host->claimed || host->claimer == current)
1030 			break;
1031 		spin_unlock_irqrestore(&host->lock, flags);
1032 		schedule();
1033 		spin_lock_irqsave(&host->lock, flags);
1034 	}
1035 	set_current_state(TASK_RUNNING);
1036 	if (!stop) {
1037 		host->claimed = 1;
1038 		host->claimer = current;
1039 		host->claim_cnt += 1;
1040 		if (host->claim_cnt == 1)
1041 			pm = true;
1042 	} else
1043 		wake_up(&host->wq);
1044 	spin_unlock_irqrestore(&host->lock, flags);
1045 	remove_wait_queue(&host->wq, &wait);
1046 
1047 	if (pm)
1048 		pm_runtime_get_sync(mmc_dev(host));
1049 
1050 	return stop;
1051 }
1052 EXPORT_SYMBOL(__mmc_claim_host);
1053 
1054 /**
1055  *	mmc_release_host - release a host
1056  *	@host: mmc host to release
1057  *
1058  *	Release a MMC host, allowing others to claim the host
1059  *	for their operations.
1060  */
1061 void mmc_release_host(struct mmc_host *host)
1062 {
1063 	unsigned long flags;
1064 
1065 	WARN_ON(!host->claimed);
1066 
1067 	spin_lock_irqsave(&host->lock, flags);
1068 	if (--host->claim_cnt) {
1069 		/* Release for nested claim */
1070 		spin_unlock_irqrestore(&host->lock, flags);
1071 	} else {
1072 		host->claimed = 0;
1073 		host->claimer = NULL;
1074 		spin_unlock_irqrestore(&host->lock, flags);
1075 		wake_up(&host->wq);
1076 		pm_runtime_mark_last_busy(mmc_dev(host));
1077 		pm_runtime_put_autosuspend(mmc_dev(host));
1078 	}
1079 }
1080 EXPORT_SYMBOL(mmc_release_host);
1081 
1082 /*
1083  * This is a helper function, which fetches a runtime pm reference for the
1084  * card device and also claims the host.
1085  */
1086 void mmc_get_card(struct mmc_card *card)
1087 {
1088 	pm_runtime_get_sync(&card->dev);
1089 	mmc_claim_host(card->host);
1090 }
1091 EXPORT_SYMBOL(mmc_get_card);
1092 
1093 /*
1094  * This is a helper function, which releases the host and drops the runtime
1095  * pm reference for the card device.
1096  */
1097 void mmc_put_card(struct mmc_card *card)
1098 {
1099 	mmc_release_host(card->host);
1100 	pm_runtime_mark_last_busy(&card->dev);
1101 	pm_runtime_put_autosuspend(&card->dev);
1102 }
1103 EXPORT_SYMBOL(mmc_put_card);
1104 
1105 /*
1106  * Internal function that does the actual ios call to the host driver,
1107  * optionally printing some debug output.
1108  */
1109 static inline void mmc_set_ios(struct mmc_host *host)
1110 {
1111 	struct mmc_ios *ios = &host->ios;
1112 
1113 	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1114 		"width %u timing %u\n",
1115 		 mmc_hostname(host), ios->clock, ios->bus_mode,
1116 		 ios->power_mode, ios->chip_select, ios->vdd,
1117 		 1 << ios->bus_width, ios->timing);
1118 
1119 	host->ops->set_ios(host, ios);
1120 }
1121 
1122 /*
1123  * Control chip select pin on a host.
1124  */
1125 void mmc_set_chip_select(struct mmc_host *host, int mode)
1126 {
1127 	host->ios.chip_select = mode;
1128 	mmc_set_ios(host);
1129 }
1130 
1131 /*
1132  * Sets the host clock to the highest possible frequency that
1133  * is below "hz".
1134  */
1135 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1136 {
1137 	WARN_ON(hz && hz < host->f_min);
1138 
1139 	if (hz > host->f_max)
1140 		hz = host->f_max;
1141 
1142 	host->ios.clock = hz;
1143 	mmc_set_ios(host);
1144 }
1145 
1146 int mmc_execute_tuning(struct mmc_card *card)
1147 {
1148 	struct mmc_host *host = card->host;
1149 	u32 opcode;
1150 	int err;
1151 
1152 	if (!host->ops->execute_tuning)
1153 		return 0;
1154 
1155 	if (mmc_card_mmc(card))
1156 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1157 	else
1158 		opcode = MMC_SEND_TUNING_BLOCK;
1159 
1160 	err = host->ops->execute_tuning(host, opcode);
1161 
1162 	if (err)
1163 		pr_err("%s: tuning execution failed: %d\n",
1164 			mmc_hostname(host), err);
1165 	else
1166 		mmc_retune_enable(host);
1167 
1168 	return err;
1169 }
1170 
1171 /*
1172  * Change the bus mode (open drain/push-pull) of a host.
1173  */
1174 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1175 {
1176 	host->ios.bus_mode = mode;
1177 	mmc_set_ios(host);
1178 }
1179 
1180 /*
1181  * Change data bus width of a host.
1182  */
1183 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1184 {
1185 	host->ios.bus_width = width;
1186 	mmc_set_ios(host);
1187 }
1188 
1189 /*
1190  * Set initial state after a power cycle or a hw_reset.
1191  */
1192 void mmc_set_initial_state(struct mmc_host *host)
1193 {
1194 	mmc_retune_disable(host);
1195 
1196 	if (mmc_host_is_spi(host))
1197 		host->ios.chip_select = MMC_CS_HIGH;
1198 	else
1199 		host->ios.chip_select = MMC_CS_DONTCARE;
1200 	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1201 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1202 	host->ios.timing = MMC_TIMING_LEGACY;
1203 	host->ios.drv_type = 0;
1204 	host->ios.enhanced_strobe = false;
1205 
1206 	/*
1207 	 * Make sure we are in non-enhanced strobe mode before we
1208 	 * actually enable it in ext_csd.
1209 	 */
1210 	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1211 	     host->ops->hs400_enhanced_strobe)
1212 		host->ops->hs400_enhanced_strobe(host, &host->ios);
1213 
1214 	mmc_set_ios(host);
1215 }
1216 
1217 /**
1218  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1219  * @vdd:	voltage (mV)
1220  * @low_bits:	prefer low bits in boundary cases
1221  *
1222  * This function returns the OCR bit number according to the provided @vdd
1223  * value. If conversion is not possible a negative errno value returned.
1224  *
1225  * Depending on the @low_bits flag the function prefers low or high OCR bits
1226  * on boundary voltages. For example,
1227  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1228  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1229  *
1230  * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1231  */
1232 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1233 {
1234 	const int max_bit = ilog2(MMC_VDD_35_36);
1235 	int bit;
1236 
1237 	if (vdd < 1650 || vdd > 3600)
1238 		return -EINVAL;
1239 
1240 	if (vdd >= 1650 && vdd <= 1950)
1241 		return ilog2(MMC_VDD_165_195);
1242 
1243 	if (low_bits)
1244 		vdd -= 1;
1245 
1246 	/* Base 2000 mV, step 100 mV, bit's base 8. */
1247 	bit = (vdd - 2000) / 100 + 8;
1248 	if (bit > max_bit)
1249 		return max_bit;
1250 	return bit;
1251 }
1252 
1253 /**
1254  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1255  * @vdd_min:	minimum voltage value (mV)
1256  * @vdd_max:	maximum voltage value (mV)
1257  *
1258  * This function returns the OCR mask bits according to the provided @vdd_min
1259  * and @vdd_max values. If conversion is not possible the function returns 0.
1260  *
1261  * Notes wrt boundary cases:
1262  * This function sets the OCR bits for all boundary voltages, for example
1263  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1264  * MMC_VDD_34_35 mask.
1265  */
1266 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1267 {
1268 	u32 mask = 0;
1269 
1270 	if (vdd_max < vdd_min)
1271 		return 0;
1272 
1273 	/* Prefer high bits for the boundary vdd_max values. */
1274 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1275 	if (vdd_max < 0)
1276 		return 0;
1277 
1278 	/* Prefer low bits for the boundary vdd_min values. */
1279 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1280 	if (vdd_min < 0)
1281 		return 0;
1282 
1283 	/* Fill the mask, from max bit to min bit. */
1284 	while (vdd_max >= vdd_min)
1285 		mask |= 1 << vdd_max--;
1286 
1287 	return mask;
1288 }
1289 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1290 
1291 #ifdef CONFIG_OF
1292 
1293 /**
1294  * mmc_of_parse_voltage - return mask of supported voltages
1295  * @np: The device node need to be parsed.
1296  * @mask: mask of voltages available for MMC/SD/SDIO
1297  *
1298  * Parse the "voltage-ranges" DT property, returning zero if it is not
1299  * found, negative errno if the voltage-range specification is invalid,
1300  * or one if the voltage-range is specified and successfully parsed.
1301  */
1302 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1303 {
1304 	const u32 *voltage_ranges;
1305 	int num_ranges, i;
1306 
1307 	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1308 	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1309 	if (!voltage_ranges) {
1310 		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1311 		return 0;
1312 	}
1313 	if (!num_ranges) {
1314 		pr_err("%s: voltage-ranges empty\n", np->full_name);
1315 		return -EINVAL;
1316 	}
1317 
1318 	for (i = 0; i < num_ranges; i++) {
1319 		const int j = i * 2;
1320 		u32 ocr_mask;
1321 
1322 		ocr_mask = mmc_vddrange_to_ocrmask(
1323 				be32_to_cpu(voltage_ranges[j]),
1324 				be32_to_cpu(voltage_ranges[j + 1]));
1325 		if (!ocr_mask) {
1326 			pr_err("%s: voltage-range #%d is invalid\n",
1327 				np->full_name, i);
1328 			return -EINVAL;
1329 		}
1330 		*mask |= ocr_mask;
1331 	}
1332 
1333 	return 1;
1334 }
1335 EXPORT_SYMBOL(mmc_of_parse_voltage);
1336 
1337 #endif /* CONFIG_OF */
1338 
1339 static int mmc_of_get_func_num(struct device_node *node)
1340 {
1341 	u32 reg;
1342 	int ret;
1343 
1344 	ret = of_property_read_u32(node, "reg", &reg);
1345 	if (ret < 0)
1346 		return ret;
1347 
1348 	return reg;
1349 }
1350 
1351 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1352 		unsigned func_num)
1353 {
1354 	struct device_node *node;
1355 
1356 	if (!host->parent || !host->parent->of_node)
1357 		return NULL;
1358 
1359 	for_each_child_of_node(host->parent->of_node, node) {
1360 		if (mmc_of_get_func_num(node) == func_num)
1361 			return node;
1362 	}
1363 
1364 	return NULL;
1365 }
1366 
1367 #ifdef CONFIG_REGULATOR
1368 
1369 /**
1370  * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1371  * @vdd_bit:	OCR bit number
1372  * @min_uV:	minimum voltage value (mV)
1373  * @max_uV:	maximum voltage value (mV)
1374  *
1375  * This function returns the voltage range according to the provided OCR
1376  * bit number. If conversion is not possible a negative errno value returned.
1377  */
1378 static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1379 {
1380 	int		tmp;
1381 
1382 	if (!vdd_bit)
1383 		return -EINVAL;
1384 
1385 	/*
1386 	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1387 	 * bits this regulator doesn't quite support ... don't
1388 	 * be too picky, most cards and regulators are OK with
1389 	 * a 0.1V range goof (it's a small error percentage).
1390 	 */
1391 	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1392 	if (tmp == 0) {
1393 		*min_uV = 1650 * 1000;
1394 		*max_uV = 1950 * 1000;
1395 	} else {
1396 		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
1397 		*max_uV = *min_uV + 100 * 1000;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 /**
1404  * mmc_regulator_get_ocrmask - return mask of supported voltages
1405  * @supply: regulator to use
1406  *
1407  * This returns either a negative errno, or a mask of voltages that
1408  * can be provided to MMC/SD/SDIO devices using the specified voltage
1409  * regulator.  This would normally be called before registering the
1410  * MMC host adapter.
1411  */
1412 int mmc_regulator_get_ocrmask(struct regulator *supply)
1413 {
1414 	int			result = 0;
1415 	int			count;
1416 	int			i;
1417 	int			vdd_uV;
1418 	int			vdd_mV;
1419 
1420 	count = regulator_count_voltages(supply);
1421 	if (count < 0)
1422 		return count;
1423 
1424 	for (i = 0; i < count; i++) {
1425 		vdd_uV = regulator_list_voltage(supply, i);
1426 		if (vdd_uV <= 0)
1427 			continue;
1428 
1429 		vdd_mV = vdd_uV / 1000;
1430 		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1431 	}
1432 
1433 	if (!result) {
1434 		vdd_uV = regulator_get_voltage(supply);
1435 		if (vdd_uV <= 0)
1436 			return vdd_uV;
1437 
1438 		vdd_mV = vdd_uV / 1000;
1439 		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1440 	}
1441 
1442 	return result;
1443 }
1444 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1445 
1446 /**
1447  * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1448  * @mmc: the host to regulate
1449  * @supply: regulator to use
1450  * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1451  *
1452  * Returns zero on success, else negative errno.
1453  *
1454  * MMC host drivers may use this to enable or disable a regulator using
1455  * a particular supply voltage.  This would normally be called from the
1456  * set_ios() method.
1457  */
1458 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1459 			struct regulator *supply,
1460 			unsigned short vdd_bit)
1461 {
1462 	int			result = 0;
1463 	int			min_uV, max_uV;
1464 
1465 	if (vdd_bit) {
1466 		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1467 
1468 		result = regulator_set_voltage(supply, min_uV, max_uV);
1469 		if (result == 0 && !mmc->regulator_enabled) {
1470 			result = regulator_enable(supply);
1471 			if (!result)
1472 				mmc->regulator_enabled = true;
1473 		}
1474 	} else if (mmc->regulator_enabled) {
1475 		result = regulator_disable(supply);
1476 		if (result == 0)
1477 			mmc->regulator_enabled = false;
1478 	}
1479 
1480 	if (result)
1481 		dev_err(mmc_dev(mmc),
1482 			"could not set regulator OCR (%d)\n", result);
1483 	return result;
1484 }
1485 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1486 
1487 static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1488 						  int min_uV, int target_uV,
1489 						  int max_uV)
1490 {
1491 	/*
1492 	 * Check if supported first to avoid errors since we may try several
1493 	 * signal levels during power up and don't want to show errors.
1494 	 */
1495 	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1496 		return -EINVAL;
1497 
1498 	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1499 					     max_uV);
1500 }
1501 
1502 /**
1503  * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1504  *
1505  * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1506  * That will match the behavior of old boards where VQMMC and VMMC were supplied
1507  * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1508  * SD card spec also define VQMMC in terms of VMMC.
1509  * If this is not possible we'll try the full 2.7-3.6V of the spec.
1510  *
1511  * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1512  * requested voltage.  This is definitely a good idea for UHS where there's a
1513  * separate regulator on the card that's trying to make 1.8V and it's best if
1514  * we match.
1515  *
1516  * This function is expected to be used by a controller's
1517  * start_signal_voltage_switch() function.
1518  */
1519 int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1520 {
1521 	struct device *dev = mmc_dev(mmc);
1522 	int ret, volt, min_uV, max_uV;
1523 
1524 	/* If no vqmmc supply then we can't change the voltage */
1525 	if (IS_ERR(mmc->supply.vqmmc))
1526 		return -EINVAL;
1527 
1528 	switch (ios->signal_voltage) {
1529 	case MMC_SIGNAL_VOLTAGE_120:
1530 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1531 						1100000, 1200000, 1300000);
1532 	case MMC_SIGNAL_VOLTAGE_180:
1533 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1534 						1700000, 1800000, 1950000);
1535 	case MMC_SIGNAL_VOLTAGE_330:
1536 		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1537 		if (ret < 0)
1538 			return ret;
1539 
1540 		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1541 			__func__, volt, max_uV);
1542 
1543 		min_uV = max(volt - 300000, 2700000);
1544 		max_uV = min(max_uV + 200000, 3600000);
1545 
1546 		/*
1547 		 * Due to a limitation in the current implementation of
1548 		 * regulator_set_voltage_triplet() which is taking the lowest
1549 		 * voltage possible if below the target, search for a suitable
1550 		 * voltage in two steps and try to stay close to vmmc
1551 		 * with a 0.3V tolerance at first.
1552 		 */
1553 		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1554 						min_uV, volt, max_uV))
1555 			return 0;
1556 
1557 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1558 						2700000, volt, 3600000);
1559 	default:
1560 		return -EINVAL;
1561 	}
1562 }
1563 EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1564 
1565 #endif /* CONFIG_REGULATOR */
1566 
1567 int mmc_regulator_get_supply(struct mmc_host *mmc)
1568 {
1569 	struct device *dev = mmc_dev(mmc);
1570 	int ret;
1571 
1572 	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1573 	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1574 
1575 	if (IS_ERR(mmc->supply.vmmc)) {
1576 		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1577 			return -EPROBE_DEFER;
1578 		dev_dbg(dev, "No vmmc regulator found\n");
1579 	} else {
1580 		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1581 		if (ret > 0)
1582 			mmc->ocr_avail = ret;
1583 		else
1584 			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1585 	}
1586 
1587 	if (IS_ERR(mmc->supply.vqmmc)) {
1588 		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1589 			return -EPROBE_DEFER;
1590 		dev_dbg(dev, "No vqmmc regulator found\n");
1591 	}
1592 
1593 	return 0;
1594 }
1595 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1596 
1597 /*
1598  * Mask off any voltages we don't support and select
1599  * the lowest voltage
1600  */
1601 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1602 {
1603 	int bit;
1604 
1605 	/*
1606 	 * Sanity check the voltages that the card claims to
1607 	 * support.
1608 	 */
1609 	if (ocr & 0x7F) {
1610 		dev_warn(mmc_dev(host),
1611 		"card claims to support voltages below defined range\n");
1612 		ocr &= ~0x7F;
1613 	}
1614 
1615 	ocr &= host->ocr_avail;
1616 	if (!ocr) {
1617 		dev_warn(mmc_dev(host), "no support for card's volts\n");
1618 		return 0;
1619 	}
1620 
1621 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1622 		bit = ffs(ocr) - 1;
1623 		ocr &= 3 << bit;
1624 		mmc_power_cycle(host, ocr);
1625 	} else {
1626 		bit = fls(ocr) - 1;
1627 		ocr &= 3 << bit;
1628 		if (bit != host->ios.vdd)
1629 			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1630 	}
1631 
1632 	return ocr;
1633 }
1634 
1635 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1636 {
1637 	int err = 0;
1638 	int old_signal_voltage = host->ios.signal_voltage;
1639 
1640 	host->ios.signal_voltage = signal_voltage;
1641 	if (host->ops->start_signal_voltage_switch)
1642 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1643 
1644 	if (err)
1645 		host->ios.signal_voltage = old_signal_voltage;
1646 
1647 	return err;
1648 
1649 }
1650 
1651 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1652 {
1653 	struct mmc_command cmd = {0};
1654 	int err = 0;
1655 	u32 clock;
1656 
1657 	/*
1658 	 * Send CMD11 only if the request is to switch the card to
1659 	 * 1.8V signalling.
1660 	 */
1661 	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1662 		return __mmc_set_signal_voltage(host, signal_voltage);
1663 
1664 	/*
1665 	 * If we cannot switch voltages, return failure so the caller
1666 	 * can continue without UHS mode
1667 	 */
1668 	if (!host->ops->start_signal_voltage_switch)
1669 		return -EPERM;
1670 	if (!host->ops->card_busy)
1671 		pr_warn("%s: cannot verify signal voltage switch\n",
1672 			mmc_hostname(host));
1673 
1674 	cmd.opcode = SD_SWITCH_VOLTAGE;
1675 	cmd.arg = 0;
1676 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1677 
1678 	err = mmc_wait_for_cmd(host, &cmd, 0);
1679 	if (err)
1680 		return err;
1681 
1682 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1683 		return -EIO;
1684 
1685 	/*
1686 	 * The card should drive cmd and dat[0:3] low immediately
1687 	 * after the response of cmd11, but wait 1 ms to be sure
1688 	 */
1689 	mmc_delay(1);
1690 	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1691 		err = -EAGAIN;
1692 		goto power_cycle;
1693 	}
1694 	/*
1695 	 * During a signal voltage level switch, the clock must be gated
1696 	 * for 5 ms according to the SD spec
1697 	 */
1698 	clock = host->ios.clock;
1699 	host->ios.clock = 0;
1700 	mmc_set_ios(host);
1701 
1702 	if (__mmc_set_signal_voltage(host, signal_voltage)) {
1703 		/*
1704 		 * Voltages may not have been switched, but we've already
1705 		 * sent CMD11, so a power cycle is required anyway
1706 		 */
1707 		err = -EAGAIN;
1708 		goto power_cycle;
1709 	}
1710 
1711 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1712 	mmc_delay(10);
1713 	host->ios.clock = clock;
1714 	mmc_set_ios(host);
1715 
1716 	/* Wait for at least 1 ms according to spec */
1717 	mmc_delay(1);
1718 
1719 	/*
1720 	 * Failure to switch is indicated by the card holding
1721 	 * dat[0:3] low
1722 	 */
1723 	if (host->ops->card_busy && host->ops->card_busy(host))
1724 		err = -EAGAIN;
1725 
1726 power_cycle:
1727 	if (err) {
1728 		pr_debug("%s: Signal voltage switch failed, "
1729 			"power cycling card\n", mmc_hostname(host));
1730 		mmc_power_cycle(host, ocr);
1731 	}
1732 
1733 	return err;
1734 }
1735 
1736 /*
1737  * Select timing parameters for host.
1738  */
1739 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1740 {
1741 	host->ios.timing = timing;
1742 	mmc_set_ios(host);
1743 }
1744 
1745 /*
1746  * Select appropriate driver type for host.
1747  */
1748 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1749 {
1750 	host->ios.drv_type = drv_type;
1751 	mmc_set_ios(host);
1752 }
1753 
1754 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1755 			      int card_drv_type, int *drv_type)
1756 {
1757 	struct mmc_host *host = card->host;
1758 	int host_drv_type = SD_DRIVER_TYPE_B;
1759 
1760 	*drv_type = 0;
1761 
1762 	if (!host->ops->select_drive_strength)
1763 		return 0;
1764 
1765 	/* Use SD definition of driver strength for hosts */
1766 	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1767 		host_drv_type |= SD_DRIVER_TYPE_A;
1768 
1769 	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1770 		host_drv_type |= SD_DRIVER_TYPE_C;
1771 
1772 	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1773 		host_drv_type |= SD_DRIVER_TYPE_D;
1774 
1775 	/*
1776 	 * The drive strength that the hardware can support
1777 	 * depends on the board design.  Pass the appropriate
1778 	 * information and let the hardware specific code
1779 	 * return what is possible given the options
1780 	 */
1781 	return host->ops->select_drive_strength(card, max_dtr,
1782 						host_drv_type,
1783 						card_drv_type,
1784 						drv_type);
1785 }
1786 
1787 /*
1788  * Apply power to the MMC stack.  This is a two-stage process.
1789  * First, we enable power to the card without the clock running.
1790  * We then wait a bit for the power to stabilise.  Finally,
1791  * enable the bus drivers and clock to the card.
1792  *
1793  * We must _NOT_ enable the clock prior to power stablising.
1794  *
1795  * If a host does all the power sequencing itself, ignore the
1796  * initial MMC_POWER_UP stage.
1797  */
1798 void mmc_power_up(struct mmc_host *host, u32 ocr)
1799 {
1800 	if (host->ios.power_mode == MMC_POWER_ON)
1801 		return;
1802 
1803 	mmc_pwrseq_pre_power_on(host);
1804 
1805 	host->ios.vdd = fls(ocr) - 1;
1806 	host->ios.power_mode = MMC_POWER_UP;
1807 	/* Set initial state and call mmc_set_ios */
1808 	mmc_set_initial_state(host);
1809 
1810 	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1811 	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1812 		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1813 	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1814 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1815 	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1816 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1817 
1818 	/*
1819 	 * This delay should be sufficient to allow the power supply
1820 	 * to reach the minimum voltage.
1821 	 */
1822 	mmc_delay(10);
1823 
1824 	mmc_pwrseq_post_power_on(host);
1825 
1826 	host->ios.clock = host->f_init;
1827 
1828 	host->ios.power_mode = MMC_POWER_ON;
1829 	mmc_set_ios(host);
1830 
1831 	/*
1832 	 * This delay must be at least 74 clock sizes, or 1 ms, or the
1833 	 * time required to reach a stable voltage.
1834 	 */
1835 	mmc_delay(10);
1836 }
1837 
1838 void mmc_power_off(struct mmc_host *host)
1839 {
1840 	if (host->ios.power_mode == MMC_POWER_OFF)
1841 		return;
1842 
1843 	mmc_pwrseq_power_off(host);
1844 
1845 	host->ios.clock = 0;
1846 	host->ios.vdd = 0;
1847 
1848 	host->ios.power_mode = MMC_POWER_OFF;
1849 	/* Set initial state and call mmc_set_ios */
1850 	mmc_set_initial_state(host);
1851 
1852 	/*
1853 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1854 	 * XO-1.5, require a short delay after poweroff before the card
1855 	 * can be successfully turned on again.
1856 	 */
1857 	mmc_delay(1);
1858 }
1859 
1860 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1861 {
1862 	mmc_power_off(host);
1863 	/* Wait at least 1 ms according to SD spec */
1864 	mmc_delay(1);
1865 	mmc_power_up(host, ocr);
1866 }
1867 
1868 /*
1869  * Cleanup when the last reference to the bus operator is dropped.
1870  */
1871 static void __mmc_release_bus(struct mmc_host *host)
1872 {
1873 	WARN_ON(!host->bus_dead);
1874 
1875 	host->bus_ops = NULL;
1876 }
1877 
1878 /*
1879  * Increase reference count of bus operator
1880  */
1881 static inline void mmc_bus_get(struct mmc_host *host)
1882 {
1883 	unsigned long flags;
1884 
1885 	spin_lock_irqsave(&host->lock, flags);
1886 	host->bus_refs++;
1887 	spin_unlock_irqrestore(&host->lock, flags);
1888 }
1889 
1890 /*
1891  * Decrease reference count of bus operator and free it if
1892  * it is the last reference.
1893  */
1894 static inline void mmc_bus_put(struct mmc_host *host)
1895 {
1896 	unsigned long flags;
1897 
1898 	spin_lock_irqsave(&host->lock, flags);
1899 	host->bus_refs--;
1900 	if ((host->bus_refs == 0) && host->bus_ops)
1901 		__mmc_release_bus(host);
1902 	spin_unlock_irqrestore(&host->lock, flags);
1903 }
1904 
1905 /*
1906  * Assign a mmc bus handler to a host. Only one bus handler may control a
1907  * host at any given time.
1908  */
1909 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1910 {
1911 	unsigned long flags;
1912 
1913 	WARN_ON(!host->claimed);
1914 
1915 	spin_lock_irqsave(&host->lock, flags);
1916 
1917 	WARN_ON(host->bus_ops);
1918 	WARN_ON(host->bus_refs);
1919 
1920 	host->bus_ops = ops;
1921 	host->bus_refs = 1;
1922 	host->bus_dead = 0;
1923 
1924 	spin_unlock_irqrestore(&host->lock, flags);
1925 }
1926 
1927 /*
1928  * Remove the current bus handler from a host.
1929  */
1930 void mmc_detach_bus(struct mmc_host *host)
1931 {
1932 	unsigned long flags;
1933 
1934 	WARN_ON(!host->claimed);
1935 	WARN_ON(!host->bus_ops);
1936 
1937 	spin_lock_irqsave(&host->lock, flags);
1938 
1939 	host->bus_dead = 1;
1940 
1941 	spin_unlock_irqrestore(&host->lock, flags);
1942 
1943 	mmc_bus_put(host);
1944 }
1945 
1946 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1947 				bool cd_irq)
1948 {
1949 #ifdef CONFIG_MMC_DEBUG
1950 	unsigned long flags;
1951 	spin_lock_irqsave(&host->lock, flags);
1952 	WARN_ON(host->removed);
1953 	spin_unlock_irqrestore(&host->lock, flags);
1954 #endif
1955 
1956 	/*
1957 	 * If the device is configured as wakeup, we prevent a new sleep for
1958 	 * 5 s to give provision for user space to consume the event.
1959 	 */
1960 	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1961 		device_can_wakeup(mmc_dev(host)))
1962 		pm_wakeup_event(mmc_dev(host), 5000);
1963 
1964 	host->detect_change = 1;
1965 	mmc_schedule_delayed_work(&host->detect, delay);
1966 }
1967 
1968 /**
1969  *	mmc_detect_change - process change of state on a MMC socket
1970  *	@host: host which changed state.
1971  *	@delay: optional delay to wait before detection (jiffies)
1972  *
1973  *	MMC drivers should call this when they detect a card has been
1974  *	inserted or removed. The MMC layer will confirm that any
1975  *	present card is still functional, and initialize any newly
1976  *	inserted.
1977  */
1978 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1979 {
1980 	_mmc_detect_change(host, delay, true);
1981 }
1982 EXPORT_SYMBOL(mmc_detect_change);
1983 
1984 void mmc_init_erase(struct mmc_card *card)
1985 {
1986 	unsigned int sz;
1987 
1988 	if (is_power_of_2(card->erase_size))
1989 		card->erase_shift = ffs(card->erase_size) - 1;
1990 	else
1991 		card->erase_shift = 0;
1992 
1993 	/*
1994 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1995 	 * card.  That is not desirable because it can take a long time
1996 	 * (minutes) potentially delaying more important I/O, and also the
1997 	 * timeout calculations become increasingly hugely over-estimated.
1998 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1999 	 * to that size and alignment.
2000 	 *
2001 	 * For SD cards that define Allocation Unit size, limit erases to one
2002 	 * Allocation Unit at a time.
2003 	 * For MMC, have a stab at ai good value and for modern cards it will
2004 	 * end up being 4MiB. Note that if the value is too small, it can end
2005 	 * up taking longer to erase. Also note, erase_size is already set to
2006 	 * High Capacity Erase Size if available when this function is called.
2007 	 */
2008 	if (mmc_card_sd(card) && card->ssr.au) {
2009 		card->pref_erase = card->ssr.au;
2010 		card->erase_shift = ffs(card->ssr.au) - 1;
2011 	} else if (card->erase_size) {
2012 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
2013 		if (sz < 128)
2014 			card->pref_erase = 512 * 1024 / 512;
2015 		else if (sz < 512)
2016 			card->pref_erase = 1024 * 1024 / 512;
2017 		else if (sz < 1024)
2018 			card->pref_erase = 2 * 1024 * 1024 / 512;
2019 		else
2020 			card->pref_erase = 4 * 1024 * 1024 / 512;
2021 		if (card->pref_erase < card->erase_size)
2022 			card->pref_erase = card->erase_size;
2023 		else {
2024 			sz = card->pref_erase % card->erase_size;
2025 			if (sz)
2026 				card->pref_erase += card->erase_size - sz;
2027 		}
2028 	} else
2029 		card->pref_erase = 0;
2030 }
2031 
2032 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
2033 				          unsigned int arg, unsigned int qty)
2034 {
2035 	unsigned int erase_timeout;
2036 
2037 	if (arg == MMC_DISCARD_ARG ||
2038 	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
2039 		erase_timeout = card->ext_csd.trim_timeout;
2040 	} else if (card->ext_csd.erase_group_def & 1) {
2041 		/* High Capacity Erase Group Size uses HC timeouts */
2042 		if (arg == MMC_TRIM_ARG)
2043 			erase_timeout = card->ext_csd.trim_timeout;
2044 		else
2045 			erase_timeout = card->ext_csd.hc_erase_timeout;
2046 	} else {
2047 		/* CSD Erase Group Size uses write timeout */
2048 		unsigned int mult = (10 << card->csd.r2w_factor);
2049 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
2050 		unsigned int timeout_us;
2051 
2052 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
2053 		if (card->csd.tacc_ns < 1000000)
2054 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
2055 		else
2056 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
2057 
2058 		/*
2059 		 * ios.clock is only a target.  The real clock rate might be
2060 		 * less but not that much less, so fudge it by multiplying by 2.
2061 		 */
2062 		timeout_clks <<= 1;
2063 		timeout_us += (timeout_clks * 1000) /
2064 			      (card->host->ios.clock / 1000);
2065 
2066 		erase_timeout = timeout_us / 1000;
2067 
2068 		/*
2069 		 * Theoretically, the calculation could underflow so round up
2070 		 * to 1ms in that case.
2071 		 */
2072 		if (!erase_timeout)
2073 			erase_timeout = 1;
2074 	}
2075 
2076 	/* Multiplier for secure operations */
2077 	if (arg & MMC_SECURE_ARGS) {
2078 		if (arg == MMC_SECURE_ERASE_ARG)
2079 			erase_timeout *= card->ext_csd.sec_erase_mult;
2080 		else
2081 			erase_timeout *= card->ext_csd.sec_trim_mult;
2082 	}
2083 
2084 	erase_timeout *= qty;
2085 
2086 	/*
2087 	 * Ensure at least a 1 second timeout for SPI as per
2088 	 * 'mmc_set_data_timeout()'
2089 	 */
2090 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2091 		erase_timeout = 1000;
2092 
2093 	return erase_timeout;
2094 }
2095 
2096 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2097 					 unsigned int arg,
2098 					 unsigned int qty)
2099 {
2100 	unsigned int erase_timeout;
2101 
2102 	if (card->ssr.erase_timeout) {
2103 		/* Erase timeout specified in SD Status Register (SSR) */
2104 		erase_timeout = card->ssr.erase_timeout * qty +
2105 				card->ssr.erase_offset;
2106 	} else {
2107 		/*
2108 		 * Erase timeout not specified in SD Status Register (SSR) so
2109 		 * use 250ms per write block.
2110 		 */
2111 		erase_timeout = 250 * qty;
2112 	}
2113 
2114 	/* Must not be less than 1 second */
2115 	if (erase_timeout < 1000)
2116 		erase_timeout = 1000;
2117 
2118 	return erase_timeout;
2119 }
2120 
2121 static unsigned int mmc_erase_timeout(struct mmc_card *card,
2122 				      unsigned int arg,
2123 				      unsigned int qty)
2124 {
2125 	if (mmc_card_sd(card))
2126 		return mmc_sd_erase_timeout(card, arg, qty);
2127 	else
2128 		return mmc_mmc_erase_timeout(card, arg, qty);
2129 }
2130 
2131 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2132 			unsigned int to, unsigned int arg)
2133 {
2134 	struct mmc_command cmd = {0};
2135 	unsigned int qty = 0, busy_timeout = 0;
2136 	bool use_r1b_resp = false;
2137 	unsigned long timeout;
2138 	int err;
2139 
2140 	mmc_retune_hold(card->host);
2141 
2142 	/*
2143 	 * qty is used to calculate the erase timeout which depends on how many
2144 	 * erase groups (or allocation units in SD terminology) are affected.
2145 	 * We count erasing part of an erase group as one erase group.
2146 	 * For SD, the allocation units are always a power of 2.  For MMC, the
2147 	 * erase group size is almost certainly also power of 2, but it does not
2148 	 * seem to insist on that in the JEDEC standard, so we fall back to
2149 	 * division in that case.  SD may not specify an allocation unit size,
2150 	 * in which case the timeout is based on the number of write blocks.
2151 	 *
2152 	 * Note that the timeout for secure trim 2 will only be correct if the
2153 	 * number of erase groups specified is the same as the total of all
2154 	 * preceding secure trim 1 commands.  Since the power may have been
2155 	 * lost since the secure trim 1 commands occurred, it is generally
2156 	 * impossible to calculate the secure trim 2 timeout correctly.
2157 	 */
2158 	if (card->erase_shift)
2159 		qty += ((to >> card->erase_shift) -
2160 			(from >> card->erase_shift)) + 1;
2161 	else if (mmc_card_sd(card))
2162 		qty += to - from + 1;
2163 	else
2164 		qty += ((to / card->erase_size) -
2165 			(from / card->erase_size)) + 1;
2166 
2167 	if (!mmc_card_blockaddr(card)) {
2168 		from <<= 9;
2169 		to <<= 9;
2170 	}
2171 
2172 	if (mmc_card_sd(card))
2173 		cmd.opcode = SD_ERASE_WR_BLK_START;
2174 	else
2175 		cmd.opcode = MMC_ERASE_GROUP_START;
2176 	cmd.arg = from;
2177 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2178 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2179 	if (err) {
2180 		pr_err("mmc_erase: group start error %d, "
2181 		       "status %#x\n", err, cmd.resp[0]);
2182 		err = -EIO;
2183 		goto out;
2184 	}
2185 
2186 	memset(&cmd, 0, sizeof(struct mmc_command));
2187 	if (mmc_card_sd(card))
2188 		cmd.opcode = SD_ERASE_WR_BLK_END;
2189 	else
2190 		cmd.opcode = MMC_ERASE_GROUP_END;
2191 	cmd.arg = to;
2192 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2193 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2194 	if (err) {
2195 		pr_err("mmc_erase: group end error %d, status %#x\n",
2196 		       err, cmd.resp[0]);
2197 		err = -EIO;
2198 		goto out;
2199 	}
2200 
2201 	memset(&cmd, 0, sizeof(struct mmc_command));
2202 	cmd.opcode = MMC_ERASE;
2203 	cmd.arg = arg;
2204 	busy_timeout = mmc_erase_timeout(card, arg, qty);
2205 	/*
2206 	 * If the host controller supports busy signalling and the timeout for
2207 	 * the erase operation does not exceed the max_busy_timeout, we should
2208 	 * use R1B response. Or we need to prevent the host from doing hw busy
2209 	 * detection, which is done by converting to a R1 response instead.
2210 	 */
2211 	if (card->host->max_busy_timeout &&
2212 	    busy_timeout > card->host->max_busy_timeout) {
2213 		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2214 	} else {
2215 		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2216 		cmd.busy_timeout = busy_timeout;
2217 		use_r1b_resp = true;
2218 	}
2219 
2220 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2221 	if (err) {
2222 		pr_err("mmc_erase: erase error %d, status %#x\n",
2223 		       err, cmd.resp[0]);
2224 		err = -EIO;
2225 		goto out;
2226 	}
2227 
2228 	if (mmc_host_is_spi(card->host))
2229 		goto out;
2230 
2231 	/*
2232 	 * In case of when R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
2233 	 * shall be avoided.
2234 	 */
2235 	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2236 		goto out;
2237 
2238 	timeout = jiffies + msecs_to_jiffies(busy_timeout);
2239 	do {
2240 		memset(&cmd, 0, sizeof(struct mmc_command));
2241 		cmd.opcode = MMC_SEND_STATUS;
2242 		cmd.arg = card->rca << 16;
2243 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2244 		/* Do not retry else we can't see errors */
2245 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2246 		if (err || (cmd.resp[0] & 0xFDF92000)) {
2247 			pr_err("error %d requesting status %#x\n",
2248 				err, cmd.resp[0]);
2249 			err = -EIO;
2250 			goto out;
2251 		}
2252 
2253 		/* Timeout if the device never becomes ready for data and
2254 		 * never leaves the program state.
2255 		 */
2256 		if (time_after(jiffies, timeout)) {
2257 			pr_err("%s: Card stuck in programming state! %s\n",
2258 				mmc_hostname(card->host), __func__);
2259 			err =  -EIO;
2260 			goto out;
2261 		}
2262 
2263 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2264 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2265 out:
2266 	mmc_retune_release(card->host);
2267 	return err;
2268 }
2269 
2270 static unsigned int mmc_align_erase_size(struct mmc_card *card,
2271 					 unsigned int *from,
2272 					 unsigned int *to,
2273 					 unsigned int nr)
2274 {
2275 	unsigned int from_new = *from, nr_new = nr, rem;
2276 
2277 	/*
2278 	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
2279 	 * to align the erase size efficiently.
2280 	 */
2281 	if (is_power_of_2(card->erase_size)) {
2282 		unsigned int temp = from_new;
2283 
2284 		from_new = round_up(temp, card->erase_size);
2285 		rem = from_new - temp;
2286 
2287 		if (nr_new > rem)
2288 			nr_new -= rem;
2289 		else
2290 			return 0;
2291 
2292 		nr_new = round_down(nr_new, card->erase_size);
2293 	} else {
2294 		rem = from_new % card->erase_size;
2295 		if (rem) {
2296 			rem = card->erase_size - rem;
2297 			from_new += rem;
2298 			if (nr_new > rem)
2299 				nr_new -= rem;
2300 			else
2301 				return 0;
2302 		}
2303 
2304 		rem = nr_new % card->erase_size;
2305 		if (rem)
2306 			nr_new -= rem;
2307 	}
2308 
2309 	if (nr_new == 0)
2310 		return 0;
2311 
2312 	*to = from_new + nr_new;
2313 	*from = from_new;
2314 
2315 	return nr_new;
2316 }
2317 
2318 /**
2319  * mmc_erase - erase sectors.
2320  * @card: card to erase
2321  * @from: first sector to erase
2322  * @nr: number of sectors to erase
2323  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2324  *
2325  * Caller must claim host before calling this function.
2326  */
2327 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2328 	      unsigned int arg)
2329 {
2330 	unsigned int rem, to = from + nr;
2331 	int err;
2332 
2333 	if (!(card->host->caps & MMC_CAP_ERASE) ||
2334 	    !(card->csd.cmdclass & CCC_ERASE))
2335 		return -EOPNOTSUPP;
2336 
2337 	if (!card->erase_size)
2338 		return -EOPNOTSUPP;
2339 
2340 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2341 		return -EOPNOTSUPP;
2342 
2343 	if ((arg & MMC_SECURE_ARGS) &&
2344 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2345 		return -EOPNOTSUPP;
2346 
2347 	if ((arg & MMC_TRIM_ARGS) &&
2348 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2349 		return -EOPNOTSUPP;
2350 
2351 	if (arg == MMC_SECURE_ERASE_ARG) {
2352 		if (from % card->erase_size || nr % card->erase_size)
2353 			return -EINVAL;
2354 	}
2355 
2356 	if (arg == MMC_ERASE_ARG)
2357 		nr = mmc_align_erase_size(card, &from, &to, nr);
2358 
2359 	if (nr == 0)
2360 		return 0;
2361 
2362 	if (to <= from)
2363 		return -EINVAL;
2364 
2365 	/* 'from' and 'to' are inclusive */
2366 	to -= 1;
2367 
2368 	/*
2369 	 * Special case where only one erase-group fits in the timeout budget:
2370 	 * If the region crosses an erase-group boundary on this particular
2371 	 * case, we will be trimming more than one erase-group which, does not
2372 	 * fit in the timeout budget of the controller, so we need to split it
2373 	 * and call mmc_do_erase() twice if necessary. This special case is
2374 	 * identified by the card->eg_boundary flag.
2375 	 */
2376 	rem = card->erase_size - (from % card->erase_size);
2377 	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2378 		err = mmc_do_erase(card, from, from + rem - 1, arg);
2379 		from += rem;
2380 		if ((err) || (to <= from))
2381 			return err;
2382 	}
2383 
2384 	return mmc_do_erase(card, from, to, arg);
2385 }
2386 EXPORT_SYMBOL(mmc_erase);
2387 
2388 int mmc_can_erase(struct mmc_card *card)
2389 {
2390 	if ((card->host->caps & MMC_CAP_ERASE) &&
2391 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2392 		return 1;
2393 	return 0;
2394 }
2395 EXPORT_SYMBOL(mmc_can_erase);
2396 
2397 int mmc_can_trim(struct mmc_card *card)
2398 {
2399 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2400 	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2401 		return 1;
2402 	return 0;
2403 }
2404 EXPORT_SYMBOL(mmc_can_trim);
2405 
2406 int mmc_can_discard(struct mmc_card *card)
2407 {
2408 	/*
2409 	 * As there's no way to detect the discard support bit at v4.5
2410 	 * use the s/w feature support filed.
2411 	 */
2412 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2413 		return 1;
2414 	return 0;
2415 }
2416 EXPORT_SYMBOL(mmc_can_discard);
2417 
2418 int mmc_can_sanitize(struct mmc_card *card)
2419 {
2420 	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2421 		return 0;
2422 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2423 		return 1;
2424 	return 0;
2425 }
2426 EXPORT_SYMBOL(mmc_can_sanitize);
2427 
2428 int mmc_can_secure_erase_trim(struct mmc_card *card)
2429 {
2430 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2431 	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2432 		return 1;
2433 	return 0;
2434 }
2435 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2436 
2437 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2438 			    unsigned int nr)
2439 {
2440 	if (!card->erase_size)
2441 		return 0;
2442 	if (from % card->erase_size || nr % card->erase_size)
2443 		return 0;
2444 	return 1;
2445 }
2446 EXPORT_SYMBOL(mmc_erase_group_aligned);
2447 
2448 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2449 					    unsigned int arg)
2450 {
2451 	struct mmc_host *host = card->host;
2452 	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2453 	unsigned int last_timeout = 0;
2454 	unsigned int max_busy_timeout = host->max_busy_timeout ?
2455 			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2456 
2457 	if (card->erase_shift) {
2458 		max_qty = UINT_MAX >> card->erase_shift;
2459 		min_qty = card->pref_erase >> card->erase_shift;
2460 	} else if (mmc_card_sd(card)) {
2461 		max_qty = UINT_MAX;
2462 		min_qty = card->pref_erase;
2463 	} else {
2464 		max_qty = UINT_MAX / card->erase_size;
2465 		min_qty = card->pref_erase / card->erase_size;
2466 	}
2467 
2468 	/*
2469 	 * We should not only use 'host->max_busy_timeout' as the limitation
2470 	 * when deciding the max discard sectors. We should set a balance value
2471 	 * to improve the erase speed, and it can not get too long timeout at
2472 	 * the same time.
2473 	 *
2474 	 * Here we set 'card->pref_erase' as the minimal discard sectors no
2475 	 * matter what size of 'host->max_busy_timeout', but if the
2476 	 * 'host->max_busy_timeout' is large enough for more discard sectors,
2477 	 * then we can continue to increase the max discard sectors until we
2478 	 * get a balance value. In cases when the 'host->max_busy_timeout'
2479 	 * isn't specified, use the default max erase timeout.
2480 	 */
2481 	do {
2482 		y = 0;
2483 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2484 			timeout = mmc_erase_timeout(card, arg, qty + x);
2485 
2486 			if (qty + x > min_qty && timeout > max_busy_timeout)
2487 				break;
2488 
2489 			if (timeout < last_timeout)
2490 				break;
2491 			last_timeout = timeout;
2492 			y = x;
2493 		}
2494 		qty += y;
2495 	} while (y);
2496 
2497 	if (!qty)
2498 		return 0;
2499 
2500 	/*
2501 	 * When specifying a sector range to trim, chances are we might cross
2502 	 * an erase-group boundary even if the amount of sectors is less than
2503 	 * one erase-group.
2504 	 * If we can only fit one erase-group in the controller timeout budget,
2505 	 * we have to care that erase-group boundaries are not crossed by a
2506 	 * single trim operation. We flag that special case with "eg_boundary".
2507 	 * In all other cases we can just decrement qty and pretend that we
2508 	 * always touch (qty + 1) erase-groups as a simple optimization.
2509 	 */
2510 	if (qty == 1)
2511 		card->eg_boundary = 1;
2512 	else
2513 		qty--;
2514 
2515 	/* Convert qty to sectors */
2516 	if (card->erase_shift)
2517 		max_discard = qty << card->erase_shift;
2518 	else if (mmc_card_sd(card))
2519 		max_discard = qty + 1;
2520 	else
2521 		max_discard = qty * card->erase_size;
2522 
2523 	return max_discard;
2524 }
2525 
2526 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2527 {
2528 	struct mmc_host *host = card->host;
2529 	unsigned int max_discard, max_trim;
2530 
2531 	/*
2532 	 * Without erase_group_def set, MMC erase timeout depends on clock
2533 	 * frequence which can change.  In that case, the best choice is
2534 	 * just the preferred erase size.
2535 	 */
2536 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2537 		return card->pref_erase;
2538 
2539 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2540 	if (mmc_can_trim(card)) {
2541 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2542 		if (max_trim < max_discard)
2543 			max_discard = max_trim;
2544 	} else if (max_discard < card->erase_size) {
2545 		max_discard = 0;
2546 	}
2547 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2548 		mmc_hostname(host), max_discard, host->max_busy_timeout ?
2549 		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2550 	return max_discard;
2551 }
2552 EXPORT_SYMBOL(mmc_calc_max_discard);
2553 
2554 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2555 {
2556 	struct mmc_command cmd = {0};
2557 
2558 	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2559 	    mmc_card_hs400(card) || mmc_card_hs400es(card))
2560 		return 0;
2561 
2562 	cmd.opcode = MMC_SET_BLOCKLEN;
2563 	cmd.arg = blocklen;
2564 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2565 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2566 }
2567 EXPORT_SYMBOL(mmc_set_blocklen);
2568 
2569 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2570 			bool is_rel_write)
2571 {
2572 	struct mmc_command cmd = {0};
2573 
2574 	cmd.opcode = MMC_SET_BLOCK_COUNT;
2575 	cmd.arg = blockcount & 0x0000FFFF;
2576 	if (is_rel_write)
2577 		cmd.arg |= 1 << 31;
2578 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2579 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2580 }
2581 EXPORT_SYMBOL(mmc_set_blockcount);
2582 
2583 static void mmc_hw_reset_for_init(struct mmc_host *host)
2584 {
2585 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2586 		return;
2587 	host->ops->hw_reset(host);
2588 }
2589 
2590 int mmc_hw_reset(struct mmc_host *host)
2591 {
2592 	int ret;
2593 
2594 	if (!host->card)
2595 		return -EINVAL;
2596 
2597 	mmc_bus_get(host);
2598 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2599 		mmc_bus_put(host);
2600 		return -EOPNOTSUPP;
2601 	}
2602 
2603 	ret = host->bus_ops->reset(host);
2604 	mmc_bus_put(host);
2605 
2606 	if (ret)
2607 		pr_warn("%s: tried to reset card, got error %d\n",
2608 			mmc_hostname(host), ret);
2609 
2610 	return ret;
2611 }
2612 EXPORT_SYMBOL(mmc_hw_reset);
2613 
2614 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2615 {
2616 	host->f_init = freq;
2617 
2618 #ifdef CONFIG_MMC_DEBUG
2619 	pr_info("%s: %s: trying to init card at %u Hz\n",
2620 		mmc_hostname(host), __func__, host->f_init);
2621 #endif
2622 	mmc_power_up(host, host->ocr_avail);
2623 
2624 	/*
2625 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2626 	 * do a hardware reset if possible.
2627 	 */
2628 	mmc_hw_reset_for_init(host);
2629 
2630 	/*
2631 	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2632 	 * if the card is being re-initialized, just send it.  CMD52
2633 	 * should be ignored by SD/eMMC cards.
2634 	 * Skip it if we already know that we do not support SDIO commands
2635 	 */
2636 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2637 		sdio_reset(host);
2638 
2639 	mmc_go_idle(host);
2640 
2641 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2642 		mmc_send_if_cond(host, host->ocr_avail);
2643 
2644 	/* Order's important: probe SDIO, then SD, then MMC */
2645 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2646 		if (!mmc_attach_sdio(host))
2647 			return 0;
2648 
2649 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2650 		if (!mmc_attach_sd(host))
2651 			return 0;
2652 
2653 	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2654 		if (!mmc_attach_mmc(host))
2655 			return 0;
2656 
2657 	mmc_power_off(host);
2658 	return -EIO;
2659 }
2660 
2661 int _mmc_detect_card_removed(struct mmc_host *host)
2662 {
2663 	int ret;
2664 
2665 	if (!host->card || mmc_card_removed(host->card))
2666 		return 1;
2667 
2668 	ret = host->bus_ops->alive(host);
2669 
2670 	/*
2671 	 * Card detect status and alive check may be out of sync if card is
2672 	 * removed slowly, when card detect switch changes while card/slot
2673 	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2674 	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2675 	 * detect work 200ms later for this case.
2676 	 */
2677 	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2678 		mmc_detect_change(host, msecs_to_jiffies(200));
2679 		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2680 	}
2681 
2682 	if (ret) {
2683 		mmc_card_set_removed(host->card);
2684 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2685 	}
2686 
2687 	return ret;
2688 }
2689 
2690 int mmc_detect_card_removed(struct mmc_host *host)
2691 {
2692 	struct mmc_card *card = host->card;
2693 	int ret;
2694 
2695 	WARN_ON(!host->claimed);
2696 
2697 	if (!card)
2698 		return 1;
2699 
2700 	if (!mmc_card_is_removable(host))
2701 		return 0;
2702 
2703 	ret = mmc_card_removed(card);
2704 	/*
2705 	 * The card will be considered unchanged unless we have been asked to
2706 	 * detect a change or host requires polling to provide card detection.
2707 	 */
2708 	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2709 		return ret;
2710 
2711 	host->detect_change = 0;
2712 	if (!ret) {
2713 		ret = _mmc_detect_card_removed(host);
2714 		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2715 			/*
2716 			 * Schedule a detect work as soon as possible to let a
2717 			 * rescan handle the card removal.
2718 			 */
2719 			cancel_delayed_work(&host->detect);
2720 			_mmc_detect_change(host, 0, false);
2721 		}
2722 	}
2723 
2724 	return ret;
2725 }
2726 EXPORT_SYMBOL(mmc_detect_card_removed);
2727 
2728 void mmc_rescan(struct work_struct *work)
2729 {
2730 	struct mmc_host *host =
2731 		container_of(work, struct mmc_host, detect.work);
2732 	int i;
2733 
2734 	if (host->rescan_disable)
2735 		return;
2736 
2737 	/* If there is a non-removable card registered, only scan once */
2738 	if (!mmc_card_is_removable(host) && host->rescan_entered)
2739 		return;
2740 	host->rescan_entered = 1;
2741 
2742 	if (host->trigger_card_event && host->ops->card_event) {
2743 		mmc_claim_host(host);
2744 		host->ops->card_event(host);
2745 		mmc_release_host(host);
2746 		host->trigger_card_event = false;
2747 	}
2748 
2749 	mmc_bus_get(host);
2750 
2751 	/*
2752 	 * if there is a _removable_ card registered, check whether it is
2753 	 * still present
2754 	 */
2755 	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2756 		host->bus_ops->detect(host);
2757 
2758 	host->detect_change = 0;
2759 
2760 	/*
2761 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2762 	 * the card is no longer present.
2763 	 */
2764 	mmc_bus_put(host);
2765 	mmc_bus_get(host);
2766 
2767 	/* if there still is a card present, stop here */
2768 	if (host->bus_ops != NULL) {
2769 		mmc_bus_put(host);
2770 		goto out;
2771 	}
2772 
2773 	/*
2774 	 * Only we can add a new handler, so it's safe to
2775 	 * release the lock here.
2776 	 */
2777 	mmc_bus_put(host);
2778 
2779 	mmc_claim_host(host);
2780 	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2781 			host->ops->get_cd(host) == 0) {
2782 		mmc_power_off(host);
2783 		mmc_release_host(host);
2784 		goto out;
2785 	}
2786 
2787 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2788 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2789 			break;
2790 		if (freqs[i] <= host->f_min)
2791 			break;
2792 	}
2793 	mmc_release_host(host);
2794 
2795  out:
2796 	if (host->caps & MMC_CAP_NEEDS_POLL)
2797 		mmc_schedule_delayed_work(&host->detect, HZ);
2798 }
2799 
2800 void mmc_start_host(struct mmc_host *host)
2801 {
2802 	host->f_init = max(freqs[0], host->f_min);
2803 	host->rescan_disable = 0;
2804 	host->ios.power_mode = MMC_POWER_UNDEFINED;
2805 
2806 	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2807 		mmc_claim_host(host);
2808 		mmc_power_up(host, host->ocr_avail);
2809 		mmc_release_host(host);
2810 	}
2811 
2812 	mmc_gpiod_request_cd_irq(host);
2813 	_mmc_detect_change(host, 0, false);
2814 }
2815 
2816 void mmc_stop_host(struct mmc_host *host)
2817 {
2818 #ifdef CONFIG_MMC_DEBUG
2819 	unsigned long flags;
2820 	spin_lock_irqsave(&host->lock, flags);
2821 	host->removed = 1;
2822 	spin_unlock_irqrestore(&host->lock, flags);
2823 #endif
2824 	if (host->slot.cd_irq >= 0)
2825 		disable_irq(host->slot.cd_irq);
2826 
2827 	host->rescan_disable = 1;
2828 	cancel_delayed_work_sync(&host->detect);
2829 
2830 	/* clear pm flags now and let card drivers set them as needed */
2831 	host->pm_flags = 0;
2832 
2833 	mmc_bus_get(host);
2834 	if (host->bus_ops && !host->bus_dead) {
2835 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2836 		host->bus_ops->remove(host);
2837 		mmc_claim_host(host);
2838 		mmc_detach_bus(host);
2839 		mmc_power_off(host);
2840 		mmc_release_host(host);
2841 		mmc_bus_put(host);
2842 		return;
2843 	}
2844 	mmc_bus_put(host);
2845 
2846 	mmc_claim_host(host);
2847 	mmc_power_off(host);
2848 	mmc_release_host(host);
2849 }
2850 
2851 int mmc_power_save_host(struct mmc_host *host)
2852 {
2853 	int ret = 0;
2854 
2855 #ifdef CONFIG_MMC_DEBUG
2856 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2857 #endif
2858 
2859 	mmc_bus_get(host);
2860 
2861 	if (!host->bus_ops || host->bus_dead) {
2862 		mmc_bus_put(host);
2863 		return -EINVAL;
2864 	}
2865 
2866 	if (host->bus_ops->power_save)
2867 		ret = host->bus_ops->power_save(host);
2868 
2869 	mmc_bus_put(host);
2870 
2871 	mmc_power_off(host);
2872 
2873 	return ret;
2874 }
2875 EXPORT_SYMBOL(mmc_power_save_host);
2876 
2877 int mmc_power_restore_host(struct mmc_host *host)
2878 {
2879 	int ret;
2880 
2881 #ifdef CONFIG_MMC_DEBUG
2882 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2883 #endif
2884 
2885 	mmc_bus_get(host);
2886 
2887 	if (!host->bus_ops || host->bus_dead) {
2888 		mmc_bus_put(host);
2889 		return -EINVAL;
2890 	}
2891 
2892 	mmc_power_up(host, host->card->ocr);
2893 	ret = host->bus_ops->power_restore(host);
2894 
2895 	mmc_bus_put(host);
2896 
2897 	return ret;
2898 }
2899 EXPORT_SYMBOL(mmc_power_restore_host);
2900 
2901 /*
2902  * Flush the cache to the non-volatile storage.
2903  */
2904 int mmc_flush_cache(struct mmc_card *card)
2905 {
2906 	int err = 0;
2907 
2908 	if (mmc_card_mmc(card) &&
2909 			(card->ext_csd.cache_size > 0) &&
2910 			(card->ext_csd.cache_ctrl & 1)) {
2911 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2912 				EXT_CSD_FLUSH_CACHE, 1, 0);
2913 		if (err)
2914 			pr_err("%s: cache flush error %d\n",
2915 					mmc_hostname(card->host), err);
2916 	}
2917 
2918 	return err;
2919 }
2920 EXPORT_SYMBOL(mmc_flush_cache);
2921 
2922 #ifdef CONFIG_PM_SLEEP
2923 /* Do the card removal on suspend if card is assumed removeable
2924  * Do that in pm notifier while userspace isn't yet frozen, so we will be able
2925    to sync the card.
2926 */
2927 static int mmc_pm_notify(struct notifier_block *notify_block,
2928 			unsigned long mode, void *unused)
2929 {
2930 	struct mmc_host *host = container_of(
2931 		notify_block, struct mmc_host, pm_notify);
2932 	unsigned long flags;
2933 	int err = 0;
2934 
2935 	switch (mode) {
2936 	case PM_HIBERNATION_PREPARE:
2937 	case PM_SUSPEND_PREPARE:
2938 	case PM_RESTORE_PREPARE:
2939 		spin_lock_irqsave(&host->lock, flags);
2940 		host->rescan_disable = 1;
2941 		spin_unlock_irqrestore(&host->lock, flags);
2942 		cancel_delayed_work_sync(&host->detect);
2943 
2944 		if (!host->bus_ops)
2945 			break;
2946 
2947 		/* Validate prerequisites for suspend */
2948 		if (host->bus_ops->pre_suspend)
2949 			err = host->bus_ops->pre_suspend(host);
2950 		if (!err)
2951 			break;
2952 
2953 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2954 		host->bus_ops->remove(host);
2955 		mmc_claim_host(host);
2956 		mmc_detach_bus(host);
2957 		mmc_power_off(host);
2958 		mmc_release_host(host);
2959 		host->pm_flags = 0;
2960 		break;
2961 
2962 	case PM_POST_SUSPEND:
2963 	case PM_POST_HIBERNATION:
2964 	case PM_POST_RESTORE:
2965 
2966 		spin_lock_irqsave(&host->lock, flags);
2967 		host->rescan_disable = 0;
2968 		spin_unlock_irqrestore(&host->lock, flags);
2969 		_mmc_detect_change(host, 0, false);
2970 
2971 	}
2972 
2973 	return 0;
2974 }
2975 
2976 void mmc_register_pm_notifier(struct mmc_host *host)
2977 {
2978 	host->pm_notify.notifier_call = mmc_pm_notify;
2979 	register_pm_notifier(&host->pm_notify);
2980 }
2981 
2982 void mmc_unregister_pm_notifier(struct mmc_host *host)
2983 {
2984 	unregister_pm_notifier(&host->pm_notify);
2985 }
2986 #endif
2987 
2988 /**
2989  * mmc_init_context_info() - init synchronization context
2990  * @host: mmc host
2991  *
2992  * Init struct context_info needed to implement asynchronous
2993  * request mechanism, used by mmc core, host driver and mmc requests
2994  * supplier.
2995  */
2996 void mmc_init_context_info(struct mmc_host *host)
2997 {
2998 	host->context_info.is_new_req = false;
2999 	host->context_info.is_done_rcv = false;
3000 	host->context_info.is_waiting_last_req = false;
3001 	init_waitqueue_head(&host->context_info.wait);
3002 }
3003 
3004 static int __init mmc_init(void)
3005 {
3006 	int ret;
3007 
3008 	ret = mmc_register_bus();
3009 	if (ret)
3010 		return ret;
3011 
3012 	ret = mmc_register_host_class();
3013 	if (ret)
3014 		goto unregister_bus;
3015 
3016 	ret = sdio_register_bus();
3017 	if (ret)
3018 		goto unregister_host_class;
3019 
3020 	return 0;
3021 
3022 unregister_host_class:
3023 	mmc_unregister_host_class();
3024 unregister_bus:
3025 	mmc_unregister_bus();
3026 	return ret;
3027 }
3028 
3029 static void __exit mmc_exit(void)
3030 {
3031 	sdio_unregister_bus();
3032 	mmc_unregister_host_class();
3033 	mmc_unregister_bus();
3034 }
3035 
3036 subsys_initcall(mmc_init);
3037 module_exit(mmc_exit);
3038 
3039 MODULE_LICENSE("GPL");
3040