// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "card.h"
#include "crypto.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
#define SD_DISCARD_TIMEOUT_MS	(250)

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq for two reasons.
	 * First, it allows several work items (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace is frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if ((cmd && cmd->error) || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[get_random_u32_below(ARRAY_SIZE(data_errors))];
	data->bytes_xfered = get_random_u32_below(data->bytes_xfered >> 9) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);

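/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * host driver that supports 'cap_cmd_during_tfr' requests calls
 * mmc_command_done() from its command-complete path, while the data
 * transfer is still running, and mmc_request_done() once the whole
 * request has finished. The 'foo' names below are made up for the example.
 *
 *	// command phase finished, data still in flight:
 *	mmc_command_done(foo->mmc, foo->mrq);
 *
 *	// later, when the data transfer completes:
 *	mmc_request_done(foo->mmc, foo->mrq);
 */
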
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which has completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if (!mmc_op_tuning(cmd->opcode) &&
	    !host->retune_crc_disable &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ) ||
	    (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	/*
	 * We list various conditions for the command to be considered
	 * properly done:
	 *
	 * - There was no error, OK fine then
	 * - We are not doing some kind of retry
	 * - The card was removed (...so just complete everything no matter
	 *   if there are errors or retries)
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->sbc->opcode,
				mrq->sbc->error,
				mrq->sbc->resp[0], mrq->sbc->resp[1],
				mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}
	}
	/*
	 * Request starter must handle retries - see
	 * mmc_wait_for_req_done().
	 */
	if (mrq->done)
		mrq->done(mrq);
}

EXPORT_SYMBOL(mmc_request_done);

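/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * host driver typically calls mmc_request_done() from its interrupt
 * handler once the hardware has finished with the request. The foo_host
 * structure and foo_check_errors() helper are made up for the example.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *foo = dev_id;
 *		struct mmc_request *mrq = foo->mrq;
 *
 *		foo->mrq = NULL;
 *		mrq->cmd->error = foo_check_errors(foo);
 *		mmc_request_done(foo->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */
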
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For SDIO R/W commands we must wait for the card to stop signalling
	 * busy, otherwise some SDIO devices won't work properly.
	 * This wait is bypassed for I/O abort, reset and bus suspend operations.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500; /* Wait approx. 500ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * The retry path could come through here without having waited
		 * on cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	host->ops->request(host, mrq);
}

static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
			     bool cqe)
{
	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	if (mrq->cmd) {
		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), cqe ? "CQE direct " : "",
			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
	} else if (cqe) {
		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
	}

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}
}

static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
{
	unsigned int i, sz = 0;
	struct scatterlist *sg;

	if (mrq->cmd) {
		mrq->cmd->error = 0;
		mrq->cmd->mrq = mrq;
		mrq->cmd->data = mrq->data;
	}
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;

		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;

		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}

	return 0;
}

int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	init_completion(&mrq->cmd_completion);

	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	mmc_mrq_pr_debug(host, mrq, false);

	WARN_ON(!host->claimed);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
EXPORT_SYMBOL(mmc_start_request);

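/*
 * Usage sketch (illustrative only, not part of this file): asynchronous
 * submission with a caller-supplied completion callback, in contrast to
 * the blocking mmc_wait_for_req() below. 'foo_done' is a made-up callback
 * that will be invoked from mmc_request_done().
 *
 *	mrq->done = foo_done;
 *	err = mmc_start_request(host, mrq);
 *	if (err) {
 *		// the request was never started; complete it locally
 *	}
 */
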
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}

void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);

/*
 * mmc_cqe_start_req - Start a CQE request.
 * @host: MMC host to start the request
 * @mrq: request to start
 *
 * Start the request, re-tuning first if needed and possible. Returns an error
 * code if the request fails to start or -EBUSY if CQE is busy.
 */
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/*
	 * CQE cannot process re-tuning commands. Caller must hold retuning
	 * while CQE is in use.  Re-tuning can happen here only when CQE has no
	 * active requests i.e. this is the first.  Note, re-tuning will call
	 * ->cqe_off().
	 */
	err = mmc_retune(host);
	if (err)
		goto out_err;

	mrq->host = host;

	mmc_mrq_pr_debug(host, mrq, true);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		goto out_err;

	err = host->cqe_ops->cqe_request(host, mrq);
	if (err)
		goto out_err;

	trace_mmc_request_start(host, mrq);

	return 0;

out_err:
	if (mrq->cmd) {
		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, err);
	} else {
		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
			 mmc_hostname(host), mrq->tag, err);
	}
	return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);

/**
 *	mmc_cqe_request_done - CQE has finished processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	CQE drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	mmc_should_fail_request(host, mrq);

	/* Flag re-tuning needed on CRC errors */
	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ))
		mmc_retune_needed(host);

	trace_mmc_request_done(host, mrq);

	if (mrq->cmd) {
		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
	} else {
		pr_debug("%s: CQE transfer done tag %d\n",
			 mmc_hostname(host), mrq->tag);
	}

	if (mrq->data) {
		pr_debug("%s:     %d bytes transferred: %d\n",
			 mmc_hostname(host),
			 mrq->data->bytes_xfered, mrq->data->error);
	}

	mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_cqe_request_done);

/**
 *	mmc_cqe_post_req - CQE post process of a completed MMC request
 *	@host: MMC host
 *	@mrq: MMC request to be processed
 */
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->cqe_ops->cqe_post_req)
		host->cqe_ops->cqe_post_req(host, mrq);
}
EXPORT_SYMBOL(mmc_cqe_post_req);

/* Arbitrary 1 second timeout */
#define MMC_CQE_RECOVERY_TIMEOUT	1000

/*
 * mmc_cqe_recovery - Recover from CQE errors.
 * @host: MMC host to recover
 *
 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
 * in eMMC, and discarding the queue in CQE. CQE must call
 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 * fails to discard its queue.
 */
int mmc_cqe_recovery(struct mmc_host *host)
{
	struct mmc_command cmd;
	int err;

	mmc_retune_hold_now(host);

	/*
	 * Recovery is expected to be rare, if it happens at all, but it reduces
	 * performance, so make sure it is not completely silent.
	 */
	pr_warn("%s: running CQE recovery\n", mmc_hostname(host));

	host->cqe_ops->cqe_recovery_start(host);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode       = MMC_STOP_TRANSMISSION;
	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode       = MMC_CMDQ_TASK_MGMT;
	cmd.arg          = 1; /* Discard entire queue */
	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	host->cqe_ops->cqe_recovery_finish(host);

	if (err)
		err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	mmc_retune_release(host);

	return err;
}
EXPORT_SYMBOL(mmc_cqe_recovery);

/**
 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 *	@host: MMC host
 *	@mrq: MMC request
 *
 *	mmc_is_req_done() is used with requests that have
 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 *	starting a request and before waiting for it to complete. That is,
 *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 *	and before mmc_wait_for_req_done(). If it is called at other times the
 *	result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);

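/*
 * Usage sketch (illustrative only, not part of this file): the intended
 * 'cap_cmd_during_tfr' flow. While the data transfer started below is
 * still ongoing, commands that do not use the data lines (e.g. CMD13) may
 * be issued; 'status_cmd' is assumed to be such a prepared command.
 *
 *	mrq->cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, mrq);		// returns once the command is done
 *
 *	while (!mmc_is_req_done(host, mrq))
 *		mmc_wait_for_cmd(host, &status_cmd, 0);
 *
 *	mmc_wait_for_req_done(host, mrq);	// wait for the transfer itself
 */
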
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
 *	requests, the transfer is ongoing and the caller can issue further
 *	commands that do not use the data lines, and then wait by calling
 *	mmc_wait_for_req_done().
 *	Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

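/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * caller reading one 512-byte block with mmc_wait_for_req(). 'card', 'buf'
 * and 'blk_addr' are assumed to be provided by the caller, the host must
 * already be claimed, and SPI response flags are omitted for brevity.
 *
 *	struct mmc_request mrq = {};
 *	struct mmc_command cmd = {};
 *	struct mmc_data data = {};
 *	struct scatterlist sg;
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	sg_init_one(&sg, buf, 512);
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 *	if (cmd.error || data.error) {
 *		// handle the error reported in the request
 *	}
 */
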
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

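/*
 * Usage sketch (illustrative only, not part of this file): querying the
 * card status with CMD13 via mmc_wait_for_cmd(). The host is assumed to be
 * claimed and 'card' to be a valid, selected card.
 *
 *	struct mmc_command cmd = {};
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 *	if (!err)
 *		pr_debug("card status: 0x%08x\n", cmd.resp[0]);
 */
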
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.taac_ns * mult;
	data->timeout_clks = card->csd.taac_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

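/*
 * Worked example (illustrative, with assumed CSD values): for an SD card
 * write with taac_ns = 1000000 (1 ms), taac_clks = 0 and r2w_factor = 2,
 * the code above computes mult = 100 << 2 = 400, giving
 * data->timeout_ns = 400000000 ns (400 ms). Since 400000 us is below the
 * 3000000 us write limit, the value is kept unchanged.
 */
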
/*
 * Allow claiming an already claimed host if the context is the same or there is
 * no context but the task is the same.
 */
static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
				   struct task_struct *task)
{
	return host->claimer == ctx ||
	       (!ctx && task && host->claimer->task == task);
}

static inline void mmc_ctx_set_claimer(struct mmc_host *host,
				       struct mmc_ctx *ctx,
				       struct task_struct *task)
{
	if (!host->claimer) {
		if (ctx)
			host->claimer = ctx;
		else
			host->claimer = &host->default_ctx;
	}
	if (task)
		host->claimer->task = task;
}

/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@ctx: context that claims the host or NULL in which case the default
 *	context will be used
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	dereferences to a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
		     atomic_t *abort)
{
	struct task_struct *task = ctx ? NULL : current;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		mmc_ctx_set_claimer(host, ctx, task);
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release an MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer->task = NULL;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
			pm_runtime_put_sync_suspend(mmc_dev(host));
		else
			pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);

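/*
 * Usage sketch (illustrative only, not part of this file): code that
 * accesses the card directly brackets the access with a claim/release
 * pair so the host is used exclusively; mmc_claim_host() in core.h is the
 * usual wrapper around __mmc_claim_host().
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 */
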
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	pm_runtime_get_sync(&card->dev);
	__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	struct mmc_host *host = card->host;

	WARN_ON(ctx && host->claimer != ctx);

	mmc_release_host(host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);

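/*
 * Usage sketch (illustrative only, not part of this file): callers that
 * also need the card runtime-resumed use the get/put helpers instead of a
 * bare claim/release. Passing NULL selects the default claimer context.
 *
 *	mmc_get_card(card, NULL);	// runtime resume + claim host
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_put_card(card, NULL);	// release host, allow runtime suspend
 */
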
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);
	if (!err) {
		mmc_retune_clear(host);
		mmc_retune_enable(host);
		return 0;
	}

	/* Only print error when we don't check for card removal */
	if (!host->detect_change) {
		pr_err("%s: tuning execution failed: %d\n",
			mmc_hostname(host), err);
		mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
	}

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);

	mmc_crypto_set_initial_state(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
101786e8286aSAnton Vorontsov  *
101886e8286aSAnton Vorontsov  * Depending on the @low_bits flag the function prefers low or high OCR bits
101986e8286aSAnton Vorontsov  * on boundary voltages. For example,
102086e8286aSAnton Vorontsov  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
102186e8286aSAnton Vorontsov  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
102286e8286aSAnton Vorontsov  *
102386e8286aSAnton Vorontsov  * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
102486e8286aSAnton Vorontsov  */
mmc_vdd_to_ocrbitnum(int vdd,bool low_bits)102586e8286aSAnton Vorontsov static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
102686e8286aSAnton Vorontsov {
102786e8286aSAnton Vorontsov 	const int max_bit = ilog2(MMC_VDD_35_36);
102886e8286aSAnton Vorontsov 	int bit;
102986e8286aSAnton Vorontsov 
103086e8286aSAnton Vorontsov 	if (vdd < 1650 || vdd > 3600)
103186e8286aSAnton Vorontsov 		return -EINVAL;
103286e8286aSAnton Vorontsov 
103386e8286aSAnton Vorontsov 	if (vdd >= 1650 && vdd <= 1950)
103486e8286aSAnton Vorontsov 		return ilog2(MMC_VDD_165_195);
103586e8286aSAnton Vorontsov 
103686e8286aSAnton Vorontsov 	if (low_bits)
103786e8286aSAnton Vorontsov 		vdd -= 1;
103886e8286aSAnton Vorontsov 
103986e8286aSAnton Vorontsov 	/* Base 2000 mV, step 100 mV, bit's base 8. */
104086e8286aSAnton Vorontsov 	bit = (vdd - 2000) / 100 + 8;
104186e8286aSAnton Vorontsov 	if (bit > max_bit)
104286e8286aSAnton Vorontsov 		return max_bit;
104386e8286aSAnton Vorontsov 	return bit;
104486e8286aSAnton Vorontsov }
104586e8286aSAnton Vorontsov 
104686e8286aSAnton Vorontsov /**
104786e8286aSAnton Vorontsov  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
104886e8286aSAnton Vorontsov  * @vdd_min:	minimum voltage value (mV)
104986e8286aSAnton Vorontsov  * @vdd_max:	maximum voltage value (mV)
105086e8286aSAnton Vorontsov  *
105186e8286aSAnton Vorontsov  * This function returns the OCR mask bits according to the provided @vdd_min
105286e8286aSAnton Vorontsov  * and @vdd_max values. If conversion is not possible the function returns 0.
105386e8286aSAnton Vorontsov  *
105486e8286aSAnton Vorontsov  * Notes wrt boundary cases:
105586e8286aSAnton Vorontsov  * This function sets the OCR bits for all boundary voltages, for example
105686e8286aSAnton Vorontsov  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
105786e8286aSAnton Vorontsov  * MMC_VDD_34_35 mask.
105886e8286aSAnton Vorontsov  */
mmc_vddrange_to_ocrmask(int vdd_min,int vdd_max)105986e8286aSAnton Vorontsov u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
106086e8286aSAnton Vorontsov {
106186e8286aSAnton Vorontsov 	u32 mask = 0;
106286e8286aSAnton Vorontsov 
106386e8286aSAnton Vorontsov 	if (vdd_max < vdd_min)
106486e8286aSAnton Vorontsov 		return 0;
106586e8286aSAnton Vorontsov 
106686e8286aSAnton Vorontsov 	/* Prefer high bits for the boundary vdd_max values. */
106786e8286aSAnton Vorontsov 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
106886e8286aSAnton Vorontsov 	if (vdd_max < 0)
106986e8286aSAnton Vorontsov 		return 0;
107086e8286aSAnton Vorontsov 
107186e8286aSAnton Vorontsov 	/* Prefer low bits for the boundary vdd_min values. */
107286e8286aSAnton Vorontsov 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
107386e8286aSAnton Vorontsov 	if (vdd_min < 0)
107486e8286aSAnton Vorontsov 		return 0;
107586e8286aSAnton Vorontsov 
107686e8286aSAnton Vorontsov 	/* Fill the mask, from max bit to min bit. */
107786e8286aSAnton Vorontsov 	while (vdd_max >= vdd_min)
107886e8286aSAnton Vorontsov 		mask |= 1 << vdd_max--;
107986e8286aSAnton Vorontsov 
108086e8286aSAnton Vorontsov 	return mask;
108186e8286aSAnton Vorontsov }
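/*
 * Example, using assumed boundary values: mmc_vddrange_to_ocrmask(3300, 3400)
 * resolves vdd_max to bit 22 (MMC_VDD_34_35, high bit preferred) and vdd_min
 * to bit 20 (MMC_VDD_32_33, low bit preferred), so the loop above returns
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */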
108286e8286aSAnton Vorontsov 
mmc_of_get_func_num(struct device_node * node)108325185f3fSSascha Hauer static int mmc_of_get_func_num(struct device_node *node)
108425185f3fSSascha Hauer {
108525185f3fSSascha Hauer 	u32 reg;
108625185f3fSSascha Hauer 	int ret;
108725185f3fSSascha Hauer 
108825185f3fSSascha Hauer 	ret = of_property_read_u32(node, "reg", &reg);
108925185f3fSSascha Hauer 	if (ret < 0)
109025185f3fSSascha Hauer 		return ret;
109125185f3fSSascha Hauer 
109225185f3fSSascha Hauer 	return reg;
109325185f3fSSascha Hauer }
109425185f3fSSascha Hauer 
mmc_of_find_child_device(struct mmc_host * host,unsigned func_num)109525185f3fSSascha Hauer struct device_node *mmc_of_find_child_device(struct mmc_host *host,
109625185f3fSSascha Hauer 		unsigned func_num)
109725185f3fSSascha Hauer {
109825185f3fSSascha Hauer 	struct device_node *node;
109925185f3fSSascha Hauer 
110025185f3fSSascha Hauer 	if (!host->parent || !host->parent->of_node)
110125185f3fSSascha Hauer 		return NULL;
110225185f3fSSascha Hauer 
110325185f3fSSascha Hauer 	for_each_child_of_node(host->parent->of_node, node) {
110425185f3fSSascha Hauer 		if (mmc_of_get_func_num(node) == func_num)
110525185f3fSSascha Hauer 			return node;
110625185f3fSSascha Hauer 	}
110725185f3fSSascha Hauer 
110825185f3fSSascha Hauer 	return NULL;
110925185f3fSSascha Hauer }
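/*
 * For illustration only, a hypothetical devicetree fragment:
 *
 *	mmc-host {
 *		wifi@1 {
 *			reg = <1>;
 *		};
 *	};
 *
 * mmc_of_find_child_device(host, 1) would return the "wifi@1" node, since
 * its "reg" property matches the requested SDIO function number.
 */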
111025185f3fSSascha Hauer 
11117ea239d9SPierre Ossman /*
1112aaac1b47SPierre Ossman  * Mask off any voltages we don't support and select
1113aaac1b47SPierre Ossman  * the lowest voltage
1114aaac1b47SPierre Ossman  */
mmc_select_voltage(struct mmc_host * host,u32 ocr)11157ea239d9SPierre Ossman u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1116aaac1b47SPierre Ossman {
1117aaac1b47SPierre Ossman 	int bit;
1118aaac1b47SPierre Ossman 
1119726d6f23SUlf Hansson 	/*
1120726d6f23SUlf Hansson 	 * Sanity check the voltages that the card claims to
1121726d6f23SUlf Hansson 	 * support.
1122726d6f23SUlf Hansson 	 */
1123726d6f23SUlf Hansson 	if (ocr & 0x7F) {
1124726d6f23SUlf Hansson 		dev_warn(mmc_dev(host),
1125726d6f23SUlf Hansson 		"card claims to support voltages below defined range\n");
1126726d6f23SUlf Hansson 		ocr &= ~0x7F;
1127726d6f23SUlf Hansson 	}
1128726d6f23SUlf Hansson 
1129aaac1b47SPierre Ossman 	ocr &= host->ocr_avail;
1130ce69d37bSUlf Hansson 	if (!ocr) {
1131ce69d37bSUlf Hansson 		dev_warn(mmc_dev(host), "no support for card's volts\n");
1132ce69d37bSUlf Hansson 		return 0;
1133ce69d37bSUlf Hansson 	}
1134aaac1b47SPierre Ossman 
1135ce69d37bSUlf Hansson 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1136ce69d37bSUlf Hansson 		bit = ffs(ocr) - 1;
1137aaac1b47SPierre Ossman 		ocr &= 3 << bit;
1138ce69d37bSUlf Hansson 		mmc_power_cycle(host, ocr);
1139aaac1b47SPierre Ossman 	} else {
1140ce69d37bSUlf Hansson 		bit = fls(ocr) - 1;
114139a72dbfSYann Gautier 		/*
114239a72dbfSYann Gautier 		 * The bit variable represents the highest voltage bit set in
114339a72dbfSYann Gautier 		 * the OCR register.
114439a72dbfSYann Gautier 		 * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
114539a72dbfSYann Gautier 		 * we must shift the mask '3' by (bit - 1).
114639a72dbfSYann Gautier 		 */
114739a72dbfSYann Gautier 		ocr &= 3 << (bit - 1);
1148ce69d37bSUlf Hansson 		if (bit != host->ios.vdd)
1149ce69d37bSUlf Hansson 			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1150aaac1b47SPierre Ossman 	}
1151aaac1b47SPierre Ossman 
1152aaac1b47SPierre Ossman 	return ocr;
1153aaac1b47SPierre Ossman }
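/*
 * Example with assumed values: if the card reports ocr = 0x00ff8000 and
 * host->ocr_avail covers only MMC_VDD_32_33 | MMC_VDD_33_34, the
 * intersection keeps bits 20-21. Without MMC_CAP2_FULL_PWR_CYCLE, the else
 * branch above takes the highest set bit (21) and returns 3 << 20, i.e. a
 * window of two adjacent OCR bits (3.2V-3.4V).
 */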
1154aaac1b47SPierre Ossman 
mmc_set_signal_voltage(struct mmc_host * host,int signal_voltage)11554e74b6b3SUlf Hansson int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1156567c8903SJohan Rudholm {
1157567c8903SJohan Rudholm 	int err = 0;
1158567c8903SJohan Rudholm 	int old_signal_voltage = host->ios.signal_voltage;
1159567c8903SJohan Rudholm 
1160567c8903SJohan Rudholm 	host->ios.signal_voltage = signal_voltage;
11619eadcc05SUlf Hansson 	if (host->ops->start_signal_voltage_switch)
1162567c8903SJohan Rudholm 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1163567c8903SJohan Rudholm 
1164567c8903SJohan Rudholm 	if (err)
1165567c8903SJohan Rudholm 		host->ios.signal_voltage = old_signal_voltage;
1166567c8903SJohan Rudholm 
1167567c8903SJohan Rudholm 	return err;
1168567c8903SJohan Rudholm 
1169567c8903SJohan Rudholm }
1170567c8903SJohan Rudholm 
mmc_set_initial_signal_voltage(struct mmc_host * host)1171508c9864SUlf Hansson void mmc_set_initial_signal_voltage(struct mmc_host *host)
1172508c9864SUlf Hansson {
1173508c9864SUlf Hansson 	/* Try to set signal voltage to 3.3V but fall back to 1.8V or 1.2V */
1174508c9864SUlf Hansson 	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1175508c9864SUlf Hansson 		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1176508c9864SUlf Hansson 	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1177508c9864SUlf Hansson 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1178508c9864SUlf Hansson 	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1179508c9864SUlf Hansson 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1180508c9864SUlf Hansson }
1181508c9864SUlf Hansson 
mmc_host_set_uhs_voltage(struct mmc_host * host)11823f496afbSAdrian Hunter int mmc_host_set_uhs_voltage(struct mmc_host *host)
11833f496afbSAdrian Hunter {
11843f496afbSAdrian Hunter 	u32 clock;
11853f496afbSAdrian Hunter 
11863f496afbSAdrian Hunter 	/*
11873f496afbSAdrian Hunter 	 * During a signal voltage level switch, the clock must be gated
11883f496afbSAdrian Hunter 	 * for 5 ms according to the SD spec
11893f496afbSAdrian Hunter 	 */
11903f496afbSAdrian Hunter 	clock = host->ios.clock;
11913f496afbSAdrian Hunter 	host->ios.clock = 0;
11923f496afbSAdrian Hunter 	mmc_set_ios(host);
11933f496afbSAdrian Hunter 
11943f496afbSAdrian Hunter 	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
11953f496afbSAdrian Hunter 		return -EAGAIN;
11963f496afbSAdrian Hunter 
11973f496afbSAdrian Hunter 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
11983f496afbSAdrian Hunter 	mmc_delay(10);
11993f496afbSAdrian Hunter 	host->ios.clock = clock;
12003f496afbSAdrian Hunter 	mmc_set_ios(host);
12013f496afbSAdrian Hunter 
12023f496afbSAdrian Hunter 	return 0;
12033f496afbSAdrian Hunter }
12043f496afbSAdrian Hunter 
mmc_set_uhs_voltage(struct mmc_host * host,u32 ocr)12052ed573b6SUlf Hansson int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1206f2119df6SArindam Nath {
1207c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1208f2119df6SArindam Nath 	int err = 0;
1209f2119df6SArindam Nath 
1210f2119df6SArindam Nath 	/*
12110797e5f1SJohan Rudholm 	 * If we cannot switch voltages, return failure so the caller
12120797e5f1SJohan Rudholm 	 * can continue without UHS mode
12130797e5f1SJohan Rudholm 	 */
12140797e5f1SJohan Rudholm 	if (!host->ops->start_signal_voltage_switch)
12150797e5f1SJohan Rudholm 		return -EPERM;
12160797e5f1SJohan Rudholm 	if (!host->ops->card_busy)
12176606110dSJoe Perches 		pr_warn("%s: cannot verify signal voltage switch\n",
12180797e5f1SJohan Rudholm 			mmc_hostname(host));
12190797e5f1SJohan Rudholm 
1220f2119df6SArindam Nath 	cmd.opcode = SD_SWITCH_VOLTAGE;
1221f2119df6SArindam Nath 	cmd.arg = 0;
1222f2119df6SArindam Nath 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1223f2119df6SArindam Nath 
1224f2119df6SArindam Nath 	err = mmc_wait_for_cmd(host, &cmd, 0);
1225f2119df6SArindam Nath 	if (err)
1226147186f5SDooHyun Hwang 		goto power_cycle;
1227f2119df6SArindam Nath 
12289eadcc05SUlf Hansson 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
12299eadcc05SUlf Hansson 		return -EIO;
12309eadcc05SUlf Hansson 
12310797e5f1SJohan Rudholm 	/*
12320797e5f1SJohan Rudholm 	 * The card should drive cmd and dat[0:3] low immediately
12330797e5f1SJohan Rudholm 	 * after the response of cmd11, but wait 1 ms to be sure
12340797e5f1SJohan Rudholm 	 */
12350797e5f1SJohan Rudholm 	mmc_delay(1);
12360797e5f1SJohan Rudholm 	if (host->ops->card_busy && !host->ops->card_busy(host)) {
12370797e5f1SJohan Rudholm 		err = -EAGAIN;
12380797e5f1SJohan Rudholm 		goto power_cycle;
12390797e5f1SJohan Rudholm 	}
12400797e5f1SJohan Rudholm 
12413f496afbSAdrian Hunter 	if (mmc_host_set_uhs_voltage(host)) {
12420797e5f1SJohan Rudholm 		/*
12430797e5f1SJohan Rudholm 		 * Voltages may not have been switched, but we've already
12440797e5f1SJohan Rudholm 		 * sent CMD11, so a power cycle is required anyway
12450797e5f1SJohan Rudholm 		 */
12460797e5f1SJohan Rudholm 		err = -EAGAIN;
12470797e5f1SJohan Rudholm 		goto power_cycle;
1248f2119df6SArindam Nath 	}
1249f2119df6SArindam Nath 
12500797e5f1SJohan Rudholm 	/* Wait for at least 1 ms according to spec */
12510797e5f1SJohan Rudholm 	mmc_delay(1);
12520797e5f1SJohan Rudholm 
12530797e5f1SJohan Rudholm 	/*
12540797e5f1SJohan Rudholm 	 * Failure to switch is indicated by the card holding
12550797e5f1SJohan Rudholm 	 * dat[0:3] low
12560797e5f1SJohan Rudholm 	 */
12570797e5f1SJohan Rudholm 	if (host->ops->card_busy && host->ops->card_busy(host))
12580797e5f1SJohan Rudholm 		err = -EAGAIN;
12590797e5f1SJohan Rudholm 
12600797e5f1SJohan Rudholm power_cycle:
12610797e5f1SJohan Rudholm 	if (err) {
12620797e5f1SJohan Rudholm 		pr_debug("%s: Signal voltage switch failed, "
12630797e5f1SJohan Rudholm 			"power cycling card\n", mmc_hostname(host));
12640f791fdaSUlf Hansson 		mmc_power_cycle(host, ocr);
12650797e5f1SJohan Rudholm 	}
12660797e5f1SJohan Rudholm 
12670797e5f1SJohan Rudholm 	return err;
1268f2119df6SArindam Nath }
1269f2119df6SArindam Nath 
1270aaac1b47SPierre Ossman /*
12717ea239d9SPierre Ossman  * Select timing parameters for host.
1272aaac1b47SPierre Ossman  */
mmc_set_timing(struct mmc_host * host,unsigned int timing)12737ea239d9SPierre Ossman void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1274aaac1b47SPierre Ossman {
12757ea239d9SPierre Ossman 	host->ios.timing = timing;
12767ea239d9SPierre Ossman 	mmc_set_ios(host);
1277aaac1b47SPierre Ossman }
1278aaac1b47SPierre Ossman 
1279aaac1b47SPierre Ossman /*
1280d6d50a15SArindam Nath  * Select appropriate driver type for host.
1281d6d50a15SArindam Nath  */
mmc_set_driver_type(struct mmc_host * host,unsigned int drv_type)1282d6d50a15SArindam Nath void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1283d6d50a15SArindam Nath {
1284d6d50a15SArindam Nath 	host->ios.drv_type = drv_type;
1285d6d50a15SArindam Nath 	mmc_set_ios(host);
1286d6d50a15SArindam Nath }
1287d6d50a15SArindam Nath 
mmc_select_drive_strength(struct mmc_card * card,unsigned int max_dtr,int card_drv_type,int * drv_type)1288e23350b3SAdrian Hunter int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1289e23350b3SAdrian Hunter 			      int card_drv_type, int *drv_type)
1290e23350b3SAdrian Hunter {
1291e23350b3SAdrian Hunter 	struct mmc_host *host = card->host;
1292e23350b3SAdrian Hunter 	int host_drv_type = SD_DRIVER_TYPE_B;
1293e23350b3SAdrian Hunter 
1294e23350b3SAdrian Hunter 	*drv_type = 0;
1295e23350b3SAdrian Hunter 
1296e23350b3SAdrian Hunter 	if (!host->ops->select_drive_strength)
1297e23350b3SAdrian Hunter 		return 0;
1298e23350b3SAdrian Hunter 
1299e23350b3SAdrian Hunter 	/* Use SD definition of driver strength for hosts */
1300e23350b3SAdrian Hunter 	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1301e23350b3SAdrian Hunter 		host_drv_type |= SD_DRIVER_TYPE_A;
1302e23350b3SAdrian Hunter 
1303e23350b3SAdrian Hunter 	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1304e23350b3SAdrian Hunter 		host_drv_type |= SD_DRIVER_TYPE_C;
1305e23350b3SAdrian Hunter 
1306e23350b3SAdrian Hunter 	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1307e23350b3SAdrian Hunter 		host_drv_type |= SD_DRIVER_TYPE_D;
1308e23350b3SAdrian Hunter 
1309e23350b3SAdrian Hunter 	/*
1310e23350b3SAdrian Hunter 	 * The drive strength that the hardware can support
1311e23350b3SAdrian Hunter 	 * depends on the board design.  Pass the appropriate
1312e23350b3SAdrian Hunter 	 * information and let the hardware-specific code
1313e23350b3SAdrian Hunter 	 * return what is possible given the options.
1314e23350b3SAdrian Hunter 	 */
13159eadcc05SUlf Hansson 	return host->ops->select_drive_strength(card, max_dtr,
1316e23350b3SAdrian Hunter 						host_drv_type,
1317e23350b3SAdrian Hunter 						card_drv_type,
1318e23350b3SAdrian Hunter 						drv_type);
1319e23350b3SAdrian Hunter }
1320e23350b3SAdrian Hunter 
1321d6d50a15SArindam Nath /*
1322aaac1b47SPierre Ossman  * Apply power to the MMC stack.  This is a two-stage process.
1323aaac1b47SPierre Ossman  * First, we enable power to the card without the clock running.
1324aaac1b47SPierre Ossman  * We then wait a bit for the power to stabilise.  Finally,
1325aaac1b47SPierre Ossman  * enable the bus drivers and clock to the card.
1326aaac1b47SPierre Ossman  *
1327aaac1b47SPierre Ossman  * We must _NOT_ enable the clock prior to power stabilising.
1328aaac1b47SPierre Ossman  *
1329aaac1b47SPierre Ossman  * If a host does all the power sequencing itself, ignore the
1330aaac1b47SPierre Ossman  * initial MMC_POWER_UP stage.
1331aaac1b47SPierre Ossman  */
mmc_power_up(struct mmc_host * host,u32 ocr)13324a065193SUlf Hansson void mmc_power_up(struct mmc_host *host, u32 ocr)
1333aaac1b47SPierre Ossman {
1334fa550189SUlf Hansson 	if (host->ios.power_mode == MMC_POWER_ON)
1335fa550189SUlf Hansson 		return;
1336fa550189SUlf Hansson 
13373aa8793fSUlf Hansson 	mmc_pwrseq_pre_power_on(host);
13383aa8793fSUlf Hansson 
13394a065193SUlf Hansson 	host->ios.vdd = fls(ocr) - 1;
1340aaac1b47SPierre Ossman 	host->ios.power_mode = MMC_POWER_UP;
13412d079c43SJohan Rudholm 	/* Set initial state and call mmc_set_ios */
13422d079c43SJohan Rudholm 	mmc_set_initial_state(host);
1343aaac1b47SPierre Ossman 
1344508c9864SUlf Hansson 	mmc_set_initial_signal_voltage(host);
1345108ecc4cSAaron Lu 
1346f9996aeeSPierre Ossman 	/*
1347f9996aeeSPierre Ossman 	 * This delay should be sufficient to allow the power supply
1348f9996aeeSPierre Ossman 	 * to reach the minimum voltage.
1349f9996aeeSPierre Ossman 	 */
13506d796c68SShawn Lin 	mmc_delay(host->ios.power_delay_ms);
1351aaac1b47SPierre Ossman 
13524febb7e2SUlf Hansson 	mmc_pwrseq_post_power_on(host);
13534febb7e2SUlf Hansson 
135488ae8b86SHein Tibosch 	host->ios.clock = host->f_init;
13558dfd0374SSascha Hauer 
1356aaac1b47SPierre Ossman 	host->ios.power_mode = MMC_POWER_ON;
1357aaac1b47SPierre Ossman 	mmc_set_ios(host);
1358aaac1b47SPierre Ossman 
1359f9996aeeSPierre Ossman 	/*
1360f9996aeeSPierre Ossman 	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1361f9996aeeSPierre Ossman 	 * time required to reach a stable voltage.
1362f9996aeeSPierre Ossman 	 */
13636d796c68SShawn Lin 	mmc_delay(host->ios.power_delay_ms);
1364aaac1b47SPierre Ossman }
1365aaac1b47SPierre Ossman 
mmc_power_off(struct mmc_host * host)13667f7e4129SUlf Hansson void mmc_power_off(struct mmc_host *host)
1367aaac1b47SPierre Ossman {
1368fa550189SUlf Hansson 	if (host->ios.power_mode == MMC_POWER_OFF)
1369fa550189SUlf Hansson 		return;
1370fa550189SUlf Hansson 
13713aa8793fSUlf Hansson 	mmc_pwrseq_power_off(host);
13723aa8793fSUlf Hansson 
1373aaac1b47SPierre Ossman 	host->ios.clock = 0;
1374aaac1b47SPierre Ossman 	host->ios.vdd = 0;
1375b33d46c3SUlf Hansson 
1376aaac1b47SPierre Ossman 	host->ios.power_mode = MMC_POWER_OFF;
13772d079c43SJohan Rudholm 	/* Set initial state and call mmc_set_ios */
13782d079c43SJohan Rudholm 	mmc_set_initial_state(host);
1379778e277cSMika Westerberg 
1380041beb1dSDaniel Drake 	/*
1381041beb1dSDaniel Drake 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1382041beb1dSDaniel Drake 	 * XO-1.5, require a short delay after poweroff before the card
1383041beb1dSDaniel Drake 	 * can be successfully turned on again.
1384041beb1dSDaniel Drake 	 */
1385041beb1dSDaniel Drake 	mmc_delay(1);
1386aaac1b47SPierre Ossman }
1387aaac1b47SPierre Ossman 
mmc_power_cycle(struct mmc_host * host,u32 ocr)13884a065193SUlf Hansson void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1389276e090fSJohan Rudholm {
1390276e090fSJohan Rudholm 	mmc_power_off(host);
1391276e090fSJohan Rudholm 	/* Wait at least 1 ms according to SD spec */
1392276e090fSJohan Rudholm 	mmc_delay(1);
13934a065193SUlf Hansson 	mmc_power_up(host, ocr);
1394276e090fSJohan Rudholm }
1395276e090fSJohan Rudholm 
1396aaac1b47SPierre Ossman /*
13977ea239d9SPierre Ossman  * Assign a mmc bus handler to a host. Only one bus handler may control a
13987ea239d9SPierre Ossman  * host at any given time.
1399aaac1b47SPierre Ossman  */
mmc_attach_bus(struct mmc_host * host,const struct mmc_bus_ops * ops)14007ea239d9SPierre Ossman void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1401aaac1b47SPierre Ossman {
14027ea239d9SPierre Ossman 	host->bus_ops = ops;
1403aaac1b47SPierre Ossman }
1404aaac1b47SPierre Ossman 
1405aaac1b47SPierre Ossman /*
14067f7e4129SUlf Hansson  * Remove the current bus handler from a host.
1407aaac1b47SPierre Ossman  */
mmc_detach_bus(struct mmc_host * host)14087ea239d9SPierre Ossman void mmc_detach_bus(struct mmc_host *host)
1409aaac1b47SPierre Ossman {
1410e9ce2ce1SUlf Hansson 	host->bus_ops = NULL;
1411aaac1b47SPierre Ossman }
1412aaac1b47SPierre Ossman 
_mmc_detect_change(struct mmc_host * host,unsigned long delay,bool cd_irq)14132ac55d5eSUlf Hansson void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
1414bbd43682SUlf Hansson {
1415bbd43682SUlf Hansson 	/*
1416b52fb259SUlf Hansson 	 * Prevent system sleep for 5s to allow user space to consume the
1417b52fb259SUlf Hansson 	 * corresponding uevent. This is especially useful when CD irq is used
1418b52fb259SUlf Hansson 	 * as a system wakeup, but doesn't hurt in other cases.
1419bbd43682SUlf Hansson 	 */
1420b52fb259SUlf Hansson 	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL))
1421b52fb259SUlf Hansson 		__pm_wakeup_event(host->ws, 5000);
1422bbd43682SUlf Hansson 
1423bbd43682SUlf Hansson 	host->detect_change = 1;
1424bbd43682SUlf Hansson 	mmc_schedule_delayed_work(&host->detect, delay);
1425bbd43682SUlf Hansson }
1426bbd43682SUlf Hansson 
1427aaac1b47SPierre Ossman /**
1428aaac1b47SPierre Ossman  *	mmc_detect_change - process change of state on a MMC socket
1429aaac1b47SPierre Ossman  *	@host: host which changed state.
1430aaac1b47SPierre Ossman  *	@delay: optional delay to wait before detection (jiffies)
1431aaac1b47SPierre Ossman  *
143267a61c48SPierre Ossman  *	MMC drivers should call this when they detect a card has been
143367a61c48SPierre Ossman  *	inserted or removed. The MMC layer will confirm that any
143467a61c48SPierre Ossman  *	present card is still functional, and initialize any newly
143567a61c48SPierre Ossman  *	inserted.
1436aaac1b47SPierre Ossman  */
mmc_detect_change(struct mmc_host * host,unsigned long delay)1437aaac1b47SPierre Ossman void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1438aaac1b47SPierre Ossman {
1439bbd43682SUlf Hansson 	_mmc_detect_change(host, delay, true);
1440aaac1b47SPierre Ossman }
1441aaac1b47SPierre Ossman EXPORT_SYMBOL(mmc_detect_change);
1442aaac1b47SPierre Ossman 
mmc_init_erase(struct mmc_card * card)1443dfe86cbaSAdrian Hunter void mmc_init_erase(struct mmc_card *card)
1444dfe86cbaSAdrian Hunter {
1445dfe86cbaSAdrian Hunter 	unsigned int sz;
1446dfe86cbaSAdrian Hunter 
1447dfe86cbaSAdrian Hunter 	if (is_power_of_2(card->erase_size))
1448dfe86cbaSAdrian Hunter 		card->erase_shift = ffs(card->erase_size) - 1;
1449dfe86cbaSAdrian Hunter 	else
1450dfe86cbaSAdrian Hunter 		card->erase_shift = 0;
1451dfe86cbaSAdrian Hunter 
1452dfe86cbaSAdrian Hunter 	/*
1453dfe86cbaSAdrian Hunter 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1454dfe86cbaSAdrian Hunter 	 * card.  That is not desirable because it can take a long time
1455dfe86cbaSAdrian Hunter 	 * (minutes), potentially delaying more important I/O, and also the
1456dfe86cbaSAdrian Hunter 	 * timeout calculations become increasingly over-estimated.
1457dfe86cbaSAdrian Hunter 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1458dfe86cbaSAdrian Hunter 	 * to that size and alignment.
1459dfe86cbaSAdrian Hunter 	 *
1460dfe86cbaSAdrian Hunter 	 * For SD cards that define Allocation Unit size, limit erases to one
1461c6d8fd61SGwendal Grignou 	 * Allocation Unit at a time.
1462c6d8fd61SGwendal Grignou 	 * For MMC, have a stab at a good value; for modern cards it will
1463c6d8fd61SGwendal Grignou 	 * end up being 4 MiB. Note that if the value is too small, it can end
1464c6d8fd61SGwendal Grignou 	 * up taking longer to erase. Also note, erase_size is already set to
1465c6d8fd61SGwendal Grignou 	 * High Capacity Erase Size if available when this function is called.
1466dfe86cbaSAdrian Hunter 	 */
1467dfe86cbaSAdrian Hunter 	if (mmc_card_sd(card) && card->ssr.au) {
1468dfe86cbaSAdrian Hunter 		card->pref_erase = card->ssr.au;
1469dfe86cbaSAdrian Hunter 		card->erase_shift = ffs(card->ssr.au) - 1;
1470cc8aa7deSChuanxiao Dong 	} else if (card->erase_size) {
1471dfe86cbaSAdrian Hunter 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1472dfe86cbaSAdrian Hunter 		if (sz < 128)
1473dfe86cbaSAdrian Hunter 			card->pref_erase = 512 * 1024 / 512;
1474dfe86cbaSAdrian Hunter 		else if (sz < 512)
1475dfe86cbaSAdrian Hunter 			card->pref_erase = 1024 * 1024 / 512;
1476dfe86cbaSAdrian Hunter 		else if (sz < 1024)
1477dfe86cbaSAdrian Hunter 			card->pref_erase = 2 * 1024 * 1024 / 512;
1478dfe86cbaSAdrian Hunter 		else
1479dfe86cbaSAdrian Hunter 			card->pref_erase = 4 * 1024 * 1024 / 512;
1480dfe86cbaSAdrian Hunter 		if (card->pref_erase < card->erase_size)
1481dfe86cbaSAdrian Hunter 			card->pref_erase = card->erase_size;
1482dfe86cbaSAdrian Hunter 		else {
1483dfe86cbaSAdrian Hunter 			sz = card->pref_erase % card->erase_size;
1484dfe86cbaSAdrian Hunter 			if (sz)
1485dfe86cbaSAdrian Hunter 				card->pref_erase += card->erase_size - sz;
1486dfe86cbaSAdrian Hunter 		}
1487cc8aa7deSChuanxiao Dong 	} else
1488cc8aa7deSChuanxiao Dong 		card->pref_erase = 0;
1489dfe86cbaSAdrian Hunter }
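/*
 * Rough illustration with assumed numbers: for an SD card without an
 * Allocation Unit in the SSR, 'sz' above is approximately the card size in
 * MiB (capacity scaled to 512-byte sectors, then shifted down by 11). A
 * 4 GiB card gives sz = 4096, so pref_erase starts at 4 MiB worth of
 * sectors (8192) and is then rounded up to a multiple of erase_size if
 * needed.
 */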
1490dfe86cbaSAdrian Hunter 
is_trim_arg(unsigned int arg)1491489d1445SChristian Löhle static bool is_trim_arg(unsigned int arg)
1492489d1445SChristian Löhle {
1493489d1445SChristian Löhle 	return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
1494489d1445SChristian Löhle }
1495489d1445SChristian Löhle 
mmc_mmc_erase_timeout(struct mmc_card * card,unsigned int arg,unsigned int qty)1496eaa02f75SAndrei Warkentin static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1497dfe86cbaSAdrian Hunter 				          unsigned int arg, unsigned int qty)
1498dfe86cbaSAdrian Hunter {
1499dfe86cbaSAdrian Hunter 	unsigned int erase_timeout;
1500dfe86cbaSAdrian Hunter 
15017194efb8SAdrian Hunter 	if (arg == MMC_DISCARD_ARG ||
15027194efb8SAdrian Hunter 	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
15037194efb8SAdrian Hunter 		erase_timeout = card->ext_csd.trim_timeout;
15047194efb8SAdrian Hunter 	} else if (card->ext_csd.erase_group_def & 1) {
1505dfe86cbaSAdrian Hunter 		/* High Capacity Erase Group Size uses HC timeouts */
1506dfe86cbaSAdrian Hunter 		if (arg == MMC_TRIM_ARG)
1507dfe86cbaSAdrian Hunter 			erase_timeout = card->ext_csd.trim_timeout;
1508dfe86cbaSAdrian Hunter 		else
1509dfe86cbaSAdrian Hunter 			erase_timeout = card->ext_csd.hc_erase_timeout;
1510dfe86cbaSAdrian Hunter 	} else {
1511dfe86cbaSAdrian Hunter 		/* CSD Erase Group Size uses write timeout */
1512dfe86cbaSAdrian Hunter 		unsigned int mult = (10 << card->csd.r2w_factor);
15134406ae21SShawn Lin 		unsigned int timeout_clks = card->csd.taac_clks * mult;
1514dfe86cbaSAdrian Hunter 		unsigned int timeout_us;
1515dfe86cbaSAdrian Hunter 
15164406ae21SShawn Lin 		/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
15174406ae21SShawn Lin 		if (card->csd.taac_ns < 1000000)
15184406ae21SShawn Lin 			timeout_us = (card->csd.taac_ns * mult) / 1000;
1519dfe86cbaSAdrian Hunter 		else
15204406ae21SShawn Lin 			timeout_us = (card->csd.taac_ns / 1000) * mult;
1521dfe86cbaSAdrian Hunter 
1522dfe86cbaSAdrian Hunter 		/*
1523dfe86cbaSAdrian Hunter 		 * ios.clock is only a target.  The real clock rate might be
1524dfe86cbaSAdrian Hunter 		 * less but not that much less, so fudge it by multiplying by 2.
1525dfe86cbaSAdrian Hunter 		 */
1526dfe86cbaSAdrian Hunter 		timeout_clks <<= 1;
1527dfe86cbaSAdrian Hunter 		timeout_us += (timeout_clks * 1000) /
15289eadcc05SUlf Hansson 			      (card->host->ios.clock / 1000);
1529dfe86cbaSAdrian Hunter 
1530dfe86cbaSAdrian Hunter 		erase_timeout = timeout_us / 1000;
1531dfe86cbaSAdrian Hunter 
1532dfe86cbaSAdrian Hunter 		/*
1533dfe86cbaSAdrian Hunter 		 * Theoretically, the calculation could underflow, so round up
1534dfe86cbaSAdrian Hunter 		 * to 1 ms in that case.
1535dfe86cbaSAdrian Hunter 		 */
1536dfe86cbaSAdrian Hunter 		if (!erase_timeout)
1537dfe86cbaSAdrian Hunter 			erase_timeout = 1;
1538dfe86cbaSAdrian Hunter 	}
1539dfe86cbaSAdrian Hunter 
1540dfe86cbaSAdrian Hunter 	/* Multiplier for secure operations */
1541dfe86cbaSAdrian Hunter 	if (arg & MMC_SECURE_ARGS) {
1542dfe86cbaSAdrian Hunter 		if (arg == MMC_SECURE_ERASE_ARG)
1543dfe86cbaSAdrian Hunter 			erase_timeout *= card->ext_csd.sec_erase_mult;
1544dfe86cbaSAdrian Hunter 		else
1545dfe86cbaSAdrian Hunter 			erase_timeout *= card->ext_csd.sec_trim_mult;
1546dfe86cbaSAdrian Hunter 	}
1547dfe86cbaSAdrian Hunter 
1548dfe86cbaSAdrian Hunter 	erase_timeout *= qty;
1549dfe86cbaSAdrian Hunter 
1550dfe86cbaSAdrian Hunter 	/*
1551dfe86cbaSAdrian Hunter 	 * Ensure at least a 1 second timeout for SPI as per
1552dfe86cbaSAdrian Hunter 	 * 'mmc_set_data_timeout()'
1553dfe86cbaSAdrian Hunter 	 */
1554dfe86cbaSAdrian Hunter 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1555dfe86cbaSAdrian Hunter 		erase_timeout = 1000;
1556dfe86cbaSAdrian Hunter 
1557eaa02f75SAndrei Warkentin 	return erase_timeout;
1558dfe86cbaSAdrian Hunter }
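/*
 * Rough illustration with assumed CSD values: taac_ns = 80000000,
 * taac_clks = 100 and r2w_factor = 7 give mult = 10 << 7 = 1280, so
 * timeout_us = (80000000 / 1000) * 1280 = 102400000 plus a small
 * clock-based term, i.e. an erase_timeout of roughly 102 seconds per
 * erase group before the qty multiplication.
 */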
1559dfe86cbaSAdrian Hunter 
mmc_sd_erase_timeout(struct mmc_card * card,unsigned int arg,unsigned int qty)1560eaa02f75SAndrei Warkentin static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1561eaa02f75SAndrei Warkentin 					 unsigned int arg,
1562dfe86cbaSAdrian Hunter 					 unsigned int qty)
1563dfe86cbaSAdrian Hunter {
1564eaa02f75SAndrei Warkentin 	unsigned int erase_timeout;
1565eaa02f75SAndrei Warkentin 
1566ad9be7ffSAvri Altman 	/* For DISCARD none of the calculation below applies;
1567ad9be7ffSAvri Altman 	 * the busy timeout is 250 ms per discard command.
1568ad9be7ffSAvri Altman 	 */
1569ad9be7ffSAvri Altman 	if (arg == SD_DISCARD_ARG)
1570ad9be7ffSAvri Altman 		return SD_DISCARD_TIMEOUT_MS;
1571ad9be7ffSAvri Altman 
1572dfe86cbaSAdrian Hunter 	if (card->ssr.erase_timeout) {
1573dfe86cbaSAdrian Hunter 		/* Erase timeout specified in SD Status Register (SSR) */
1574eaa02f75SAndrei Warkentin 		erase_timeout = card->ssr.erase_timeout * qty +
1575dfe86cbaSAdrian Hunter 				card->ssr.erase_offset;
1576dfe86cbaSAdrian Hunter 	} else {
1577dfe86cbaSAdrian Hunter 		/*
1578dfe86cbaSAdrian Hunter 		 * Erase timeout not specified in SD Status Register (SSR) so
1579dfe86cbaSAdrian Hunter 		 * use 250ms per write block.
1580dfe86cbaSAdrian Hunter 		 */
1581eaa02f75SAndrei Warkentin 		erase_timeout = 250 * qty;
1582dfe86cbaSAdrian Hunter 	}
1583dfe86cbaSAdrian Hunter 
1584dfe86cbaSAdrian Hunter 	/* Must not be less than 1 second */
1585eaa02f75SAndrei Warkentin 	if (erase_timeout < 1000)
1586eaa02f75SAndrei Warkentin 		erase_timeout = 1000;
1587eaa02f75SAndrei Warkentin 
1588eaa02f75SAndrei Warkentin 	return erase_timeout;
1589dfe86cbaSAdrian Hunter }
1590dfe86cbaSAdrian Hunter 
mmc_erase_timeout(struct mmc_card * card,unsigned int arg,unsigned int qty)1591eaa02f75SAndrei Warkentin static unsigned int mmc_erase_timeout(struct mmc_card *card,
1592eaa02f75SAndrei Warkentin 				      unsigned int arg,
1593dfe86cbaSAdrian Hunter 				      unsigned int qty)
1594dfe86cbaSAdrian Hunter {
1595dfe86cbaSAdrian Hunter 	if (mmc_card_sd(card))
1596eaa02f75SAndrei Warkentin 		return mmc_sd_erase_timeout(card, arg, qty);
1597dfe86cbaSAdrian Hunter 	else
1598eaa02f75SAndrei Warkentin 		return mmc_mmc_erase_timeout(card, arg, qty);
1599dfe86cbaSAdrian Hunter }
1600dfe86cbaSAdrian Hunter 
mmc_do_erase(struct mmc_card * card,unsigned int from,unsigned int to,unsigned int arg)1601dfe86cbaSAdrian Hunter static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1602dfe86cbaSAdrian Hunter 			unsigned int to, unsigned int arg)
1603dfe86cbaSAdrian Hunter {
1604c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1605bb4eecf2SBaolin Wang 	unsigned int qty = 0, busy_timeout = 0;
1606e62f1e0bSUlf Hansson 	bool use_r1b_resp;
1607dfe86cbaSAdrian Hunter 	int err;
1608dfe86cbaSAdrian Hunter 
16098f11d106SAdrian Hunter 	mmc_retune_hold(card->host);
16108f11d106SAdrian Hunter 
1611dfe86cbaSAdrian Hunter 	/*
1612dfe86cbaSAdrian Hunter 	 * qty is used to calculate the erase timeout which depends on how many
1613dfe86cbaSAdrian Hunter 	 * erase groups (or allocation units in SD terminology) are affected.
1614dfe86cbaSAdrian Hunter 	 * We count erasing part of an erase group as one erase group.
1615dfe86cbaSAdrian Hunter 	 * For SD, the allocation units are always a power of 2.  For MMC, the
1616dfe86cbaSAdrian Hunter 	 * erase group size is almost certainly also a power of 2, but the JEDEC
1617dfe86cbaSAdrian Hunter 	 * standard does not seem to insist on that, so we fall back to
1618dfe86cbaSAdrian Hunter 	 * division in that case.  SD may not specify an allocation unit size,
1619dfe86cbaSAdrian Hunter 	 * in which case the timeout is based on the number of write blocks.
1620dfe86cbaSAdrian Hunter 	 *
1621dfe86cbaSAdrian Hunter 	 * Note that the timeout for secure trim 2 will only be correct if the
1622dfe86cbaSAdrian Hunter 	 * number of erase groups specified is the same as the total of all
1623dfe86cbaSAdrian Hunter 	 * preceding secure trim 1 commands.  Since the power may have been
1624dfe86cbaSAdrian Hunter 	 * lost since the secure trim 1 commands occurred, it is generally
1625dfe86cbaSAdrian Hunter 	 * impossible to calculate the secure trim 2 timeout correctly.
1626dfe86cbaSAdrian Hunter 	 */
1627dfe86cbaSAdrian Hunter 	if (card->erase_shift)
1628dfe86cbaSAdrian Hunter 		qty += ((to >> card->erase_shift) -
1629dfe86cbaSAdrian Hunter 			(from >> card->erase_shift)) + 1;
1630dfe86cbaSAdrian Hunter 	else if (mmc_card_sd(card))
1631dfe86cbaSAdrian Hunter 		qty += to - from + 1;
1632dfe86cbaSAdrian Hunter 	else
1633dfe86cbaSAdrian Hunter 		qty += ((to / card->erase_size) -
1634dfe86cbaSAdrian Hunter 			(from / card->erase_size)) + 1;
1635dfe86cbaSAdrian Hunter 
1636dfe86cbaSAdrian Hunter 	if (!mmc_card_blockaddr(card)) {
1637dfe86cbaSAdrian Hunter 		from <<= 9;
1638dfe86cbaSAdrian Hunter 		to <<= 9;
1639dfe86cbaSAdrian Hunter 	}
1640dfe86cbaSAdrian Hunter 
1641dfe86cbaSAdrian Hunter 	if (mmc_card_sd(card))
1642dfe86cbaSAdrian Hunter 		cmd.opcode = SD_ERASE_WR_BLK_START;
1643dfe86cbaSAdrian Hunter 	else
1644dfe86cbaSAdrian Hunter 		cmd.opcode = MMC_ERASE_GROUP_START;
1645dfe86cbaSAdrian Hunter 	cmd.arg = from;
1646dfe86cbaSAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1647dfe86cbaSAdrian Hunter 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1648dfe86cbaSAdrian Hunter 	if (err) {
1649a3c76eb9SGirish K S 		pr_err("mmc_erase: group start error %d, "
1650dfe86cbaSAdrian Hunter 		       "status %#x\n", err, cmd.resp[0]);
165167716327SAdrian Hunter 		err = -EIO;
1652dfe86cbaSAdrian Hunter 		goto out;
1653dfe86cbaSAdrian Hunter 	}
1654dfe86cbaSAdrian Hunter 
1655dfe86cbaSAdrian Hunter 	memset(&cmd, 0, sizeof(struct mmc_command));
1656dfe86cbaSAdrian Hunter 	if (mmc_card_sd(card))
1657dfe86cbaSAdrian Hunter 		cmd.opcode = SD_ERASE_WR_BLK_END;
1658dfe86cbaSAdrian Hunter 	else
1659dfe86cbaSAdrian Hunter 		cmd.opcode = MMC_ERASE_GROUP_END;
1660dfe86cbaSAdrian Hunter 	cmd.arg = to;
1661dfe86cbaSAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1662dfe86cbaSAdrian Hunter 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1663dfe86cbaSAdrian Hunter 	if (err) {
1664a3c76eb9SGirish K S 		pr_err("mmc_erase: group end error %d, status %#x\n",
1665dfe86cbaSAdrian Hunter 		       err, cmd.resp[0]);
166667716327SAdrian Hunter 		err = -EIO;
1667dfe86cbaSAdrian Hunter 		goto out;
1668dfe86cbaSAdrian Hunter 	}
1669dfe86cbaSAdrian Hunter 
1670dfe86cbaSAdrian Hunter 	memset(&cmd, 0, sizeof(struct mmc_command));
1671dfe86cbaSAdrian Hunter 	cmd.opcode = MMC_ERASE;
1672dfe86cbaSAdrian Hunter 	cmd.arg = arg;
1673bb4eecf2SBaolin Wang 	busy_timeout = mmc_erase_timeout(card, arg, qty);
1674e62f1e0bSUlf Hansson 	use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout);
1675bb4eecf2SBaolin Wang 
1676dfe86cbaSAdrian Hunter 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1677dfe86cbaSAdrian Hunter 	if (err) {
1678a3c76eb9SGirish K S 		pr_err("mmc_erase: erase error %d, status %#x\n",
1679dfe86cbaSAdrian Hunter 		       err, cmd.resp[0]);
1680dfe86cbaSAdrian Hunter 		err = -EIO;
1681dfe86cbaSAdrian Hunter 		goto out;
1682dfe86cbaSAdrian Hunter 	}
1683dfe86cbaSAdrian Hunter 
1684dfe86cbaSAdrian Hunter 	if (mmc_host_is_spi(card->host))
1685dfe86cbaSAdrian Hunter 		goto out;
1686dfe86cbaSAdrian Hunter 
1687bb4eecf2SBaolin Wang 	/*
1688bb4eecf2SBaolin Wang 	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
1689bb4eecf2SBaolin Wang 	 * shall be avoided.
1690bb4eecf2SBaolin Wang 	 */
1691bb4eecf2SBaolin Wang 	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
1692bb4eecf2SBaolin Wang 		goto out;
1693bb4eecf2SBaolin Wang 
16940d84c3e6SUlf Hansson 	/* Let's poll to find out when the erase operation completes. */
169504f967adSUlf Hansson 	err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);
1696833b5117SMartin Hicks 
1697dfe86cbaSAdrian Hunter out:
16988f11d106SAdrian Hunter 	mmc_retune_release(card->host);
1699dfe86cbaSAdrian Hunter 	return err;
1700dfe86cbaSAdrian Hunter }
1701dfe86cbaSAdrian Hunter 
mmc_align_erase_size(struct mmc_card * card,unsigned int * from,unsigned int * to,unsigned int nr)170271085123SBaolin Wang static unsigned int mmc_align_erase_size(struct mmc_card *card,
170371085123SBaolin Wang 					 unsigned int *from,
170471085123SBaolin Wang 					 unsigned int *to,
170571085123SBaolin Wang 					 unsigned int nr)
170671085123SBaolin Wang {
170771085123SBaolin Wang 	unsigned int from_new = *from, nr_new = nr, rem;
170871085123SBaolin Wang 
17096c689886SBaolin Wang 	/*
17106c689886SBaolin Wang 	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
17116c689886SBaolin Wang 	 * to align the erase size efficiently.
17126c689886SBaolin Wang 	 */
17136c689886SBaolin Wang 	if (is_power_of_2(card->erase_size)) {
17146c689886SBaolin Wang 		unsigned int temp = from_new;
17156c689886SBaolin Wang 
17166c689886SBaolin Wang 		from_new = round_up(temp, card->erase_size);
17176c689886SBaolin Wang 		rem = from_new - temp;
17186c689886SBaolin Wang 
17196c689886SBaolin Wang 		if (nr_new > rem)
17206c689886SBaolin Wang 			nr_new -= rem;
17216c689886SBaolin Wang 		else
17226c689886SBaolin Wang 			return 0;
17236c689886SBaolin Wang 
17246c689886SBaolin Wang 		nr_new = round_down(nr_new, card->erase_size);
17256c689886SBaolin Wang 	} else {
172671085123SBaolin Wang 		rem = from_new % card->erase_size;
172771085123SBaolin Wang 		if (rem) {
172871085123SBaolin Wang 			rem = card->erase_size - rem;
172971085123SBaolin Wang 			from_new += rem;
173071085123SBaolin Wang 			if (nr_new > rem)
173171085123SBaolin Wang 				nr_new -= rem;
173271085123SBaolin Wang 			else
173371085123SBaolin Wang 				return 0;
173471085123SBaolin Wang 		}
173571085123SBaolin Wang 
173671085123SBaolin Wang 		rem = nr_new % card->erase_size;
173771085123SBaolin Wang 		if (rem)
173871085123SBaolin Wang 			nr_new -= rem;
17396c689886SBaolin Wang 	}
174071085123SBaolin Wang 
174171085123SBaolin Wang 	if (nr_new == 0)
174271085123SBaolin Wang 		return 0;
174371085123SBaolin Wang 
174471085123SBaolin Wang 	*to = from_new + nr_new;
174571085123SBaolin Wang 	*from = from_new;
174671085123SBaolin Wang 
174771085123SBaolin Wang 	return nr_new;
174871085123SBaolin Wang }
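/*
 * Worked example with assumed values: erase_size = 1024 sectors (a power
 * of 2), *from = 1000 and nr = 5000. from_new is rounded up to 1024, the
 * 24-sector remainder is subtracted (nr_new = 4976) and nr_new is rounded
 * down to 4096, so the function returns 4096 with *from = 1024 and
 * *to = 5120.
 */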
174971085123SBaolin Wang 
1750dfe86cbaSAdrian Hunter /**
1751dfe86cbaSAdrian Hunter  * mmc_erase - erase sectors.
1752dfe86cbaSAdrian Hunter  * @card: card to erase
1753dfe86cbaSAdrian Hunter  * @from: first sector to erase
1754dfe86cbaSAdrian Hunter  * @nr: number of sectors to erase
1755bc47e2f6SAvri Altman  * @arg: erase command argument
1756dfe86cbaSAdrian Hunter  *
1757dfe86cbaSAdrian Hunter  * Caller must claim host before calling this function.
1758dfe86cbaSAdrian Hunter  */
mmc_erase(struct mmc_card * card,unsigned int from,unsigned int nr,unsigned int arg)1759dfe86cbaSAdrian Hunter int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1760dfe86cbaSAdrian Hunter 	      unsigned int arg)
1761dfe86cbaSAdrian Hunter {
1762dfe86cbaSAdrian Hunter 	unsigned int rem, to = from + nr;
1763642c28abSDavid Jander 	int err;
1764dfe86cbaSAdrian Hunter 
176594fe2580SUlf Hansson 	if (!(card->csd.cmdclass & CCC_ERASE))
1766dfe86cbaSAdrian Hunter 		return -EOPNOTSUPP;
1767dfe86cbaSAdrian Hunter 
1768dfe86cbaSAdrian Hunter 	if (!card->erase_size)
1769dfe86cbaSAdrian Hunter 		return -EOPNOTSUPP;
1770dfe86cbaSAdrian Hunter 
1771bc47e2f6SAvri Altman 	if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
1772dfe86cbaSAdrian Hunter 		return -EOPNOTSUPP;
1773dfe86cbaSAdrian Hunter 
1774bc47e2f6SAvri Altman 	if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
1775dfe86cbaSAdrian Hunter 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1776dfe86cbaSAdrian Hunter 		return -EOPNOTSUPP;
1777dfe86cbaSAdrian Hunter 
1778489d1445SChristian Löhle 	if (mmc_card_mmc(card) && is_trim_arg(arg) &&
1779dfe86cbaSAdrian Hunter 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1780dfe86cbaSAdrian Hunter 		return -EOPNOTSUPP;
1781dfe86cbaSAdrian Hunter 
1782dfe86cbaSAdrian Hunter 	if (arg == MMC_SECURE_ERASE_ARG) {
1783dfe86cbaSAdrian Hunter 		if (from % card->erase_size || nr % card->erase_size)
1784dfe86cbaSAdrian Hunter 			return -EINVAL;
1785dfe86cbaSAdrian Hunter 	}
1786dfe86cbaSAdrian Hunter 
178771085123SBaolin Wang 	if (arg == MMC_ERASE_ARG)
178871085123SBaolin Wang 		nr = mmc_align_erase_size(card, &from, &to, nr);
1789dfe86cbaSAdrian Hunter 
1790dfe86cbaSAdrian Hunter 	if (nr == 0)
1791dfe86cbaSAdrian Hunter 		return 0;
1792dfe86cbaSAdrian Hunter 
1793dfe86cbaSAdrian Hunter 	if (to <= from)
1794dfe86cbaSAdrian Hunter 		return -EINVAL;
1795dfe86cbaSAdrian Hunter 
1796dfe86cbaSAdrian Hunter 	/* 'from' and 'to' are inclusive */
1797dfe86cbaSAdrian Hunter 	to -= 1;
1798dfe86cbaSAdrian Hunter 
1799642c28abSDavid Jander 	/*
1800642c28abSDavid Jander 	 * Special case where only one erase-group fits in the timeout budget:
1801642c28abSDavid Jander 	 * If the region crosses an erase-group boundary in this particular
1802642c28abSDavid Jander 	 * case, we will be trimming more than one erase-group, which does not
1803642c28abSDavid Jander 	 * fit in the timeout budget of the controller, so we need to split it
1804642c28abSDavid Jander 	 * and call mmc_do_erase() twice if necessary. This special case is
1805642c28abSDavid Jander 	 * identified by the card->eg_boundary flag.
1806642c28abSDavid Jander 	 */
1807642c28abSDavid Jander 	rem = card->erase_size - (from % card->erase_size);
1808489d1445SChristian Löhle 	if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
1809642c28abSDavid Jander 		err = mmc_do_erase(card, from, from + rem - 1, arg);
1810642c28abSDavid Jander 		from += rem;
1811642c28abSDavid Jander 		if ((err) || (to <= from))
1812642c28abSDavid Jander 			return err;
1813642c28abSDavid Jander 	}
1814642c28abSDavid Jander 
1815dfe86cbaSAdrian Hunter 	return mmc_do_erase(card, from, to, arg);
1816dfe86cbaSAdrian Hunter }
1817dfe86cbaSAdrian Hunter EXPORT_SYMBOL(mmc_erase);
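/*
 * Minimal usage sketch (hypothetical caller): the host must be claimed and
 * the capability checked first, e.g.:
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_can_erase(card))
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */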
1818dfe86cbaSAdrian Hunter 
mmc_can_erase(struct mmc_card * card)1819dfe86cbaSAdrian Hunter int mmc_can_erase(struct mmc_card *card)
1820dfe86cbaSAdrian Hunter {
182194fe2580SUlf Hansson 	if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
1822dfe86cbaSAdrian Hunter 		return 1;
1823dfe86cbaSAdrian Hunter 	return 0;
1824dfe86cbaSAdrian Hunter }
1825dfe86cbaSAdrian Hunter EXPORT_SYMBOL(mmc_can_erase);
1826dfe86cbaSAdrian Hunter 
mmc_can_trim(struct mmc_card * card)1827dfe86cbaSAdrian Hunter int mmc_can_trim(struct mmc_card *card)
1828dfe86cbaSAdrian Hunter {
1829b5b4ff0aSShawn Lin 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
1830b5b4ff0aSShawn Lin 	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
1831dfe86cbaSAdrian Hunter 		return 1;
1832dfe86cbaSAdrian Hunter 	return 0;
1833dfe86cbaSAdrian Hunter }
1834dfe86cbaSAdrian Hunter EXPORT_SYMBOL(mmc_can_trim);
1835dfe86cbaSAdrian Hunter 
mmc_can_discard(struct mmc_card * card)1836b3bf9153SKyungmin Park int mmc_can_discard(struct mmc_card *card)
1837b3bf9153SKyungmin Park {
1838b3bf9153SKyungmin Park 	/*
1839b3bf9153SKyungmin Park 	 * As there's no way to detect the discard support bit at v4.5,
1840b3bf9153SKyungmin Park 	 * use the s/w feature support field.
1841b3bf9153SKyungmin Park 	 */
1842b3bf9153SKyungmin Park 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1843b3bf9153SKyungmin Park 		return 1;
1844b3bf9153SKyungmin Park 	return 0;
1845b3bf9153SKyungmin Park }
1846b3bf9153SKyungmin Park EXPORT_SYMBOL(mmc_can_discard);
1847b3bf9153SKyungmin Park 
mmc_can_sanitize(struct mmc_card * card)1848d9ddd629SKyungmin Park int mmc_can_sanitize(struct mmc_card *card)
1849d9ddd629SKyungmin Park {
185028302812SAdrian Hunter 	if (!mmc_can_trim(card) && !mmc_can_erase(card))
185128302812SAdrian Hunter 		return 0;
1852d9ddd629SKyungmin Park 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1853d9ddd629SKyungmin Park 		return 1;
1854d9ddd629SKyungmin Park 	return 0;
1855d9ddd629SKyungmin Park }
1856d9ddd629SKyungmin Park 
mmc_can_secure_erase_trim(struct mmc_card * card)1857dfe86cbaSAdrian Hunter int mmc_can_secure_erase_trim(struct mmc_card *card)
1858dfe86cbaSAdrian Hunter {
18595204d00fSLukas Czerner 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
18605204d00fSLukas Czerner 	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1861dfe86cbaSAdrian Hunter 		return 1;
1862dfe86cbaSAdrian Hunter 	return 0;
1863dfe86cbaSAdrian Hunter }
1864dfe86cbaSAdrian Hunter EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1865dfe86cbaSAdrian Hunter 
mmc_erase_group_aligned(struct mmc_card * card,unsigned int from,unsigned int nr)1866dfe86cbaSAdrian Hunter int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1867dfe86cbaSAdrian Hunter 			    unsigned int nr)
1868dfe86cbaSAdrian Hunter {
1869dfe86cbaSAdrian Hunter 	if (!card->erase_size)
1870dfe86cbaSAdrian Hunter 		return 0;
1871dfe86cbaSAdrian Hunter 	if (from % card->erase_size || nr % card->erase_size)
1872dfe86cbaSAdrian Hunter 		return 0;
1873dfe86cbaSAdrian Hunter 	return 1;
1874dfe86cbaSAdrian Hunter }
1875dfe86cbaSAdrian Hunter EXPORT_SYMBOL(mmc_erase_group_aligned);
1876aaac1b47SPierre Ossman 
mmc_do_calc_max_discard(struct mmc_card * card,unsigned int arg)1877e056a1b5SAdrian Hunter static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1878e056a1b5SAdrian Hunter 					    unsigned int arg)
1879e056a1b5SAdrian Hunter {
1880e056a1b5SAdrian Hunter 	struct mmc_host *host = card->host;
1881bb4eecf2SBaolin Wang 	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
1882e056a1b5SAdrian Hunter 	unsigned int last_timeout = 0;
188312182affSUlf Hansson 	unsigned int max_busy_timeout = host->max_busy_timeout ?
188412182affSUlf Hansson 			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
1885e056a1b5SAdrian Hunter 
1886bb4eecf2SBaolin Wang 	if (card->erase_shift) {
1887e056a1b5SAdrian Hunter 		max_qty = UINT_MAX >> card->erase_shift;
1888bb4eecf2SBaolin Wang 		min_qty = card->pref_erase >> card->erase_shift;
1889bb4eecf2SBaolin Wang 	} else if (mmc_card_sd(card)) {
1890e056a1b5SAdrian Hunter 		max_qty = UINT_MAX;
1891bb4eecf2SBaolin Wang 		min_qty = card->pref_erase;
1892bb4eecf2SBaolin Wang 	} else {
1893e056a1b5SAdrian Hunter 		max_qty = UINT_MAX / card->erase_size;
1894bb4eecf2SBaolin Wang 		min_qty = card->pref_erase / card->erase_size;
1895bb4eecf2SBaolin Wang 	}
1896e056a1b5SAdrian Hunter 
1897bb4eecf2SBaolin Wang 	/*
1898bb4eecf2SBaolin Wang 	 * We should not use 'host->max_busy_timeout' as the only limit when
1899bb4eecf2SBaolin Wang 	 * deciding the max discard sectors. We should strike a balance that
1900bb4eecf2SBaolin Wang 	 * improves the erase speed without letting the timeout grow too
1901bb4eecf2SBaolin Wang 	 * long.
1902bb4eecf2SBaolin Wang 	 *
1903bb4eecf2SBaolin Wang 	 * Here we set 'card->pref_erase' as the minimal discard sectors no
1904bb4eecf2SBaolin Wang 	 * matter what the size of 'host->max_busy_timeout' is, but if
1905bb4eecf2SBaolin Wang 	 * 'host->max_busy_timeout' is large enough for more discard sectors,
1906bb4eecf2SBaolin Wang 	 * then we can continue to increase the max discard sectors until we
190712182affSUlf Hansson 	 * reach a balanced value. In cases when the 'host->max_busy_timeout'
190812182affSUlf Hansson 	 * isn't specified, use the default max erase timeout.
1909bb4eecf2SBaolin Wang 	 */
1910e056a1b5SAdrian Hunter 	do {
1911e056a1b5SAdrian Hunter 		y = 0;
1912e056a1b5SAdrian Hunter 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1913e056a1b5SAdrian Hunter 			timeout = mmc_erase_timeout(card, arg, qty + x);
1914bb4eecf2SBaolin Wang 
191512182affSUlf Hansson 			if (qty + x > min_qty && timeout > max_busy_timeout)
1916e056a1b5SAdrian Hunter 				break;
1917bb4eecf2SBaolin Wang 
1918e056a1b5SAdrian Hunter 			if (timeout < last_timeout)
1919e056a1b5SAdrian Hunter 				break;
1920e056a1b5SAdrian Hunter 			last_timeout = timeout;
1921e056a1b5SAdrian Hunter 			y = x;
1922e056a1b5SAdrian Hunter 		}
1923e056a1b5SAdrian Hunter 		qty += y;
1924e056a1b5SAdrian Hunter 	} while (y);
1925e056a1b5SAdrian Hunter 
1926e056a1b5SAdrian Hunter 	if (!qty)
1927e056a1b5SAdrian Hunter 		return 0;
1928e056a1b5SAdrian Hunter 
1929642c28abSDavid Jander 	/*
1930642c28abSDavid Jander 	 * When specifying a sector range to trim, chances are we might cross
1931642c28abSDavid Jander 	 * an erase-group boundary even if the number of sectors is less than
1932642c28abSDavid Jander 	 * one erase-group.
1933642c28abSDavid Jander 	 * If we can only fit one erase-group in the controller timeout budget,
1934642c28abSDavid Jander 	 * we have to care that erase-group boundaries are not crossed by a
1935642c28abSDavid Jander 	 * single trim operation. We flag that special case with "eg_boundary".
1936642c28abSDavid Jander 	 * In all other cases we can just decrement qty and pretend that we
1937642c28abSDavid Jander 	 * always touch (qty + 1) erase-groups as a simple optimization.
1938642c28abSDavid Jander 	 */
1939e056a1b5SAdrian Hunter 	if (qty == 1)
1940642c28abSDavid Jander 		card->eg_boundary = 1;
1941642c28abSDavid Jander 	else
1942642c28abSDavid Jander 		qty--;
1943e056a1b5SAdrian Hunter 
1944e056a1b5SAdrian Hunter 	/* Convert qty to sectors */
1945e056a1b5SAdrian Hunter 	if (card->erase_shift)
1946642c28abSDavid Jander 		max_discard = qty << card->erase_shift;
1947e056a1b5SAdrian Hunter 	else if (mmc_card_sd(card))
1948642c28abSDavid Jander 		max_discard = qty + 1;
1949e056a1b5SAdrian Hunter 	else
1950642c28abSDavid Jander 		max_discard = qty * card->erase_size;
1951e056a1b5SAdrian Hunter 
1952e056a1b5SAdrian Hunter 	return max_discard;
1953e056a1b5SAdrian Hunter }
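/*
 * Illustration with assumed values: with erase_shift = 10, a final
 * qty = 8 is decremented to 7 and max_discard = 7 << 10 = 7168 sectors,
 * while qty = 1 instead sets eg_boundary and limits max_discard to a
 * single erase group of 1 << 10 sectors.
 */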
1954e056a1b5SAdrian Hunter 
mmc_calc_max_discard(struct mmc_card * card)1955e056a1b5SAdrian Hunter unsigned int mmc_calc_max_discard(struct mmc_card *card)
1956e056a1b5SAdrian Hunter {
1957e056a1b5SAdrian Hunter 	struct mmc_host *host = card->host;
1958e056a1b5SAdrian Hunter 	unsigned int max_discard, max_trim;
1959e056a1b5SAdrian Hunter 
1960e056a1b5SAdrian Hunter 	/*
1961e056a1b5SAdrian Hunter 	 * Without erase_group_def set, MMC erase timeout depends on clock
1962e056a1b5SAdrian Hunter 	 * frequency, which can change.  In that case, the best choice is
1963e056a1b5SAdrian Hunter 	 * just the preferred erase size.
1964e056a1b5SAdrian Hunter 	 */
1965e056a1b5SAdrian Hunter 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1966e056a1b5SAdrian Hunter 		return card->pref_erase;
1967e056a1b5SAdrian Hunter 
1968e056a1b5SAdrian Hunter 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1969d4721339SJiong Wu 	if (mmc_can_trim(card)) {
1970e056a1b5SAdrian Hunter 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1971d4721339SJiong Wu 		if (max_trim < max_discard || max_discard == 0)
1972e056a1b5SAdrian Hunter 			max_discard = max_trim;
1973e056a1b5SAdrian Hunter 	} else if (max_discard < card->erase_size) {
1974e056a1b5SAdrian Hunter 		max_discard = 0;
1975e056a1b5SAdrian Hunter 	}
1976e056a1b5SAdrian Hunter 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
197712182affSUlf Hansson 		mmc_hostname(host), max_discard, host->max_busy_timeout ?
197812182affSUlf Hansson 		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
1979e056a1b5SAdrian Hunter 	return max_discard;
1980e056a1b5SAdrian Hunter }
1981e056a1b5SAdrian Hunter EXPORT_SYMBOL(mmc_calc_max_discard);
1982e056a1b5SAdrian Hunter 
mmc_card_is_blockaddr(struct mmc_card * card)198333e6d74dSUlf Hansson bool mmc_card_is_blockaddr(struct mmc_card *card)
198433e6d74dSUlf Hansson {
198533e6d74dSUlf Hansson 	return card ? mmc_card_blockaddr(card) : false;
198633e6d74dSUlf Hansson }
198733e6d74dSUlf Hansson EXPORT_SYMBOL(mmc_card_is_blockaddr);
198833e6d74dSUlf Hansson 
mmc_set_blocklen(struct mmc_card * card,unsigned int blocklen)19890f8d8ea6SAdrian Hunter int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
19900f8d8ea6SAdrian Hunter {
1991c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
19920f8d8ea6SAdrian Hunter 
19931712c937SZiyuan Xu 	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
19941712c937SZiyuan Xu 	    mmc_card_hs400(card) || mmc_card_hs400es(card))
19950f8d8ea6SAdrian Hunter 		return 0;
19960f8d8ea6SAdrian Hunter 
19970f8d8ea6SAdrian Hunter 	cmd.opcode = MMC_SET_BLOCKLEN;
19980f8d8ea6SAdrian Hunter 	cmd.arg = blocklen;
19990f8d8ea6SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
20000f8d8ea6SAdrian Hunter 	return mmc_wait_for_cmd(card->host, &cmd, 5);
20010f8d8ea6SAdrian Hunter }
20020f8d8ea6SAdrian Hunter EXPORT_SYMBOL(mmc_set_blocklen);
20030f8d8ea6SAdrian Hunter 
mmc_hw_reset_for_init(struct mmc_host * host)2004b2499518SAdrian Hunter static void mmc_hw_reset_for_init(struct mmc_host *host)
2005b2499518SAdrian Hunter {
200652c8212dSUlf Hansson 	mmc_pwrseq_reset(host);
200752c8212dSUlf Hansson 
200832f18e59SWolfram Sang 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->card_hw_reset)
2009b2499518SAdrian Hunter 		return;
201032f18e59SWolfram Sang 	host->ops->card_hw_reset(host);
2011b2499518SAdrian Hunter }
2012b2499518SAdrian Hunter 
20133439c588SWolfram Sang /**
20143439c588SWolfram Sang  * mmc_hw_reset - reset the card in hardware
2015b71597edSWolfram Sang  * @card: card to be reset
20163439c588SWolfram Sang  *
20173439c588SWolfram Sang  * Hard reset the card. This function is only for upper layers, like the
20183439c588SWolfram Sang  * block layer or card drivers. You cannot use it in host drivers (struct
20193439c588SWolfram Sang  * mmc_card might be gone then).
20203439c588SWolfram Sang  *
20213439c588SWolfram Sang  * Return: 0 on success, -errno on failure
20223439c588SWolfram Sang  */
mmc_hw_reset(struct mmc_card * card)2023b71597edSWolfram Sang int mmc_hw_reset(struct mmc_card *card)
2024b2499518SAdrian Hunter {
2025b71597edSWolfram Sang 	struct mmc_host *host = card->host;
2026f855a371SJohan Rudholm 	int ret;
2027b2499518SAdrian Hunter 
20283a3db603SUlf Hansson 	ret = host->bus_ops->hw_reset(host);
20292ac55d5eSUlf Hansson 	if (ret < 0)
20303a3db603SUlf Hansson 		pr_warn("%s: tried to HW reset card, got error %d\n",
20314e6c7178SGwendal Grignou 			mmc_hostname(host), ret);
2032b2499518SAdrian Hunter 
2033f855a371SJohan Rudholm 	return ret;
2034b2499518SAdrian Hunter }
2035b2499518SAdrian Hunter EXPORT_SYMBOL(mmc_hw_reset);
2036b2499518SAdrian Hunter 
mmc_sw_reset(struct mmc_card * card)20379723f69dSWolfram Sang int mmc_sw_reset(struct mmc_card *card)
20381433269cSUlf Hansson {
20399723f69dSWolfram Sang 	struct mmc_host *host = card->host;
20401433269cSUlf Hansson 	int ret;
20411433269cSUlf Hansson 
2042fefdd3c9SUlf Hansson 	if (!host->bus_ops->sw_reset)
20431433269cSUlf Hansson 		return -EOPNOTSUPP;
20441433269cSUlf Hansson 
20451433269cSUlf Hansson 	ret = host->bus_ops->sw_reset(host);
20461433269cSUlf Hansson 	if (ret)
20471433269cSUlf Hansson 		pr_warn("%s: tried to SW reset card, got error %d\n",
20481433269cSUlf Hansson 			mmc_hostname(host), ret);
20491433269cSUlf Hansson 
20501433269cSUlf Hansson 	return ret;
20511433269cSUlf Hansson }
20521433269cSUlf Hansson EXPORT_SYMBOL(mmc_sw_reset);
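/*
 * Usage note (illustrative): upper layers such as the block driver
 * typically attempt mmc_hw_reset(card) when a request fails or times out,
 * while mmc_sw_reset(card) is only available when the active bus_ops
 * implement ->sw_reset; both return a negative errno on failure.
 */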
20531433269cSUlf Hansson 
mmc_rescan_try_freq(struct mmc_host * host,unsigned freq)2054807e8e40SAndy Ross static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2055aaac1b47SPierre Ossman {
2056807e8e40SAndy Ross 	host->f_init = freq;
20574c2ef25fSMaxim Levitsky 
205869f25f9bSShawn Lin 	pr_debug("%s: %s: trying to init card at %u Hz\n",
2059807e8e40SAndy Ross 		mmc_hostname(host), __func__, host->f_init);
206069f25f9bSShawn Lin 
20614a065193SUlf Hansson 	mmc_power_up(host, host->ocr_avail);
20622f94e55aSPhilip Rakity 
20632f94e55aSPhilip Rakity 	/*
2064b2499518SAdrian Hunter 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2065b2499518SAdrian Hunter 	 * do a hardware reset if possible.
2066b2499518SAdrian Hunter 	 */
2067b2499518SAdrian Hunter 	mmc_hw_reset_for_init(host);
2068b2499518SAdrian Hunter 
2069b2499518SAdrian Hunter 	/*
20702f94e55aSPhilip Rakity 	 * sdio_reset sends CMD52 to reset card.  Since we do not know
20712f94e55aSPhilip Rakity 	 * if the card is being re-initialized, just send it.  CMD52
20722f94e55aSPhilip Rakity 	 * should be ignored by SD/eMMC cards.
2073100a606dSCarlo Caione 	 * Skip it if we already know that we do not support SDIO commands
20742f94e55aSPhilip Rakity 	 */
2075100a606dSCarlo Caione 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2076807e8e40SAndy Ross 		sdio_reset(host);
2077100a606dSCarlo Caione 
2078807e8e40SAndy Ross 	mmc_go_idle(host);
20794c2ef25fSMaxim Levitsky 
2080ead49373SUlf Hansson 	if (!(host->caps2 & MMC_CAP2_NO_SD)) {
2081ead49373SUlf Hansson 		if (mmc_send_if_cond_pcie(host, host->ocr_avail))
2082ead49373SUlf Hansson 			goto out;
2083ead49373SUlf Hansson 		if (mmc_card_sd_express(host))
2084ead49373SUlf Hansson 			return 0;
2085ead49373SUlf Hansson 	}
2086807e8e40SAndy Ross 
2087807e8e40SAndy Ross 	/* Order's important: probe SDIO, then SD, then MMC */
2088100a606dSCarlo Caione 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2089807e8e40SAndy Ross 		if (!mmc_attach_sdio(host))
2090807e8e40SAndy Ross 			return 0;
2091100a606dSCarlo Caione 
20921b8d79c5SUlf Hansson 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2093807e8e40SAndy Ross 		if (!mmc_attach_sd(host))
2094807e8e40SAndy Ross 			return 0;
20951b8d79c5SUlf Hansson 
2096a0c3b68cSShawn Lin 	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2097807e8e40SAndy Ross 		if (!mmc_attach_mmc(host))
2098807e8e40SAndy Ross 			return 0;
2099807e8e40SAndy Ross 
2100ead49373SUlf Hansson out:
2101807e8e40SAndy Ross 	mmc_power_off(host);
2102807e8e40SAndy Ross 	return -EIO;
21034c2ef25fSMaxim Levitsky }
21044c2ef25fSMaxim Levitsky 
_mmc_detect_card_removed(struct mmc_host * host)2105d3049504SAdrian Hunter int _mmc_detect_card_removed(struct mmc_host *host)
2106d3049504SAdrian Hunter {
2107d3049504SAdrian Hunter 	int ret;
2108d3049504SAdrian Hunter 
2109d3049504SAdrian Hunter 	if (!host->card || mmc_card_removed(host->card))
2110d3049504SAdrian Hunter 		return 1;
2111d3049504SAdrian Hunter 
2112d3049504SAdrian Hunter 	ret = host->bus_ops->alive(host);
21131450734eSKevin Liu 
21141450734eSKevin Liu 	/*
21151450734eSKevin Liu 	 * Card detect status and the alive check may get out of sync if the
21161450734eSKevin Liu 	 * card is removed slowly: the card detect switch can change state
21171450734eSKevin Liu 	 * while the card/slot pads are still in contact (refer to "SD Card
21181450734eSKevin Liu 	 * Mechanical Addendum, Appendix C: Card Detection Switch"). So
21191450734eSKevin Liu 	 * reschedule the detect work 200 ms later for this case.
21201450734eSKevin Liu 	 */
21211450734eSKevin Liu 	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
21221450734eSKevin Liu 		mmc_detect_change(host, msecs_to_jiffies(200));
21231450734eSKevin Liu 		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
21241450734eSKevin Liu 	}
21251450734eSKevin Liu 
2126d3049504SAdrian Hunter 	if (ret) {
2127d3049504SAdrian Hunter 		mmc_card_set_removed(host->card);
2128d3049504SAdrian Hunter 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2129d3049504SAdrian Hunter 	}
2130d3049504SAdrian Hunter 
2131d3049504SAdrian Hunter 	return ret;
2132d3049504SAdrian Hunter }
2133d3049504SAdrian Hunter 
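/*
 * Summary of the function below (descriptive note, not from the original
 * source): check whether the card bound to @host has been removed. Must be
 * called with the host claimed. Returns nonzero when the card is gone (or was
 * never present) and zero when it is still there; the removal state is cached
 * in the card's state flags, so repeated calls normally avoid re-probing.
 */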
2134d3049504SAdrian Hunter int mmc_detect_card_removed(struct mmc_host *host)
2135d3049504SAdrian Hunter {
2136d3049504SAdrian Hunter 	struct mmc_card *card = host->card;
2137f0cc9cf9SUlf Hansson 	int ret;
2138d3049504SAdrian Hunter 
2139d3049504SAdrian Hunter 	WARN_ON(!host->claimed);
2140f0cc9cf9SUlf Hansson 
2141f0cc9cf9SUlf Hansson 	if (!card)
2142f0cc9cf9SUlf Hansson 		return 1;
2143f0cc9cf9SUlf Hansson 
21446067bafeSJaehoon Chung 	if (!mmc_card_is_removable(host))
21451ff2575bSUlf Hansson 		return 0;
21461ff2575bSUlf Hansson 
2147f0cc9cf9SUlf Hansson 	ret = mmc_card_removed(card);
2148d3049504SAdrian Hunter 	/*
2149d3049504SAdrian Hunter 	 * The card will be considered unchanged unless we have been asked to
2150d3049504SAdrian Hunter 	 * detect a change or host requires polling to provide card detection.
2151d3049504SAdrian Hunter 	 * detect a change or the host requires polling to provide card detection.
2152b6891679SUlf Hansson 	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2153f0cc9cf9SUlf Hansson 		return ret;
2154d3049504SAdrian Hunter 
2155d3049504SAdrian Hunter 	host->detect_change = 0;
2156f0cc9cf9SUlf Hansson 	if (!ret) {
2157f0cc9cf9SUlf Hansson 		ret = _mmc_detect_card_removed(host);
2158b6891679SUlf Hansson 		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2159f0cc9cf9SUlf Hansson 			/*
2160f0cc9cf9SUlf Hansson 			 * Schedule a detect work as soon as possible to let a
2161f0cc9cf9SUlf Hansson 			 * rescan handle the card removal.
2162f0cc9cf9SUlf Hansson 			 */
2163f0cc9cf9SUlf Hansson 			cancel_delayed_work(&host->detect);
2164bbd43682SUlf Hansson 			_mmc_detect_change(host, 0, false);
2165f0cc9cf9SUlf Hansson 		}
2166f0cc9cf9SUlf Hansson 	}
2167d3049504SAdrian Hunter 
2168f0cc9cf9SUlf Hansson 	return ret;
2169d3049504SAdrian Hunter }
2170d3049504SAdrian Hunter EXPORT_SYMBOL(mmc_detect_card_removed);
2171d3049504SAdrian Hunter 
2172dc913385SDmitry Osipenko int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
2173dc913385SDmitry Osipenko {
2174dc913385SDmitry Osipenko 	unsigned int boot_sectors_num;
2175dc913385SDmitry Osipenko 
2176dc913385SDmitry Osipenko 	if (!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA))
2177dc913385SDmitry Osipenko 		return -EOPNOTSUPP;
2178dc913385SDmitry Osipenko 
2179dc913385SDmitry Osipenko 	/* filter out unrelated cards */
2180dc913385SDmitry Osipenko 	if (card->ext_csd.rev < 3 ||
2181dc913385SDmitry Osipenko 	    !mmc_card_mmc(card) ||
2182dc913385SDmitry Osipenko 	    !mmc_card_is_blockaddr(card) ||
2183dc913385SDmitry Osipenko 	     mmc_card_is_removable(card->host))
2184dc913385SDmitry Osipenko 		return -ENOENT;
2185dc913385SDmitry Osipenko 
2186dc913385SDmitry Osipenko 	/*
2187dc913385SDmitry Osipenko 	 * eMMC storage has two special boot partitions in addition to the
2188dc913385SDmitry Osipenko 	 * main one.  NVIDIA's bootloader linearizes eMMC boot0->boot1->main
2189dc913385SDmitry Osipenko 	 * accesses, which means that the partition table addresses are shifted
2190dc913385SDmitry Osipenko 	 * by the size of the boot partitions.  In accordance with the eMMC
2191dc913385SDmitry Osipenko 	 * specification, the boot partition size is calculated as follows:
2192dc913385SDmitry Osipenko 	 *
2193dc913385SDmitry Osipenko 	 *	boot partition size = 128K byte x BOOT_SIZE_MULT
2194dc913385SDmitry Osipenko 	 *
2195dc913385SDmitry Osipenko 	 * Calculate the number of sectors occupied by both boot partitions.
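	 *
	 * For example (illustrative value only, not from any specific device):
	 * with BOOT_SIZE_MULT = 32, each boot partition is 128 KiB * 32 =
	 * 4 MiB, i.e. 8192 sectors of 512 bytes, so both boot partitions
	 * together occupy 16384 sectors.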
2196dc913385SDmitry Osipenko 	 */
2197dc913385SDmitry Osipenko 	boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
2198dc913385SDmitry Osipenko 			   SZ_512 * MMC_NUM_BOOT_PARTITION;
2199dc913385SDmitry Osipenko 
2200dc913385SDmitry Osipenko 	/* Defined by NVIDIA and used by Android devices. */
2201dc913385SDmitry Osipenko 	*gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
2202dc913385SDmitry Osipenko 
2203dc913385SDmitry Osipenko 	return 0;
2204dc913385SDmitry Osipenko }
2205dc913385SDmitry Osipenko EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
2206dc913385SDmitry Osipenko 
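/*
 * Summary of the function below (descriptive note, not from the original
 * source): mmc_rescan runs from the host's delayed "detect" work. An already
 * registered card is re-validated via the bus_ops->detect() callback; if no
 * card ends up bound to the host, initialization is retried at each frequency
 * in freqs[] until one succeeds. With MMC_CAP_NEEDS_POLL the work is
 * rescheduled every second.
 */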
2207fa700d73SUlf Hansson void mmc_rescan(struct work_struct *work)
2208807e8e40SAndy Ross {
2209fa700d73SUlf Hansson 	struct mmc_host *host =
2210fa700d73SUlf Hansson 		container_of(work, struct mmc_host, detect.work);
2211807e8e40SAndy Ross 	int i;
22124c2ef25fSMaxim Levitsky 
2213807e8e40SAndy Ross 	if (host->rescan_disable)
2214807e8e40SAndy Ross 		return;
22157ea239d9SPierre Ossman 
22163339d1e3SJohan Rudholm 	/* If there is a non-removable card registered, only scan once */
22176067bafeSJaehoon Chung 	if (!mmc_card_is_removable(host) && host->rescan_entered)
22183339d1e3SJohan Rudholm 		return;
22193339d1e3SJohan Rudholm 	host->rescan_entered = 1;
22203339d1e3SJohan Rudholm 
222186236813SUlf Hansson 	if (host->trigger_card_event && host->ops->card_event) {
2222d234d212SUlf Hansson 		mmc_claim_host(host);
222386236813SUlf Hansson 		host->ops->card_event(host);
2224d234d212SUlf Hansson 		mmc_release_host(host);
222586236813SUlf Hansson 		host->trigger_card_event = false;
222686236813SUlf Hansson 	}
222786236813SUlf Hansson 
222899b4ddd8SUlf Hansson 	/* Verify that a registered card is still functional; otherwise remove it. */
2229e9ce2ce1SUlf Hansson 	if (host->bus_ops)
223094d89efbSJorg Schummer 		host->bus_ops->detect(host);
223194d89efbSJorg Schummer 
2232d3049504SAdrian Hunter 	host->detect_change = 0;
2233d3049504SAdrian Hunter 
223494d89efbSJorg Schummer 	/* if there still is a card present, stop here */
2235e9ce2ce1SUlf Hansson 	if (host->bus_ops)
223694d89efbSJorg Schummer 		goto out;
2237aaac1b47SPierre Ossman 
2238d234d212SUlf Hansson 	mmc_claim_host(host);
22396067bafeSJaehoon Chung 	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2240c1b55bfcSSascha Hauer 			host->ops->get_cd(host) == 0) {
2241fa550189SUlf Hansson 		mmc_power_off(host);
2242fa550189SUlf Hansson 		mmc_release_host(host);
224328f52482SAnton Vorontsov 		goto out;
2244fa550189SUlf Hansson 	}
224528f52482SAnton Vorontsov 
2246ead49373SUlf Hansson 	/* If an SD express card is present, then leave it as is. */
2247ead49373SUlf Hansson 	if (mmc_card_sd_express(host)) {
2248ead49373SUlf Hansson 		mmc_release_host(host);
2249ead49373SUlf Hansson 		goto out;
2250ead49373SUlf Hansson 	}
2251ead49373SUlf Hansson 
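	/*
	 * Walk the freqs[] retry table, skipping entries above f_max and never
	 * clocking below f_min. For example, with (hypothetical) host limits
	 * of f_max = 300 kHz and f_min = 100 kHz, initialization is attempted
	 * at 300 kHz, then 200 kHz and finally 100 kHz.
	 */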
225288ae8b86SHein Tibosch 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2253661cf2d8SMichał Mirosław 		unsigned int freq = freqs[i];
2254661cf2d8SMichał Mirosław 		if (freq > host->f_max) {
2255661cf2d8SMichał Mirosław 			if (i + 1 < ARRAY_SIZE(freqs))
2256661cf2d8SMichał Mirosław 				continue;
2257661cf2d8SMichał Mirosław 			freq = host->f_max;
2258661cf2d8SMichał Mirosław 		}
2259661cf2d8SMichał Mirosław 		if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
2260807e8e40SAndy Ross 			break;
226106b2233aSJaehoon Chung 		if (freqs[i] <= host->f_min)
2262807e8e40SAndy Ross 			break;
2263807e8e40SAndy Ross 	}
226491f059c9SShaik Sajida Bhanu 
2265f6ca8f90SMarc Gonzalez 	/* A non-removable card should have been detected by now. */
2266f6ca8f90SMarc Gonzalez 	if (!mmc_card_is_removable(host) && !host->bus_ops)
2267f6ca8f90SMarc Gonzalez 		pr_info("%s: Failed to initialize a non-removable card\n",
2268f6ca8f90SMarc Gonzalez 			mmc_hostname(host));
2269f6ca8f90SMarc Gonzalez 
227091f059c9SShaik Sajida Bhanu 	/*
227191f059c9SShaik Sajida Bhanu 	 * Ignore the command timeout errors observed during
227291f059c9SShaik Sajida Bhanu 	 * the card init, as those are expected.
227391f059c9SShaik Sajida Bhanu 	 */
227491f059c9SShaik Sajida Bhanu 	host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
227588ae8b86SHein Tibosch 	mmc_release_host(host);
2276aaac1b47SPierre Ossman 
227728f52482SAnton Vorontsov  out:
227828f52482SAnton Vorontsov 	if (host->caps & MMC_CAP_NEEDS_POLL)
227928f52482SAnton Vorontsov 		mmc_schedule_delayed_work(&host->detect, HZ);
2280aaac1b47SPierre Ossman }
2281aaac1b47SPierre Ossman 
2282b93931a6SPierre Ossman void mmc_start_host(struct mmc_host *host)
2283aaac1b47SPierre Ossman {
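	/* Start probing at freqs[0] (400 kHz), clamped to [f_min, f_max]. */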
2284661cf2d8SMichał Mirosław 	host->f_init = max(min(freqs[0], host->f_max), host->f_min);
2285d9adcc12SGuennadi Liakhovetski 	host->rescan_disable = 0;
22868d1ffc8cSUlf Hansson 
2287c2c24819SUlf Hansson 	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
22888d1ffc8cSUlf Hansson 		mmc_claim_host(host);
22894a065193SUlf Hansson 		mmc_power_up(host, host->ocr_avail);
22908d1ffc8cSUlf Hansson 		mmc_release_host(host);
2291c2c24819SUlf Hansson 	}
22928d1ffc8cSUlf Hansson 
2293740a221eSAdrian Hunter 	mmc_gpiod_request_cd_irq(host);
2294fa700d73SUlf Hansson 	_mmc_detect_change(host, 0, false);
2295aaac1b47SPierre Ossman }
2296aaac1b47SPierre Ossman 
229766c915d0SUlf Hansson void __mmc_stop_host(struct mmc_host *host)
2298aaac1b47SPierre Ossman {
229903dbaa04SAdrian Hunter 	if (host->slot.cd_irq >= 0) {
230036f1d7e8SAdrian Hunter 		mmc_gpio_set_cd_wake(host, false);
2301740a221eSAdrian Hunter 		disable_irq(host->slot.cd_irq);
230203dbaa04SAdrian Hunter 	}
2303aaac1b47SPierre Ossman 
2304d9adcc12SGuennadi Liakhovetski 	host->rescan_disable = 1;
2305d9bcbf34SGuennadi Liakhovetski 	cancel_delayed_work_sync(&host->detect);
230666c915d0SUlf Hansson }
230766c915d0SUlf Hansson 
230866c915d0SUlf Hansson void mmc_stop_host(struct mmc_host *host)
230966c915d0SUlf Hansson {
231066c915d0SUlf Hansson 	__mmc_stop_host(host);
2311aaac1b47SPierre Ossman 
2312da68c4ebSNicolas Pitre 	/* clear pm flags now and let card drivers set them as needed */
2313da68c4ebSNicolas Pitre 	host->pm_flags = 0;
2314da68c4ebSNicolas Pitre 
2315e9ce2ce1SUlf Hansson 	if (host->bus_ops) {
23160db13fc2SGuennadi Liakhovetski 		/* Calling bus_ops->remove() with a claimed host can deadlock */
23177ea239d9SPierre Ossman 		host->bus_ops->remove(host);
23187ea239d9SPierre Ossman 		mmc_claim_host(host);
23197ea239d9SPierre Ossman 		mmc_detach_bus(host);
23207f7e4129SUlf Hansson 		mmc_power_off(host);
23217ea239d9SPierre Ossman 		mmc_release_host(host);
232253509f0fSDenis Karpov 		return;
2323aaac1b47SPierre Ossman 	}
23247ea239d9SPierre Ossman 
23258d1ffc8cSUlf Hansson 	mmc_claim_host(host);
2326aaac1b47SPierre Ossman 	mmc_power_off(host);
23278d1ffc8cSUlf Hansson 	mmc_release_host(host);
2328aaac1b47SPierre Ossman }
2329aaac1b47SPierre Ossman 
2330ffce2e7eSPierre Ossman static int __init mmc_init(void)
2331ffce2e7eSPierre Ossman {
2332ffce2e7eSPierre Ossman 	int ret;
2333ffce2e7eSPierre Ossman 
2334ffce2e7eSPierre Ossman 	ret = mmc_register_bus();
2335e29a7d73SPierre Ossman 	if (ret)
2336520bd7a8SUlf Hansson 		return ret;
2337e29a7d73SPierre Ossman 
2338ffce2e7eSPierre Ossman 	ret = mmc_register_host_class();
2339ffce2e7eSPierre Ossman 	if (ret)
2340e29a7d73SPierre Ossman 		goto unregister_bus;
2341e29a7d73SPierre Ossman 
2342e29a7d73SPierre Ossman 	ret = sdio_register_bus();
2343e29a7d73SPierre Ossman 	if (ret)
2344e29a7d73SPierre Ossman 		goto unregister_host_class;
2345e29a7d73SPierre Ossman 
2346e29a7d73SPierre Ossman 	return 0;
2347e29a7d73SPierre Ossman 
2348e29a7d73SPierre Ossman unregister_host_class:
2349e29a7d73SPierre Ossman 	mmc_unregister_host_class();
2350e29a7d73SPierre Ossman unregister_bus:
2351ffce2e7eSPierre Ossman 	mmc_unregister_bus();
2352ffce2e7eSPierre Ossman 	return ret;
2353ffce2e7eSPierre Ossman }
2354ffce2e7eSPierre Ossman 
2355ffce2e7eSPierre Ossman static void __exit mmc_exit(void)
2356ffce2e7eSPierre Ossman {
2357e29a7d73SPierre Ossman 	sdio_unregister_bus();
2358ffce2e7eSPierre Ossman 	mmc_unregister_host_class();
2359ffce2e7eSPierre Ossman 	mmc_unregister_bus();
2360ffce2e7eSPierre Ossman }
2361ffce2e7eSPierre Ossman 
236226074962SNicolas Pitre subsys_initcall(mmc_init);
2363ffce2e7eSPierre Ossman module_exit(mmc_exit);
2364ffce2e7eSPierre Ossman 
2365aaac1b47SPierre Ossman MODULE_LICENSE("GPL");
2366