xref: /linux/drivers/mmc/core/mmc_ops.c (revision 31368ce83c59a5422ee621a38aeea98142d0ecf7)
1 /*
 *  linux/drivers/mmc/core/mmc_ops.c
3  *
4  *  Copyright 2006-2007 Pierre Ossman
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/export.h>
14 #include <linux/types.h>
15 #include <linux/scatterlist.h>
16 
17 #include <linux/mmc/host.h>
18 #include <linux/mmc/card.h>
19 #include <linux/mmc/mmc.h>
20 
21 #include "core.h"
22 #include "card.h"
23 #include "host.h"
24 #include "mmc_ops.h"
25 
26 #define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
27 
/*
 * Reference tuning block patterns, compared byte-for-byte against the data
 * returned by the card in mmc_send_tuning(). The 64-byte pattern is used on
 * a 4-bit bus, the 128-byte pattern on an 8-bit bus.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
57 
58 int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
59 {
60 	int err;
61 	struct mmc_command cmd = {};
62 
63 	cmd.opcode = MMC_SEND_STATUS;
64 	if (!mmc_host_is_spi(card->host))
65 		cmd.arg = card->rca << 16;
66 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
67 
68 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
69 	if (err)
70 		return err;
71 
72 	/* NOTE: callers are required to understand the difference
73 	 * between "native" and SPI format status words!
74 	 */
75 	if (status)
76 		*status = cmd.resp[0];
77 
78 	return 0;
79 }
80 EXPORT_SYMBOL_GPL(__mmc_send_status);
81 
82 int mmc_send_status(struct mmc_card *card, u32 *status)
83 {
84 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
85 }
86 
87 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
88 {
89 	struct mmc_command cmd = {};
90 
91 	cmd.opcode = MMC_SELECT_CARD;
92 
93 	if (card) {
94 		cmd.arg = card->rca << 16;
95 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
96 	} else {
97 		cmd.arg = 0;
98 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
99 	}
100 
101 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
102 }
103 
104 int mmc_select_card(struct mmc_card *card)
105 {
106 
107 	return _mmc_select_card(card->host, card);
108 }
109 
110 int mmc_deselect_cards(struct mmc_host *host)
111 {
112 	return _mmc_select_card(host, NULL);
113 }
114 
115 /*
116  * Write the value specified in the device tree or board code into the optional
117  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
118  * drive strength of the DAT and CMD outputs. The actual meaning of a given
119  * value is hardware dependant.
120  * The presence of the DSR register can be determined from the CSD register,
121  * bit 76.
122  */
123 int mmc_set_dsr(struct mmc_host *host)
124 {
125 	struct mmc_command cmd = {};
126 
127 	cmd.opcode = MMC_SET_DSR;
128 
129 	cmd.arg = (host->dsr << 16) | 0xffff;
130 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
131 
132 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
133 }
134 
/*
 * Send CMD0 (GO_IDLE_STATE) to reset the card into the idle state.
 * Returns the result of the command submission (0 on success).
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	/* No native-mode response is expected (MMC_RSP_NONE), so no retries. */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	/* Restore chipselect handling after the reset has been issued. */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* A reset card no longer has SPI CRC checking enabled. */
	host->use_spi_crc = 0;

	return err;
}
171 
/*
 * Send CMD1 (SEND_OP_COND), polling until the card reports it has left the
 * busy/idle phase of its power-up sequence.
 * @ocr: OCR to propose to the card; 0 means "probe only" (single pass)
 * @rocr: if non-NULL, receives the card's OCR (native mode only)
 *
 * Returns 0 once the card is ready, -ETIMEDOUT if it never becomes ready,
 * or a command error.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	/* In SPI mode CMD1 takes no OCR argument. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	/* Up to 100 iterations with a 10ms delay, i.e. roughly one second. */
	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	/* The OCR in the response is only meaningful in native mode. */
	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
209 
210 int mmc_set_relative_addr(struct mmc_card *card)
211 {
212 	struct mmc_command cmd = {};
213 
214 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
215 	cmd.arg = card->rca << 16;
216 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
217 
218 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
219 }
220 
221 static int
222 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
223 {
224 	int err;
225 	struct mmc_command cmd = {};
226 
227 	cmd.opcode = opcode;
228 	cmd.arg = arg;
229 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
230 
231 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
232 	if (err)
233 		return err;
234 
235 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
236 
237 	return 0;
238 }
239 
/*
 * Read a register (CSD, CID or EXT_CSD) as a single-block data transfer.
 *
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* One block of @len bytes, read from the card into @buf. */
	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Command errors take precedence over data errors. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
293 
294 static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
295 {
296 	int ret, i;
297 	__be32 *csd_tmp;
298 
299 	csd_tmp = kzalloc(16, GFP_KERNEL);
300 	if (!csd_tmp)
301 		return -ENOMEM;
302 
303 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
304 	if (ret)
305 		goto err;
306 
307 	for (i = 0; i < 4; i++)
308 		csd[i] = be32_to_cpu(csd_tmp[i]);
309 
310 err:
311 	kfree(csd_tmp);
312 	return ret;
313 }
314 
315 int mmc_send_csd(struct mmc_card *card, u32 *csd)
316 {
317 	if (mmc_host_is_spi(card->host))
318 		return mmc_spi_send_csd(card, csd);
319 
320 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
321 				MMC_SEND_CSD);
322 }
323 
324 static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
325 {
326 	int ret, i;
327 	__be32 *cid_tmp;
328 
329 	cid_tmp = kzalloc(16, GFP_KERNEL);
330 	if (!cid_tmp)
331 		return -ENOMEM;
332 
333 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
334 	if (ret)
335 		goto err;
336 
337 	for (i = 0; i < 4; i++)
338 		cid[i] = be32_to_cpu(cid_tmp[i]);
339 
340 err:
341 	kfree(cid_tmp);
342 	return ret;
343 }
344 
345 int mmc_send_cid(struct mmc_host *host, u32 *cid)
346 {
347 	if (mmc_host_is_spi(host))
348 		return mmc_spi_send_cid(host, cid);
349 
350 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
351 }
352 
353 int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
354 {
355 	int err;
356 	u8 *ext_csd;
357 
358 	if (!card || !new_ext_csd)
359 		return -EINVAL;
360 
361 	if (!mmc_can_ext_csd(card))
362 		return -EOPNOTSUPP;
363 
364 	/*
365 	 * As the ext_csd is so large and mostly unused, we don't store the
366 	 * raw block in mmc_card.
367 	 */
368 	ext_csd = kzalloc(512, GFP_KERNEL);
369 	if (!ext_csd)
370 		return -ENOMEM;
371 
372 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
373 				512);
374 	if (err)
375 		kfree(ext_csd);
376 	else
377 		*new_ext_csd = ext_csd;
378 
379 	return err;
380 }
381 EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
382 
/*
 * Read the OCR register over SPI (CMD58, READ_OCR).
 * @highcap: set the HCS probe bit (bit 30) in the argument
 * @ocrp: receives the OCR from the response
 *
 * NOTE(review): *ocrp is written even when the command failed, so its
 * contents are only meaningful when 0 is returned -- confirm callers
 * check the return value before using it.
 */
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}
397 
398 int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
399 {
400 	struct mmc_command cmd = {};
401 	int err;
402 
403 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
404 	cmd.flags = MMC_RSP_SPI_R1;
405 	cmd.arg = use_crc;
406 
407 	err = mmc_wait_for_cmd(host, &cmd, 0);
408 	if (!err)
409 		host->use_spi_crc = use_crc;
410 	return err;
411 }
412 
/*
 * Translate a card status word (as obtained via CMD13) into an error code
 * after a switch operation. Returns -EBADMSG when the card flagged a
 * switch error (or an illegal command in SPI mode), 0 otherwise.
 */
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		/*
		 * 0xFDFFA000 presumably masks the R1 error bits while
		 * excluding current-state/ready fields -- such bits are
		 * warned about but only R1_SWITCH_ERROR is fatal.
		 */
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}
427 
428 /* Caller must hold re-tuning */
429 int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
430 {
431 	u32 status;
432 	int err;
433 
434 	err = mmc_send_status(card, &status);
435 	if (!crc_err_fatal && err == -EILSEQ)
436 		return 0;
437 	if (err)
438 		return err;
439 
440 	return mmc_switch_status_error(card->host, status);
441 }
442 
443 int mmc_switch_status(struct mmc_card *card)
444 {
445 	return __mmc_switch_status(card, true);
446 }
447 
/*
 * Wait for the card to stop signalling busy after a switch command.
 * @timeout_ms: maximum wait; 0 selects the MMC_OPS_TIMEOUT_MS fallback
 * @send_status: whether polling with CMD13 is permitted
 * @retry_crc_err: treat a CRC error on CMD13 as "still busy" and retry
 *
 * Prefers the host's ->card_busy() callback; otherwise polls CMD13.
 * Returns 0 when the card is no longer busy, -ETIMEDOUT if it stays
 * busy past the deadline, or a status/command error.
 */
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				/* Keep polling; the line may still settle. */
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				/* Busy means still in the programming state. */
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
506 
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to, or 0 to keep the current timing
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status,	bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	/* Hold off re-tuning for the entire switch sequence. */
	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	/* CMD6 argument packs access mode, register index, value and set. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		/* On failure, roll the timing back to what it was. */
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
597 
598 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
599 		unsigned int timeout_ms)
600 {
601 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
602 			true, true, false);
603 }
604 EXPORT_SYMBOL_GPL(mmc_switch);
605 
/*
 * mmc_send_tuning - request one tuning block and verify it
 * @host: host to tune
 * @opcode: tuning command (CMD19 for SD, CMD21 for eMMC HS200)
 * @cmd_error: if non-NULL, receives the command-phase error (0 if none)
 *
 * Reads a single tuning block and compares it with the reference pattern
 * for the current bus width. Returns 0 on a match, -EIO on a mismatch,
 * -EINVAL for an unsupported bus width, -ENOMEM on allocation failure,
 * or the underlying command/data error.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	/* Pick the reference pattern matching the active bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	/* Heap buffer: the transfer may be done via DMA. */
	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	/* Report the command error to the caller even when non-fatal here. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* A byte-for-byte mismatch means the sample point is wrong. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
674 
675 int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
676 {
677 	struct mmc_command cmd = {};
678 
679 	/*
680 	 * eMMC specification specifies that CMD12 can be used to stop a tuning
681 	 * command, but SD specification does not, so do nothing unless it is
682 	 * eMMC.
683 	 */
684 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
685 		return 0;
686 
687 	cmd.opcode = MMC_STOP_TRANSMISSION;
688 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
689 
690 	/*
691 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
692 	 * on the tuning timeout i.e. 150ms.
693 	 */
694 	cmd.busy_timeout = 150;
695 
696 	return mmc_wait_for_cmd(host, &cmd, 0);
697 }
698 EXPORT_SYMBOL_GPL(mmc_abort_tuning);
699 
/*
 * Run one leg of the eMMC bus test: write (BUS_TEST_W) or read (BUS_TEST_R)
 * a small test pattern of @len bytes (8 for an 8-bit bus, 4 for 4-bit).
 * On a read, the received bytes are expected to be the bit-inverse of the
 * pattern previously written; a mismatch returns -EIO.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* R1 (or SPI R1) plus a single data block in either direction. */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * NOTE(review): only the first len/4 bytes are compared
		 * (2 of 8, or 1 of 4) -- presumably only the leading bytes
		 * of the pattern carry the per-line test data; confirm
		 * against the JESD84 bus-test description.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transfer errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
776 
777 int mmc_bus_test(struct mmc_card *card, u8 bus_width)
778 {
779 	int width;
780 
781 	if (bus_width == MMC_BUS_WIDTH_8)
782 		width = 8;
783 	else if (bus_width == MMC_BUS_WIDTH_4)
784 		width = 4;
785 	else if (bus_width == MMC_BUS_WIDTH_1)
786 		return 0; /* no need for test */
787 	else
788 		return -EINVAL;
789 
790 	/*
791 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
792 	 * is a problem.  This improves chances that the test will work.
793 	 */
794 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
795 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
796 }
797 
/*
 * Issue the card's HPI (high-priority interrupt) command, which is either
 * CMD12 or CMD13 as advertised in EXT_CSD, with the HPI bit (bit 0) set in
 * the argument alongside the RCA.
 * @status: if non-NULL, filled with the command response on success.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card didn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	/*
	 * NOTE(review): if hpi_cmd is neither CMD12 nor CMD13, cmd.flags
	 * stays 0 -- presumably card init guarantees one of the two;
	 * confirm against the EXT_CSD parsing code.
	 */
	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
831 
/**
 *	mmc_interrupt_hpi - Issue for High priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issued High Priority Interrupt, and check for card status
 *	until out-of prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		/* Positive return distinguishes "HPI unavailable" from errors. */
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	/* Poll until the card leaves the programming state or the
	 * EXT_CSD-advertised out-of-interrupt time elapses.
	 */
	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
895 
896 int mmc_can_ext_csd(struct mmc_card *card)
897 {
898 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
899 }
900 
901 /**
902  *	mmc_stop_bkops - stop ongoing BKOPS
903  *	@card: MMC card to check BKOPS
904  *
905  *	Send HPI command to stop ongoing background operations to
906  *	allow rapid servicing of foreground operations, e.g. read/
907  *	writes. Wait until the card comes out of the programming state
908  *	to avoid errors in servicing read/write requests.
909  */
910 int mmc_stop_bkops(struct mmc_card *card)
911 {
912 	int err = 0;
913 
914 	err = mmc_interrupt_hpi(card);
915 
916 	/*
917 	 * If err is EINVAL, we can't issue an HPI.
918 	 * It should complete the BKOPS.
919 	 */
920 	if (!err || (err == -EINVAL)) {
921 		mmc_card_clr_doing_bkops(card);
922 		mmc_retune_release(card->host);
923 		err = 0;
924 	}
925 
926 	return err;
927 }
928 
929 static int mmc_read_bkops_status(struct mmc_card *card)
930 {
931 	int err;
932 	u8 *ext_csd;
933 
934 	mmc_claim_host(card->host);
935 	err = mmc_get_ext_csd(card, &ext_csd);
936 	mmc_release_host(card->host);
937 	if (err)
938 		return err;
939 
940 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
941 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
942 	kfree(ext_csd);
943 	return 0;
944 }
945 
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	/* Nothing to do without manual BKOPS or when already running. */
	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	/* A zero status means the card has no pending background work. */
	if (!card->ext_csd.raw_bkops_status)
		return;

	/* Non-urgent levels are ignored when invoked from an exception. */
	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		/* Urgent: run synchronously with the full busy timeout. */
		timeout = MMC_OPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		/* Non-urgent: fire and forget. */
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent bkops status (LEVEL_2 and more)
	 * bkops executed synchronously, otherwise
	 * the operation is in progress
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
1012 
1013 /*
1014  * Flush the cache to the non-volatile storage.
1015  */
1016 int mmc_flush_cache(struct mmc_card *card)
1017 {
1018 	int err = 0;
1019 
1020 	if (mmc_card_mmc(card) &&
1021 			(card->ext_csd.cache_size > 0) &&
1022 			(card->ext_csd.cache_ctrl & 1)) {
1023 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1024 				EXT_CSD_FLUSH_CACHE, 1, 0);
1025 		if (err)
1026 			pr_err("%s: cache flush error %d\n",
1027 					mmc_hostname(card->host), err);
1028 	}
1029 
1030 	return err;
1031 }
1032 EXPORT_SYMBOL(mmc_flush_cache);
1033 
1034 static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
1035 {
1036 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
1037 	int err;
1038 
1039 	if (!card->ext_csd.cmdq_support)
1040 		return -EOPNOTSUPP;
1041 
1042 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
1043 			 val, card->ext_csd.generic_cmd6_time);
1044 	if (!err)
1045 		card->ext_csd.cmdq_en = enable;
1046 
1047 	return err;
1048 }
1049 
1050 int mmc_cmdq_enable(struct mmc_card *card)
1051 {
1052 	return mmc_cmdq_switch(card, true);
1053 }
1054 EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1055 
1056 int mmc_cmdq_disable(struct mmc_card *card)
1057 {
1058 	return mmc_cmdq_switch(card, false);
1059 }
1060 EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
1061