/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
				    bool ignore_crc)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	if (ignore_crc)
		cmd.flags &= ~MMC_RSP_CRC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, false);
}
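
/*
 * Example (illustrative sketch): native-mode callers typically decode the
 * returned status word with the R1 helpers, e.g. to check whether the card
 * is still programming:
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_PRG)
 *		... keep polling until the card leaves the PRG state ...
 */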

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
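
/*
 * Example: with host->dsr = 0x0404 (the spec's default DSR value), the CMD4
 * argument above works out to 0x0404ffff - the DSR in bits [31:16] and
 * stuffing bits in [15:0].
 */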

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
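
/*
 * Example (illustrative sketch of the attach sequence in mmc.c): the core
 * first probes with ocr == 0 to read the card's OCR in a single pass, then
 * calls again with the negotiated voltage mask (plus the sector-mode bit)
 * and lets the loop above poll until the card reports ready:
 *
 *	u32 ocr, rocr;
 *
 *	mmc_send_op_cond(host, 0, &ocr);
 *	mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 */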

int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cid);

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cxd);

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: callers must pass either a DMA-capable buffer or an on-stack
 * buffer for @buf; the latter incurs some bounce overhead in the callee.
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned int len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else {
		mmc_set_data_timeout(&data, card);
	}

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	u32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	u32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
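
/*
 * Example (illustrative sketch): on success the caller owns the 512-byte
 * buffer and is responsible for freeing it:
 *
 *	u8 *ext_csd;
 *
 *	if (!mmc_get_ext_csd(card, &ext_csd)) {
 *		... parse fields such as ext_csd[EXT_CSD_REV] ...
 *		kfree(ext_csd);
 *	}
 */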

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
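	/* OCR bit 30 requests sector (high-capacity) addressing mode */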
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to an R1 response instead of an R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		return 0;

	/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to
	 * poll for busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				return err;
		}
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * If we are not allowed to issue a status command and the
		 * host doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			return 0;
		}

		/* Timeout if the device never leaves the program state. */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
				false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
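
/*
 * Example (illustrative sketch): a typical EXT_CSD update, switching the
 * card to 4-bit bus width with its generic CMD6 timeout:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_4, card->ext_csd.generic_cmd6_time);
 */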

int mmc_send_tuning(struct mmc_host *host)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;
	u32 opcode;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
		opcode = MMC_SEND_TUNING_BLOCK;
	} else {
		return -EINVAL;
	}

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should
	 * be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
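
/*
 * Example (illustrative sketch; set_sample_phase() stands in for a
 * driver-specific delay/phase control): a host driver's ->execute_tuning()
 * hook typically steps its sample point and retries until the pattern
 * reads back intact:
 *
 *	for (phase = 0; phase < max_phases; phase++) {
 *		set_sample_phase(host, phase);
 *		if (!mmc_send_tuning(host))
 *			break;
 *	}
 */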

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/*
	 * DMA onto the stack is unsafe/nonportable, so use a kmalloc'd
	 * bounce buffer for the transfer instead.
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8) {
		test_buf = testdata_8bit;
	} else if (len == 4) {
		test_buf = testdata_4bit;
	} else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE:  the MMC_RSP_SPI_R1 is always correct here; the bus test
	 * commands use an R1 response plus a data block, never the R2 type.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
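	/*
	 * BUS_TEST_R returns the written pattern inverted on each working
	 * data line, so a byte XORed with what we wrote reads back as 0xff.
	 * Only the first len/4 bytes carry the toggling pattern, hence the
	 * loop bound below.
	 */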
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int err, width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
	return err;
}
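
/*
 * Example (illustrative sketch): the core runs this right after switching
 * bus width to verify that the extra data lines actually work:
 *
 *	if (mmc_bus_test(card, MMC_BUS_WIDTH_8))
 *		... fall back to a narrower bus width ...
 */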

int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card doesn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. HPI command response %#x\n",
			mmc_hostname(card->host), err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}