xref: /linux/drivers/mmc/core/mmc_ops.c (revision 08ec212c0f92cbf30e3ecc7349f18151714041d6)
1 /*
2  *  linux/drivers/mmc/core/mmc_ops.c
3  *
4  *  Copyright 2006-2007 Pierre Ossman
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/export.h>
14 #include <linux/types.h>
15 #include <linux/scatterlist.h>
16 
17 #include <linux/mmc/host.h>
18 #include <linux/mmc/card.h>
19 #include <linux/mmc/mmc.h>
20 
21 #include "core.h"
22 #include "mmc_ops.h"
23 
24 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
25 {
26 	int err;
27 	struct mmc_command cmd = {0};
28 
29 	BUG_ON(!host);
30 
31 	cmd.opcode = MMC_SELECT_CARD;
32 
33 	if (card) {
34 		cmd.arg = card->rca << 16;
35 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
36 	} else {
37 		cmd.arg = 0;
38 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
39 	}
40 
41 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
42 	if (err)
43 		return err;
44 
45 	return 0;
46 }
47 
/*
 * Select @card on its host controller via _mmc_select_card().
 * Returns 0 on success or a negative errno from the command.
 */
int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}
54 
/*
 * Deselect all cards on @host (SELECT_CARD with a NULL card, i.e. RCA 0).
 */
int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
59 
60 int mmc_card_sleepawake(struct mmc_host *host, int sleep)
61 {
62 	struct mmc_command cmd = {0};
63 	struct mmc_card *card = host->card;
64 	int err;
65 
66 	if (sleep)
67 		mmc_deselect_cards(host);
68 
69 	cmd.opcode = MMC_SLEEP_AWAKE;
70 	cmd.arg = card->rca << 16;
71 	if (sleep)
72 		cmd.arg |= 1 << 15;
73 
74 	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
75 	err = mmc_wait_for_cmd(host, &cmd, 0);
76 	if (err)
77 		return err;
78 
79 	/*
80 	 * If the host does not wait while the card signals busy, then we will
81 	 * will have to wait the sleep/awake timeout.  Note, we cannot use the
82 	 * SEND_STATUS command to poll the status because that command (and most
83 	 * others) is invalid while the card sleeps.
84 	 */
85 	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
86 		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
87 
88 	if (!sleep)
89 		err = mmc_select_card(card);
90 
91 	return err;
92 }
93 
/*
 * Reset all cards to the idle state with GO_IDLE_STATE.
 * Returns the result of the command (0 on success, negative errno
 * otherwise); the chip-select override is released either way.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	/* No response in native mode; SPI mode answers with R1. */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* Give the card a moment to complete the reset. */
	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		/* Release the chipselect override asserted above. */
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* A reset card is back to CRC-off in SPI mode. */
	host->use_spi_crc = 0;

	return err;
}
130 
131 int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
132 {
133 	struct mmc_command cmd = {0};
134 	int i, err = 0;
135 
136 	BUG_ON(!host);
137 
138 	cmd.opcode = MMC_SEND_OP_COND;
139 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
140 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
141 
142 	for (i = 100; i; i--) {
143 		err = mmc_wait_for_cmd(host, &cmd, 0);
144 		if (err)
145 			break;
146 
147 		/* if we're just probing, do a single pass */
148 		if (ocr == 0)
149 			break;
150 
151 		/* otherwise wait until reset completes */
152 		if (mmc_host_is_spi(host)) {
153 			if (!(cmd.resp[0] & R1_SPI_IDLE))
154 				break;
155 		} else {
156 			if (cmd.resp[0] & MMC_CARD_BUSY)
157 				break;
158 		}
159 
160 		err = -ETIMEDOUT;
161 
162 		mmc_delay(10);
163 	}
164 
165 	if (rocr && !mmc_host_is_spi(host))
166 		*rocr = cmd.resp[0];
167 
168 	return err;
169 }
170 
171 int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
172 {
173 	int err;
174 	struct mmc_command cmd = {0};
175 
176 	BUG_ON(!host);
177 	BUG_ON(!cid);
178 
179 	cmd.opcode = MMC_ALL_SEND_CID;
180 	cmd.arg = 0;
181 	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
182 
183 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
184 	if (err)
185 		return err;
186 
187 	memcpy(cid, cmd.resp, sizeof(u32) * 4);
188 
189 	return 0;
190 }
191 
192 int mmc_set_relative_addr(struct mmc_card *card)
193 {
194 	int err;
195 	struct mmc_command cmd = {0};
196 
197 	BUG_ON(!card);
198 	BUG_ON(!card->host);
199 
200 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
201 	cmd.arg = card->rca << 16;
202 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
203 
204 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
205 	if (err)
206 		return err;
207 
208 	return 0;
209 }
210 
211 static int
212 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
213 {
214 	int err;
215 	struct mmc_command cmd = {0};
216 
217 	BUG_ON(!host);
218 	BUG_ON(!cxd);
219 
220 	cmd.opcode = opcode;
221 	cmd.arg = arg;
222 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
223 
224 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
225 	if (err)
226 		return err;
227 
228 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
229 
230 	return 0;
231 }
232 
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 *
 * Reads one data block of @len bytes produced by @opcode into @buf.
 * If @buf lives on the stack a DMA-safe bounce buffer is allocated and
 * copied back afterwards.  Returns 0 on success, -ENOMEM if the bounce
 * buffer could not be allocated, or the command/data error.
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single block of @len bytes through a one-entry scatterlist. */
	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Copy the bounce buffer back and release it, even on error. */
	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
305 
306 int mmc_send_csd(struct mmc_card *card, u32 *csd)
307 {
308 	int ret, i;
309 	u32 *csd_tmp;
310 
311 	if (!mmc_host_is_spi(card->host))
312 		return mmc_send_cxd_native(card->host, card->rca << 16,
313 				csd, MMC_SEND_CSD);
314 
315 	csd_tmp = kmalloc(16, GFP_KERNEL);
316 	if (!csd_tmp)
317 		return -ENOMEM;
318 
319 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
320 	if (ret)
321 		goto err;
322 
323 	for (i = 0;i < 4;i++)
324 		csd[i] = be32_to_cpu(csd_tmp[i]);
325 
326 err:
327 	kfree(csd_tmp);
328 	return ret;
329 }
330 
331 int mmc_send_cid(struct mmc_host *host, u32 *cid)
332 {
333 	int ret, i;
334 	u32 *cid_tmp;
335 
336 	if (!mmc_host_is_spi(host)) {
337 		if (!host->card)
338 			return -EINVAL;
339 		return mmc_send_cxd_native(host, host->card->rca << 16,
340 				cid, MMC_SEND_CID);
341 	}
342 
343 	cid_tmp = kmalloc(16, GFP_KERNEL);
344 	if (!cid_tmp)
345 		return -ENOMEM;
346 
347 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
348 	if (ret)
349 		goto err;
350 
351 	for (i = 0;i < 4;i++)
352 		cid[i] = be32_to_cpu(cid_tmp[i]);
353 
354 err:
355 	kfree(cid_tmp);
356 	return ret;
357 }
358 
/*
 * Read the card's 512-byte EXT_CSD register into @ext_csd.  The buffer
 * must be DMA-capable or on-stack; see mmc_send_cxd_data().
 */
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
			ext_csd, 512);
}
364 
/*
 * Read the OCR over the SPI-only READ_OCR command.  @highcap sets the
 * high-capacity bit (bit 30) in the argument.  Note that *ocrp is
 * written unconditionally, even when the command failed; the caller
 * must check the return value before trusting it.
 */
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* In SPI mode the OCR arrives in the second response word. */
	*ocrp = cmd.resp[1];
	return err;
}
379 
380 int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
381 {
382 	struct mmc_command cmd = {0};
383 	int err;
384 
385 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
386 	cmd.flags = MMC_RSP_SPI_R1;
387 	cmd.arg = use_crc;
388 
389 	err = mmc_wait_for_cmd(host, &cmd, 0);
390 	if (!err)
391 		host->use_spi_crc = use_crc;
392 	return err;
393 }
394 
395 /**
396  *	__mmc_switch - modify EXT_CSD register
397  *	@card: the MMC card associated with the data transfer
398  *	@set: cmd set values
399  *	@index: EXT_CSD register index
400  *	@value: value to program into EXT_CSD register
401  *	@timeout_ms: timeout (ms) for operation performed by register write,
402  *                   timeout of zero implies maximum possible timeout
403  *	@use_busy_signal: use the busy signal as response type
404  *
405  *	Modifies the EXT_CSD register for selected card.
406  */
407 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
408 	       unsigned int timeout_ms, bool use_busy_signal)
409 {
410 	int err;
411 	struct mmc_command cmd = {0};
412 	u32 status;
413 
414 	BUG_ON(!card);
415 	BUG_ON(!card->host);
416 
417 	cmd.opcode = MMC_SWITCH;
418 	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
419 		  (index << 16) |
420 		  (value << 8) |
421 		  set;
422 	cmd.flags = MMC_CMD_AC;
423 	if (use_busy_signal)
424 		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
425 	else
426 		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
427 
428 
429 	cmd.cmd_timeout_ms = timeout_ms;
430 
431 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
432 	if (err)
433 		return err;
434 
435 	/* No need to check card status in case of unblocking command */
436 	if (!use_busy_signal)
437 		return 0;
438 
439 	/* Must check status to be sure of no errors */
440 	do {
441 		err = mmc_send_status(card, &status);
442 		if (err)
443 			return err;
444 		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
445 			break;
446 		if (mmc_host_is_spi(card->host))
447 			break;
448 	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
449 
450 	if (mmc_host_is_spi(card->host)) {
451 		if (status & R1_SPI_ILLEGAL_COMMAND)
452 			return -EBADMSG;
453 	} else {
454 		if (status & 0xFDFFA000)
455 			pr_warning("%s: unexpected status %#x after "
456 			       "switch", mmc_hostname(card->host), status);
457 		if (status & R1_SWITCH_ERROR)
458 			return -EBADMSG;
459 	}
460 
461 	return 0;
462 }
463 EXPORT_SYMBOL_GPL(__mmc_switch);
464 
/*
 * Write one EXT_CSD byte and wait for the card to finish, using the
 * busy signal as the response type (__mmc_switch with
 * use_busy_signal = true).
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true);
}
EXPORT_SYMBOL_GPL(mmc_switch);
471 
472 int mmc_send_status(struct mmc_card *card, u32 *status)
473 {
474 	int err;
475 	struct mmc_command cmd = {0};
476 
477 	BUG_ON(!card);
478 	BUG_ON(!card->host);
479 
480 	cmd.opcode = MMC_SEND_STATUS;
481 	if (!mmc_host_is_spi(card->host))
482 		cmd.arg = card->rca << 16;
483 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
484 
485 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
486 	if (err)
487 		return err;
488 
489 	/* NOTE: callers are required to understand the difference
490 	 * between "native" and SPI format status words!
491 	 */
492 	if (status)
493 		*status = cmd.resp[0];
494 
495 	return 0;
496 }
497 
/*
 * Run one half of the bus test: BUS_TEST_W writes a known pattern,
 * BUS_TEST_R reads the card's reply, which should be the bit-inverse of
 * the pattern.  @len is 4 or 8 bytes to match the bus width.  Returns 0
 * on success, -EIO on a pattern mismatch, -ENOMEM/-EINVAL on setup
 * failure, or the command/data error.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	/* Pick the pattern matching the requested width. */
	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* One block of @len bytes, read or written depending on opcode. */
	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * Compare the leading len/4 bytes; a byte XORed with its
		 * inverse must be 0xff.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transport errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
573 
574 int mmc_bus_test(struct mmc_card *card, u8 bus_width)
575 {
576 	int err, width;
577 
578 	if (bus_width == MMC_BUS_WIDTH_8)
579 		width = 8;
580 	else if (bus_width == MMC_BUS_WIDTH_4)
581 		width = 4;
582 	else if (bus_width == MMC_BUS_WIDTH_1)
583 		return 0; /* no need for test */
584 	else
585 		return -EINVAL;
586 
587 	/*
588 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
589 	 * is a problem.  This improves chances that the test will work.
590 	 */
591 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
592 	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
593 	return err;
594 }
595 
596 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
597 {
598 	struct mmc_command cmd = {0};
599 	unsigned int opcode;
600 	int err;
601 
602 	if (!card->ext_csd.hpi) {
603 		pr_warning("%s: Card didn't support HPI command\n",
604 			   mmc_hostname(card->host));
605 		return -EINVAL;
606 	}
607 
608 	opcode = card->ext_csd.hpi_cmd;
609 	if (opcode == MMC_STOP_TRANSMISSION)
610 		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
611 	else if (opcode == MMC_SEND_STATUS)
612 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
613 
614 	cmd.opcode = opcode;
615 	cmd.arg = card->rca << 16 | 1;
616 
617 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
618 	if (err) {
619 		pr_warn("%s: error %d interrupting operation. "
620 			"HPI command response %#x\n", mmc_hostname(card->host),
621 			err, cmd.resp[0]);
622 		return err;
623 	}
624 	if (status)
625 		*status = cmd.resp[0];
626 
627 	return 0;
628 }
629