xref: /linux/drivers/mmc/core/core.c (revision aeb3f46252e26acdc60a1a8e31fb1ca6319d9a07)
1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <asm/scatterlist.h>
22 #include <linux/scatterlist.h>
23 
24 #include <linux/mmc/card.h>
25 #include <linux/mmc/host.h>
26 #include <linux/mmc/mmc.h>
27 #include <linux/mmc/sd.h>
28 
29 #include "core.h"
30 #include "bus.h"
31 #include "host.h"
32 
33 #include "mmc_ops.h"
34 #include "sd_ops.h"
35 
/*
 * Card-type attach entry points, implemented in mmc.c and sd.c.
 * NOTE(review): these are declared here instead of in a shared header;
 * consider moving them into core.h.
 */
extern int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
extern int mmc_attach_sd(struct mmc_host *host, u32 ocr);

/* Single-threaded workqueue used for deferred card detection (mmc_rescan). */
static struct workqueue_struct *workqueue;
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 *
 * Returns the queue_delayed_work() result: non-zero if the work was
 * queued, 0 if it was already pending.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}
49 
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 * Blocks until any pending mmc_rescan() invocations have completed.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
57 
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		/*
		 * Clear the error and resubmit the whole request directly
		 * to the host driver; the completion callback is not run
		 * until the retries are exhausted or the request succeeds.
		 */
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		/* Notify the issuer (e.g. mmc_wait_done()) that we are done. */
		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);
104 
105 static void
106 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
107 {
108 #ifdef CONFIG_MMC_DEBUG
109 	unsigned int i, sz;
110 #endif
111 
112 	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
113 		 mmc_hostname(host), mrq->cmd->opcode,
114 		 mrq->cmd->arg, mrq->cmd->flags);
115 
116 	if (mrq->data) {
117 		pr_debug("%s:     blksz %d blocks %d flags %08x "
118 			"tsac %d ms nsac %d\n",
119 			mmc_hostname(host), mrq->data->blksz,
120 			mrq->data->blocks, mrq->data->flags,
121 			mrq->data->timeout_ns / 10000000,
122 			mrq->data->timeout_clks);
123 	}
124 
125 	if (mrq->stop) {
126 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
127 			 mmc_hostname(host), mrq->stop->opcode,
128 			 mrq->stop->arg, mrq->stop->flags);
129 	}
130 
131 	WARN_ON(!host->claimed);
132 
133 	mrq->cmd->error = 0;
134 	mrq->cmd->mrq = mrq;
135 	if (mrq->data) {
136 		BUG_ON(mrq->data->blksz > host->max_blk_size);
137 		BUG_ON(mrq->data->blocks > host->max_blk_count);
138 		BUG_ON(mrq->data->blocks * mrq->data->blksz >
139 			host->max_req_size);
140 
141 #ifdef CONFIG_MMC_DEBUG
142 		sz = 0;
143 		for (i = 0;i < mrq->data->sg_len;i++)
144 			sz += mrq->data->sg[i].length;
145 		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
146 #endif
147 
148 		mrq->cmd->data = mrq->data;
149 		mrq->data->error = 0;
150 		mrq->data->mrq = mrq;
151 		if (mrq->stop) {
152 			mrq->data->stop = mrq->stop;
153 			mrq->stop->error = 0;
154 			mrq->stop->mrq = mrq;
155 		}
156 	}
157 	host->ops->request(host, mrq);
158 }
159 
/* Request-done callback used by mmc_wait_for_req(): wake the waiter. */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}
164 
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	/* mmc_wait_done() completes this on-stack completion for us. */
	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);
187 
188 /**
189  *	mmc_wait_for_cmd - start a command and wait for completion
190  *	@host: MMC host to start command
191  *	@cmd: MMC command to start
192  *	@retries: maximum number of retries
193  *
194  *	Start a new MMC command for a host, and wait for the command
195  *	to complete.  Return any error that occurred while the command
196  *	was executing.  Do not attempt to parse the response.
197  */
198 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
199 {
200 	struct mmc_request mrq;
201 
202 	BUG_ON(!host->claimed);
203 
204 	memset(&mrq, 0, sizeof(struct mmc_request));
205 
206 	memset(cmd->resp, 0, sizeof(cmd->resp));
207 	cmd->retries = retries;
208 
209 	mrq.cmd = cmd;
210 	cmd->data = NULL;
211 
212 	mmc_wait_for_req(host, &mrq);
213 
214 	return cmd->error;
215 }
216 
217 EXPORT_SYMBOL(mmc_wait_for_cmd);
218 
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *	@write: flag to differentiate reads from writes
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
			  int write)
{
	unsigned int mult;

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (write)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		/*
		 * Convert both components to microseconds so they can be
		 * compared against the SD limit.
		 * NOTE(review): divides by (ios.clock / 1000) — assumes the
		 * clock is already running at >= 1 kHz here; confirm callers.
		 */
		timeout_us = data->timeout_ns / 1000;
		timeout_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		/* 250 ms for writes, 100 ms for reads. */
		if (write)
			limit_us = 250000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
273 
/**
 *	mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *
 *	Claim a host for a set of operations.  Sleeps (uninterruptibly)
 *	until the host is free; must not be called from atomic context.
 */
void mmc_claim_host(struct mmc_host *host)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	might_sleep();

	/*
	 * Classic wait-queue claim loop: register on the queue first, then
	 * re-check ->claimed under the lock each time we are woken so a
	 * release between the check and schedule() cannot be missed.
	 */
	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!host->claimed)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	host->claimed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
}

EXPORT_SYMBOL(mmc_claim_host);
304 
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.  Caller must currently hold the claim.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	host->claimed = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wake any mmc_claim_host() waiters sleeping on host->wq. */
	wake_up(&host->wq);
}

EXPORT_SYMBOL(mmc_release_host);
326 
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.  All bus-state changes funnel
 * through here so the driver sees a complete, consistent mmc_ios.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
343 
/*
 * Control chip select pin on a host.
 * @mode is one of the MMC_CS_* values stored in ios.chip_select.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
352 
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".  Warns if asked for less than the host minimum,
 * and silently clamps to the host maximum.
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}
367 
/*
 * Change the bus mode (open drain/push-pull) of a host.
 * @mode is one of the MMC_BUSMODE_* values.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}
376 
/*
 * Change data bus width of a host.
 * @width is one of the MMC_BUS_WIDTH_* values.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
385 
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 *
 * Returns the adjusted OCR mask, or 0 if there is no overlap between
 * the card's OCR and the host's supported voltages.
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/* Restrict the card's OCR to voltages the host can supply. */
	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		/*
		 * Keep only the lowest supported voltage bit and the one
		 * above it (presumably the two adjacent OCR voltage
		 * windows — TODO confirm against the MMC/SD OCR layout).
		 */
		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		ocr = 0;
	}

	return ocr;
}
410 
/*
 * Select timing parameters for host.
 * @timing is one of the MMC_TIMING_* values.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
419 
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stablising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	/* Start at the highest voltage the host supports. */
	int bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Allow the supply to stabilise before starting the clock. */
	mmc_delay(1);

	host->ios.clock = host->f_min;
	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/* Give the card time to come out of power-up. */
	mmc_delay(2);
}
451 
/*
 * Remove power from the card and reset the ios state to its
 * powered-down defaults.
 */
static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}
463 
/*
 * Cleanup when the last reference to the bus operator is dropped.
 * Called from mmc_bus_put() with host->lock held (hence no locking
 * here); the bus must already be marked dead with no refs left.
 */
void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
475 
/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
487 
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	/* Last reference gone: release the (dead) bus handler. */
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
502 
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.  Caller must have claimed the host; the initial
 * bus reference taken here is dropped by mmc_detach_bus().
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	BUG_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
527 
/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.  Drops the
 * reference taken by mmc_attach_bus(); the handler itself is freed by
 * mmc_bus_put() once the last reference goes away.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	BUG_ON(!host->claimed);
	BUG_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}
551 
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	/* Catch drivers signalling detection after mmc_stop_host(). */
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	BUG_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/* The actual work happens in mmc_rescan() on the MMC workqueue. */
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
575 
576 
/*
 * Workqueue handler behind mmc_detect_change(): probe the slot for a
 * newly inserted card, or re-check an already attached one.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;

	mmc_bus_get(host);

	if (host->bus_ops == NULL) {
		/*
		 * Only we can add a new handler, so it's safe to
		 * release the lock here.
		 */
		mmc_bus_put(host);

		mmc_claim_host(host);

		mmc_power_up(host);
		mmc_go_idle(host);

		mmc_send_if_cond(host, host->ocr_avail);

		/* Probe for SD first (ACMD41), then fall back to MMC. */
		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (err == MMC_ERR_NONE) {
			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
		} else {
			/*
			 * If we fail to detect any SD cards then try
			 * searching for MMC cards.
			 */
			err = mmc_send_op_cond(host, 0, &ocr);
			if (err == MMC_ERR_NONE) {
				if (mmc_attach_mmc(host, ocr))
					mmc_power_off(host);
			} else {
				/*
				 * NOTE(review): the host claim is released
				 * explicitly only on this no-card path;
				 * presumably mmc_attach_sd()/mmc_attach_mmc()
				 * release it themselves — confirm in sd.c/mmc.c.
				 */
				mmc_power_off(host);
				mmc_release_host(host);
			}
		}
	} else {
		/* A bus handler exists: let it re-verify its card. */
		if (host->bus_ops->detect && !host->bus_dead)
			host->bus_ops->detect(host);

		mmc_bus_put(host);
	}
}
625 
/*
 * Bring a newly-added host into a known (off) power state and kick off
 * an immediate card detection pass.
 */
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
631 
/*
 * Tear a host down: stop further detection work, remove any attached
 * card/bus handler, and power the slot off.
 */
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	/* Mark the host removed so late mmc_detect_change() calls BUG. */
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/* Wait out any in-flight mmc_rescan() before detaching. */
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
	}
	mmc_bus_put(host);

	/* The bus handler's remove() must have freed the card by now. */
	BUG_ON(host->card);

	mmc_power_off(host);
}
658 
659 #ifdef CONFIG_PM
660 
/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 *	@state: suspend mode (PM_SUSPEND_xxx)
 *
 *	Always returns 0.  A bus handler without a resume hook cannot
 *	survive suspend, so its card is removed here instead.
 */
int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
{
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			host->bus_ops->suspend(host);
		if (!host->bus_ops->resume) {
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);

			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		}
	}
	mmc_bus_put(host);

	mmc_power_off(host);

	return 0;
}

EXPORT_SYMBOL(mmc_suspend_host);
691 
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 *
 *	Always returns 0.  Any bus handler still attached must have a
 *	resume hook (handlers without one were detached at suspend).
 */
int mmc_resume_host(struct mmc_host *host)
{
	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		mmc_power_up(host);
		BUG_ON(!host->bus_ops->resume);
		host->bus_ops->resume(host);
	}
	mmc_bus_put(host);

	/*
	 * We add a slight delay here so that resume can progress
	 * in parallel.
	 */
	mmc_detect_change(host, 1);

	return 0;
}

EXPORT_SYMBOL(mmc_resume_host);
716 
717 #endif
718 
719 static int __init mmc_init(void)
720 {
721 	int ret;
722 
723 	workqueue = create_singlethread_workqueue("kmmcd");
724 	if (!workqueue)
725 		return -ENOMEM;
726 
727 	ret = mmc_register_bus();
728 	if (ret == 0) {
729 		ret = mmc_register_host_class();
730 		if (ret)
731 			mmc_unregister_bus();
732 	}
733 	return ret;
734 }
735 
/* Module exit: unwind mmc_init() in reverse order. */
static void __exit mmc_exit(void)
{
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}
742 
/* Module entry/exit points and license tag. */
module_init(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");
747