xref: /linux/drivers/mmc/host/pxamci.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
1 /*
 *  linux/drivers/mmc/host/pxamci.c - PXA MMCI driver
3  *
4  *  Copyright (C) 2003 Russell King, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  This hardware is really sick:
11  *   - No way to clear interrupts.
12  *   - Have to turn off the clock whenever we touch the device.
13  *   - Doesn't tell you how many data blocks were transferred.
14  *  Yuck!
15  *
16  *	1 and 3 byte data transfers not supported
17  *	max block length up to 1023
18  */
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/platform_device.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/mmc/host.h>
27 
28 #include <asm/dma.h>
29 #include <asm/io.h>
30 #include <asm/scatterlist.h>
31 #include <asm/sizes.h>
32 
33 #include <asm/arch/pxa-regs.h>
34 #include <asm/arch/mmc.h>
35 
36 #include "pxamci.h"
37 
38 #define DRIVER_NAME	"pxa2xx-mci"
39 
40 #define NR_SG	1
41 
/* Per-controller driver state, stored in mmc_priv() of the mmc_host. */
struct pxamci_host {
	struct mmc_host		*mmc;		/* back-pointer to the core host */
	spinlock_t		lock;		/* serialises updates to imask */
	struct resource		*res;		/* MMIO region; ->start used for FIFO bus addresses */
	void __iomem		*base;		/* mapped controller registers */
	int			irq;		/* controller interrupt line */
	int			dma;		/* PXA DMA channel, -1 while unclaimed */
	unsigned int		clkrt;		/* divisor exponent written to MMC_CLKRT */
	unsigned int		cmdat;		/* sticky CMDAT flags (bus width, INIT, SDIO) */
	unsigned int		imask;		/* shadow of the MMC_I_MASK register */
	unsigned int		power_mode;	/* last ios->power_mode applied */
	struct pxamci_platform_data *pdata;	/* board hooks; may be NULL */

	/* In-flight request state; all NULL when the controller is idle. */
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;		/* bus address of the DMA descriptor page */
	struct pxa_dma_desc	*sg_cpu;	/* CPU view of the DMA descriptor page */
	unsigned int		dma_len;	/* number of sg entries mapped for DMA */

	unsigned int		dma_dir;	/* DMA_FROM_DEVICE or DMA_TO_DEVICE */
};
65 
66 static void pxamci_stop_clock(struct pxamci_host *host)
67 {
68 	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
69 		unsigned long timeout = 10000;
70 		unsigned int v;
71 
72 		writel(STOP_CLOCK, host->base + MMC_STRPCL);
73 
74 		do {
75 			v = readl(host->base + MMC_STAT);
76 			if (!(v & STAT_CLK_EN))
77 				break;
78 			udelay(1);
79 		} while (timeout--);
80 
81 		if (v & STAT_CLK_EN)
82 			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
83 	}
84 }
85 
86 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
87 {
88 	unsigned long flags;
89 
90 	spin_lock_irqsave(&host->lock, flags);
91 	host->imask &= ~mask;
92 	writel(host->imask, host->base + MMC_I_MASK);
93 	spin_unlock_irqrestore(&host->lock, flags);
94 }
95 
96 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
97 {
98 	unsigned long flags;
99 
100 	spin_lock_irqsave(&host->lock, flags);
101 	host->imask |= mask;
102 	writel(host->imask, host->base + MMC_I_MASK);
103 	spin_unlock_irqrestore(&host->lock, flags);
104 }
105 
/*
 * Program the controller's data-transfer registers and build the DMA
 * descriptor chain for @data, then start the DMA channel.  Called from
 * pxamci_request() before the data command is issued.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	u32 dcmd;
	int i;

	host->data = data;

	/* Stream transfers have no block count; program the maximum. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/*
	 * Convert the nanosecond part of the timeout to MMC clock ticks,
	 * add the clock-count part (scaled by the divisor exponent), and
	 * program MMC_RDTO in units of 256 ticks, rounded up.
	 */
	clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	/* Route the MMC DMA request line to our channel for this direction. */
	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
		DRCMRTXMMC = 0;
		DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
		DRCMRRXMMC = 0;
		DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	/* Build one chained DMA descriptor per mapped sg entry. */
	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
		/*
		 * A write that is not a multiple of 32 bytes leaves a
		 * partially filled FIFO; request an end-of-descriptor
		 * interrupt so pxamci_dma_irq() can flush it with
		 * BUF_PART_FULL.
		 */
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	/* Terminate the chain; assumes dma_map_sg() mapped at least one entry. */
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();		/* descriptors visible to the DMA engine before RUN */

	DDADR(host->dma) = host->sg_dma;
	DCSR(host->dma) = DCSR_RUN;
}
165 
/*
 * Latch @cmd into the controller and start the bus clock.  Completion
 * is reported via the END_CMD_RES interrupt, which is unmasked here
 * and serviced by pxamci_cmd_done().
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);	/* only one command in flight */
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

	/* Map the MMC core's response type onto the controller's encoding. */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	/* The 32-bit argument is split across two 16-bit registers. */
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
199 
/*
 * Complete @mrq: drop all in-flight state, then hand the request back
 * to the MMC core.
 */
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}
207 
/*
 * END_CMD_RES handler: unpack the command response from the 16-bit
 * response FIFO, translate status bits into cmd->error, then either
 * arm the data-done interrupt or complete the request.  Returns 0 if
 * no command was outstanding, 1 if the interrupt was handled.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	/* Stitch successive 16-bit FIFO words into 32-bit response words. */
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
#ifdef CONFIG_PXA27x
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) {
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		} else
#endif
		cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		/* Data phase follows: wait for DATA_TRAN_DONE. */
		pxamci_enable_irq(host, DATA_TRAN_DONE);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
257 
/*
 * DATA_TRAN_DONE handler: stop the DMA channel, unmap the buffers and
 * translate the data-phase status bits into data->error.  Issues the
 * stop command if the request has one, otherwise completes it.
 * Returns 0 if no data transfer was outstanding, 1 if handled.
 */
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	DCSR(host->dma) = 0;	/* halt the DMA channel */
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		/* Clock must be stopped before latching the stop command. */
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
297 
/*
 * Controller interrupt handler: dispatch every source that is both
 * raised in MMC_I_REG and unmasked in MMC_I_MASK.  Command completion
 * is checked before data completion, matching the order of the phases.
 */
static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	/* Ignore sources that are currently masked. */
	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
		if (ireg & SDIO_INT) {
			mmc_signal_sdio_irq(host->mmc);
			handled = 1;
		}
	}

	return IRQ_RETVAL(handled);
}
323 
/*
 * Start an MMC request.  The data phase (if any) is programmed first
 * via pxamci_setup_data(), then the command is issued; completion is
 * driven from pxamci_irq().
 */
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);	/* a request must not already be in flight */

	host->mrq = mrq;

	pxamci_stop_clock(host);

	/*
	 * CMDAT_INIT (set by set_ios on power-on) applies to exactly one
	 * command: snapshot the flags, then clear it from the sticky copy.
	 */
	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}
352 
353 static int pxamci_get_ro(struct mmc_host *mmc)
354 {
355 	struct pxamci_host *host = mmc_priv(mmc);
356 
357 	if (host->pdata && host->pdata->get_ro)
358 		return host->pdata->get_ro(mmc_dev(mmc));
359 	/* Host doesn't support read only detection so assume writeable */
360 	return 0;
361 }
362 
/*
 * Apply the MMC core's requested bus settings: clock rate (stored as a
 * CLKRT divisor exponent, written on the next command), power state
 * and bus width (both folded into the sticky CMDAT flags).
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		/*
		 * Pick a divisor of CLOCKRATE that does not overshoot the
		 * requested rate, then store its log2 for MMC_CLKRT.
		 */
		unsigned int clk = CLOCKRATE / ios->clock;
		if (CLOCKRATE / clk > ios->clock)
			clk <<= 1;
		host->clkrt = fls(clk) - 1;
		pxa_set_cken(CKEN_MMC, 1);

		/*
		 * we write clkrt on the next command
		 */
	} else {
		/* Clock off: stop the bus clock and gate the unit clock. */
		pxamci_stop_clock(host);
		pxa_set_cken(CKEN_MMC, 0);
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		if (host->pdata && host->pdata->setpower)
			host->pdata->setpower(mmc_dev(mmc), ios->vdd);

		/* CMDAT_INIT is consumed by the next pxamci_request(). */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}
400 
401 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
402 {
403 	struct pxamci_host *pxa_host = mmc_priv(host);
404 
405 	if (enable)
406 		pxamci_enable_irq(pxa_host, SDIO_INT);
407 	else
408 		pxamci_disable_irq(pxa_host, SDIO_INT);
409 }
410 
/* Host operations exported to the MMC core. */
static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};
417 
/*
 * DMA channel interrupt.  A descriptor-end interrupt is only requested
 * by pxamci_setup_data() for writes that are not a multiple of 32
 * bytes; anything else arriving here is treated as a DMA error.
 */
static void pxamci_dma_irq(int dma, void *devid)
{
	struct pxamci_host *host = devid;
	int dcsr = DCSR(dma);
	/* Write the status back to acknowledge, with STOPIRQEN masked out. */
	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;

	if (dcsr & DCSR_ENDINTR) {
		/* Partially filled FIFO on the final burst: flush it. */
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
		       mmc_hostname(host->mmc), dma, dcsr);
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}
}
433 
/*
 * Board card-detect interrupt.  Registered via pdata->init() in
 * pxamci_probe(), so pdata is known to be non-NULL here; devid is the
 * mmc_host passed to the board hook.
 */
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, host->pdata->detect_delay);
	return IRQ_HANDLED;
}
441 
/*
 * Probe: claim the MMIO region, IRQ and DMA channel, describe the
 * controller's limits to the MMC core, quiesce the hardware and
 * register the host.  On failure the goto-out path releases whatever
 * was acquired so far.
 */
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;
	mmc->f_min = CLOCKRATE_MIN;
	mmc->f_max = CLOCKRATE_MAX;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is only 10 bits before PXA27x.
	 */
	mmc->max_blk_size = (cpu_is_pxa21x() || cpu_is_pxa25x()) ? 1023 : 2048;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;		/* no channel claimed yet; checked in the error path */
	host->pdata = pdev->dev.platform_data;
	mmc->ocr_avail = host->pdata ?
			 host->pdata->ocr_mask :
			 MMC_VDD_32_33|MMC_VDD_33_34;
	mmc->caps = 0;
	host->cmdat = 0;
	/* 4-bit data and SDIO interrupts are not available on PXA21x/25x. */
	if (!cpu_is_pxa21x() && !cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
	}

	/* One page of coherent memory holds the DMA descriptor chain. */
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;	/* start with every source masked */

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	/* Let the board wire up card detection (pxamci_detect_irq) etc. */
	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	mmc_add_host(mmc);

	return 0;

 out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}
563 
/*
 * Remove: detach from the MMC core, quiesce the controller, then
 * release resources in reverse order of pxamci_probe().
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		/* Give the board a chance to undo pdata->init(). */
		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		pxamci_stop_clock(host);
		/* Mask every interrupt source the controller can raise. */
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		/* Unroute the DMA request lines. */
		DRCMRRXMMC = 0;
		DRCMRTXMMC = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
597 
598 #ifdef CONFIG_PM
599 static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
600 {
601 	struct mmc_host *mmc = platform_get_drvdata(dev);
602 	int ret = 0;
603 
604 	if (mmc)
605 		ret = mmc_suspend_host(mmc, state);
606 
607 	return ret;
608 }
609 
/* Resume hook: hand the host back to the MMC core's resume path, if registered. */
static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);

	return mmc ? mmc_resume_host(mmc) : 0;
}
620 #else
621 #define pxamci_suspend	NULL
622 #define pxamci_resume	NULL
623 #endif
624 
/* Bound by name to the "pxa2xx-mci" platform device registered by board code. */
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
634 
/* Module entry point: register the platform driver. */
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}
639 
/* Module exit point: unregister the platform driver. */
static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}
644 
645 module_init(pxamci_init);
646 module_exit(pxamci_exit);
647 
648 MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
649 MODULE_LICENSE("GPL");
650