xref: /linux/drivers/mmc/host/pxamci.c (revision 2277ab4a1df50e05bc732fe9488d4e902bb8399a)
1 /*
2  *  linux/drivers/mmc/host/pxa.c - PXA MMCI driver
3  *
4  *  Copyright (C) 2003 Russell King, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  This hardware is really sick:
11  *   - No way to clear interrupts.
12  *   - Have to turn off the clock whenever we touch the device.
13  *   - Doesn't tell you how many data blocks were transferred.
14  *  Yuck!
15  *
16  *	1 and 3 byte data transfers not supported
17  *	max block length up to 1023
18  */
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/platform_device.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/clk.h>
27 #include <linux/err.h>
28 #include <linux/mmc/host.h>
29 #include <linux/io.h>
30 #include <linux/regulator/consumer.h>
31 
32 #include <asm/sizes.h>
33 
34 #include <mach/hardware.h>
35 #include <mach/dma.h>
36 #include <mach/mmc.h>
37 
38 #include "pxamci.h"
39 
40 #define DRIVER_NAME	"pxa2xx-mci"
41 
42 #define NR_SG	1
43 #define CLKRT_OFF	(~0)
44 
/* Per-controller state, allocated as the mmc_host private area. */
struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;		/* protects imask updates */
	struct resource		*res;		/* MMIO region (also used for FIFO phys addrs) */
	void __iomem		*base;		/* mapped controller registers */
	struct clk		*clk;		/* functional clock */
	unsigned long		clkrate;	/* rate of 'clk' in Hz */
	int			irq;
	int			dma;		/* PXA DMA channel number, -1 if none */
	unsigned int		clkrt;		/* cached MMC_CLKRT value; CLKRT_OFF when gated */
	unsigned int		cmdat;		/* sticky CMDAT_* bits for the next command */
	unsigned int		imask;		/* shadow of MMC_I_MASK */
	unsigned int		power_mode;	/* last ios->power_mode applied */
	struct pxamci_platform_data *pdata;

	/* Currently active request/command/data, NULL when idle. */
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	/* DMA descriptor chain: one page of coherent memory. */
	dma_addr_t		sg_dma;		/* bus address of sg_cpu */
	struct pxa_dma_desc	*sg_cpu;	/* CPU view of the descriptor page */
	unsigned int		dma_len;	/* number of mapped sg entries */

	unsigned int		dma_dir;	/* DMA_FROM_DEVICE or DMA_TO_DEVICE */
	unsigned int		dma_drcmrrx;	/* DRCMR index for RX requests */
	unsigned int		dma_drcmrtx;	/* DRCMR index for TX requests */

	struct regulator	*vcc;		/* optional vmmc supply, NULL if absent */
};
74 
75 static inline void pxamci_init_ocr(struct pxamci_host *host)
76 {
77 #ifdef CONFIG_REGULATOR
78 	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
79 
80 	if (IS_ERR(host->vcc))
81 		host->vcc = NULL;
82 	else {
83 		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
84 		if (host->pdata && host->pdata->ocr_mask)
85 			dev_warn(mmc_dev(host->mmc),
86 				"ocr_mask/setpower will not be used\n");
87 	}
88 #endif
89 	if (host->vcc == NULL) {
90 		/* fall-back to platform data */
91 		host->mmc->ocr_avail = host->pdata ?
92 			host->pdata->ocr_mask :
93 			MMC_VDD_32_33 | MMC_VDD_33_34;
94 	}
95 }
96 
97 static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
98 {
99 #ifdef CONFIG_REGULATOR
100 	if (host->vcc)
101 		mmc_regulator_set_ocr(host->vcc, vdd);
102 #endif
103 	if (!host->vcc && host->pdata && host->pdata->setpower)
104 		host->pdata->setpower(mmc_dev(host->mmc), vdd);
105 }
106 
107 static void pxamci_stop_clock(struct pxamci_host *host)
108 {
109 	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
110 		unsigned long timeout = 10000;
111 		unsigned int v;
112 
113 		writel(STOP_CLOCK, host->base + MMC_STRPCL);
114 
115 		do {
116 			v = readl(host->base + MMC_STAT);
117 			if (!(v & STAT_CLK_EN))
118 				break;
119 			udelay(1);
120 		} while (timeout--);
121 
122 		if (v & STAT_CLK_EN)
123 			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
124 	}
125 }
126 
127 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
128 {
129 	unsigned long flags;
130 
131 	spin_lock_irqsave(&host->lock, flags);
132 	host->imask &= ~mask;
133 	writel(host->imask, host->base + MMC_I_MASK);
134 	spin_unlock_irqrestore(&host->lock, flags);
135 }
136 
137 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
138 {
139 	unsigned long flags;
140 
141 	spin_lock_irqsave(&host->lock, flags);
142 	host->imask |= mask;
143 	writel(host->imask, host->base + MMC_I_MASK);
144 	spin_unlock_irqrestore(&host->lock, flags);
145 }
146 
/*
 * Program the controller and the PXA DMA engine for the data phase of a
 * request: block count/size, read timeout, DMA request-line routing, and
 * one DMA descriptor per mapped scatterlist entry.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	bool dalgn = 0;
	u32 dcmd;
	int i;

	host->data = data;

	/* Stream transfers have no block count; program the register maximum. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/* Convert the ns timeout into MMC clock ticks, add the clock-count part. */
	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	/* MMC_RDTO counts in units of 256 clocks; round up. */
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		/* Route only the RX request line to our DMA channel. */
		DRCMR(host->dma_drcmrtx) = 0;
		DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		/* Route only the TX request line to our DMA channel. */
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	/* Build one descriptor per mapped segment, chained in the coherent page. */
	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
		/*
		 * A write segment that is not a multiple of 32 bytes needs a
		 * descriptor-end interrupt so pxamci_dma_irq() can flush the
		 * partially-filled TX FIFO (BUF_PART_FULL).
		 */
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		/* Not aligned to 8-byte boundary? */
		if (sg_dma_address(&data->sg[i]) & 0x7)
			dalgn = 1;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	/* Terminate the chain, then make it visible before handing it to DMA. */
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();

	/*
	 * The PXA27x DMA controller encounters overhead when working with
	 * unaligned (to 8-byte boundaries) data, so switch on byte alignment
	 * mode only if we have unaligned data.
	 */
	if (dalgn)
		DALGN |= (1 << host->dma);
	else
		DALGN &= ~(1 << host->dma);
	DDADR(host->dma) = host->sg_dma;

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		DCSR(host->dma) = DCSR_RUN;
}
227 
/*
 * Latch @cmd into the controller, add the response-format bits derived
 * from the command flags to @cmdat, restart the MMC clock and unmask
 * the end-of-command interrupt.  @cmdat carries the CMDAT_* bits
 * accumulated by the caller (data/DMA/init flags).
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

/* Strip BUSY/OPCODE so related response types compare equal in the switch. */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	/* The 32-bit argument is split across two 16-bit registers. */
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
261 
262 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
263 {
264 	host->mrq = NULL;
265 	host->cmd = NULL;
266 	host->data = NULL;
267 	mmc_request_done(host->mmc, mrq);
268 }
269 
/*
 * Handle END_CMD_RES: read the response FIFO back into cmd->resp[],
 * translate status into an error code, then either kick off the data
 * phase or complete the request.  Returns non-zero if a command was
 * actually in flight.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		/* Re-pack three successive 16-bit FIFO words into one 32-bit response word. */
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/*
		 * workaround for erratum #91, if doing write
		 * enable DMA late
		 */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			DCSR(host->dma) = DCSR_RUN;
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
324 
/*
 * Handle DATA_TRAN_DONE: quiesce the DMA channel, unmap the buffers,
 * translate status into a data error, then either issue the stop
 * command (multi-block) or complete the request.  Returns non-zero if
 * a data transfer was actually in flight.
 */
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	/* Stop the DMA channel before giving the buffers back to the CPU. */
	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		/* Multi-block transfer: stop the clock and send the stop command. */
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
364 
365 static irqreturn_t pxamci_irq(int irq, void *devid)
366 {
367 	struct pxamci_host *host = devid;
368 	unsigned int ireg;
369 	int handled = 0;
370 
371 	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
372 
373 	if (ireg) {
374 		unsigned stat = readl(host->base + MMC_STAT);
375 
376 		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
377 
378 		if (ireg & END_CMD_RES)
379 			handled |= pxamci_cmd_done(host, stat);
380 		if (ireg & DATA_TRAN_DONE)
381 			handled |= pxamci_data_done(host, stat);
382 		if (ireg & SDIO_INT) {
383 			mmc_signal_sdio_irq(host->mmc);
384 			handled = 1;
385 		}
386 	}
387 
388 	return IRQ_RETVAL(handled);
389 }
390 
391 static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
392 {
393 	struct pxamci_host *host = mmc_priv(mmc);
394 	unsigned int cmdat;
395 
396 	WARN_ON(host->mrq != NULL);
397 
398 	host->mrq = mrq;
399 
400 	pxamci_stop_clock(host);
401 
402 	cmdat = host->cmdat;
403 	host->cmdat &= ~CMDAT_INIT;
404 
405 	if (mrq->data) {
406 		pxamci_setup_data(host, mrq->data);
407 
408 		cmdat &= ~CMDAT_BUSY;
409 		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
410 		if (mrq->data->flags & MMC_DATA_WRITE)
411 			cmdat |= CMDAT_WRITE;
412 
413 		if (mrq->data->flags & MMC_DATA_STREAM)
414 			cmdat |= CMDAT_STREAM;
415 	}
416 
417 	pxamci_start_cmd(host, mrq->cmd, cmdat);
418 }
419 
420 static int pxamci_get_ro(struct mmc_host *mmc)
421 {
422 	struct pxamci_host *host = mmc_priv(mmc);
423 
424 	if (host->pdata && host->pdata->get_ro)
425 		return !!host->pdata->get_ro(mmc_dev(mmc));
426 	/*
427 	 * Board doesn't support read only detection; let the mmc core
428 	 * decide what to do.
429 	 */
430 	return -ENOSYS;
431 }
432 
/*
 * mmc_host_ops.set_ios: apply the core's bus settings — clock divisor
 * (cached in host->clkrt, written with the next command), power state
 * and bus width.  A requested clock of 0 gates the functional clock
 * off entirely (clkrt == CLKRT_OFF).
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		/* Re-enable the functional clock if it was gated off. */
		if (host->clkrt == CLKRT_OFF)
			clk_enable(host->clk);

		if (ios->clock == 26000000) {
			/* to support 26MHz on pxa300/pxa310 */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire.  check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
			/* CLKRT holds log2 of the divisor. */
			host->clkrt = fls(clk) - 1;
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		pxamci_set_power(host, ios->vdd);

		/* Flag the one-shot INIT sequence, consumed by the next request. */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}
490 
491 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
492 {
493 	struct pxamci_host *pxa_host = mmc_priv(host);
494 
495 	if (enable)
496 		pxamci_enable_irq(pxa_host, SDIO_INT);
497 	else
498 		pxamci_disable_irq(pxa_host, SDIO_INT);
499 }
500 
/* Host operations exported to the MMC core. */
static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};
507 
508 static void pxamci_dma_irq(int dma, void *devid)
509 {
510 	struct pxamci_host *host = devid;
511 	int dcsr = DCSR(dma);
512 	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
513 
514 	if (dcsr & DCSR_ENDINTR) {
515 		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
516 	} else {
517 		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
518 		       mmc_hostname(host->mmc), dma, dcsr);
519 		host->data->error = -EIO;
520 		pxamci_data_done(host, 0);
521 	}
522 }
523 
524 static irqreturn_t pxamci_detect_irq(int irq, void *devid)
525 {
526 	struct pxamci_host *host = mmc_priv(devid);
527 
528 	mmc_detect_change(devid, host->pdata->detect_delay);
529 	return IRQ_HANDLED;
530 }
531 
532 static int pxamci_probe(struct platform_device *pdev)
533 {
534 	struct mmc_host *mmc;
535 	struct pxamci_host *host = NULL;
536 	struct resource *r, *dmarx, *dmatx;
537 	int ret, irq;
538 
539 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
540 	irq = platform_get_irq(pdev, 0);
541 	if (!r || irq < 0)
542 		return -ENXIO;
543 
544 	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
545 	if (!r)
546 		return -EBUSY;
547 
548 	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
549 	if (!mmc) {
550 		ret = -ENOMEM;
551 		goto out;
552 	}
553 
554 	mmc->ops = &pxamci_ops;
555 
556 	/*
557 	 * We can do SG-DMA, but we don't because we never know how much
558 	 * data we successfully wrote to the card.
559 	 */
560 	mmc->max_phys_segs = NR_SG;
561 
562 	/*
563 	 * Our hardware DMA can handle a maximum of one page per SG entry.
564 	 */
565 	mmc->max_seg_size = PAGE_SIZE;
566 
567 	/*
568 	 * Block length register is only 10 bits before PXA27x.
569 	 */
570 	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
571 
572 	/*
573 	 * Block count register is 16 bits.
574 	 */
575 	mmc->max_blk_count = 65535;
576 
577 	host = mmc_priv(mmc);
578 	host->mmc = mmc;
579 	host->dma = -1;
580 	host->pdata = pdev->dev.platform_data;
581 	host->clkrt = CLKRT_OFF;
582 
583 	host->clk = clk_get(&pdev->dev, NULL);
584 	if (IS_ERR(host->clk)) {
585 		ret = PTR_ERR(host->clk);
586 		host->clk = NULL;
587 		goto out;
588 	}
589 
590 	host->clkrate = clk_get_rate(host->clk);
591 
592 	/*
593 	 * Calculate minimum clock rate, rounding up.
594 	 */
595 	mmc->f_min = (host->clkrate + 63) / 64;
596 	mmc->f_max = (cpu_is_pxa300() || cpu_is_pxa310()) ? 26000000
597 							  : host->clkrate;
598 
599 	pxamci_init_ocr(host);
600 
601 	mmc->caps = 0;
602 	host->cmdat = 0;
603 	if (!cpu_is_pxa25x()) {
604 		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
605 		host->cmdat |= CMDAT_SDIO_INT_EN;
606 		if (cpu_is_pxa300() || cpu_is_pxa310())
607 			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
608 				     MMC_CAP_SD_HIGHSPEED;
609 	}
610 
611 	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
612 	if (!host->sg_cpu) {
613 		ret = -ENOMEM;
614 		goto out;
615 	}
616 
617 	spin_lock_init(&host->lock);
618 	host->res = r;
619 	host->irq = irq;
620 	host->imask = MMC_I_MASK_ALL;
621 
622 	host->base = ioremap(r->start, SZ_4K);
623 	if (!host->base) {
624 		ret = -ENOMEM;
625 		goto out;
626 	}
627 
628 	/*
629 	 * Ensure that the host controller is shut down, and setup
630 	 * with our defaults.
631 	 */
632 	pxamci_stop_clock(host);
633 	writel(0, host->base + MMC_SPI);
634 	writel(64, host->base + MMC_RESTO);
635 	writel(host->imask, host->base + MMC_I_MASK);
636 
637 	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
638 				    pxamci_dma_irq, host);
639 	if (host->dma < 0) {
640 		ret = -EBUSY;
641 		goto out;
642 	}
643 
644 	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
645 	if (ret)
646 		goto out;
647 
648 	platform_set_drvdata(pdev, mmc);
649 
650 	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
651 	if (!dmarx) {
652 		ret = -ENXIO;
653 		goto out;
654 	}
655 	host->dma_drcmrrx = dmarx->start;
656 
657 	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
658 	if (!dmatx) {
659 		ret = -ENXIO;
660 		goto out;
661 	}
662 	host->dma_drcmrtx = dmatx->start;
663 
664 	if (host->pdata && host->pdata->init)
665 		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
666 
667 	mmc_add_host(mmc);
668 
669 	return 0;
670 
671  out:
672 	if (host) {
673 		if (host->dma >= 0)
674 			pxa_free_dma(host->dma);
675 		if (host->base)
676 			iounmap(host->base);
677 		if (host->sg_cpu)
678 			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
679 		if (host->clk)
680 			clk_put(host->clk);
681 	}
682 	if (mmc)
683 		mmc_free_host(mmc);
684 	release_resource(r);
685 	return ret;
686 }
687 
/*
 * Remove: unwind everything pxamci_probe() set up, in reverse order —
 * board hooks first, then the MMC core registration, then hardware
 * quiesce, then the IRQ/DMA/MMIO/memory resources.
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		/* Release the vmmc supply taken by pxamci_init_ocr(). */
		if (host->vcc)
			regulator_put(host->vcc);

		/* Board-specific teardown (e.g. the card-detect IRQ). */
		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		/* Stop the bus clock and mask the controller interrupt sources. */
		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		/* Unroute the MMC DMA requests from our channel. */
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		clk_put(host->clk);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
726 
727 #ifdef CONFIG_PM
728 static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
729 {
730 	struct mmc_host *mmc = platform_get_drvdata(dev);
731 	int ret = 0;
732 
733 	if (mmc)
734 		ret = mmc_suspend_host(mmc, state);
735 
736 	return ret;
737 }
738 
/* Defer resume handling to the MMC core, if a host is registered. */
static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);

	return mmc ? mmc_resume_host(mmc) : 0;
}
749 #else
750 #define pxamci_suspend	NULL
751 #define pxamci_resume	NULL
752 #endif
753 
/* Platform driver glue; matched by name against the "pxa2xx-mci" device. */
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
764 
/* Module entry point: register the platform driver. */
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}
769 
/* Module exit point: unregister the platform driver. */
static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}
774 
/* Wire up module load/unload and export module metadata. */
module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");
781