/*
 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
 *
 * Copyright 2008 Embedded Alley Solutions, Inc.
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME	"mxs-mmc"

#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ		| \
				 BM_SSP_CTRL1_RESP_ERR_IRQ	| \
				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_DATA_CRC_IRQ	| \
				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ	| \
				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ  | \
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)

/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT			(HZ/2)

struct mxs_mmc_host {
	struct mxs_ssp			ssp;

	struct mmc_host			*mmc;
	struct mmc_request		*mrq;
	struct mmc_command		*cmd;
	struct mmc_data			*data;

	unsigned char			bus_width;
	spinlock_t			lock;
	int				sdio_irq_en;
	bool				broken_cd;
};

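/*
 * Card-detect resolution order: if the DT marks card detection as broken,
 * report -ENOSYS; otherwise prefer a slot CD GPIO (mmc_gpio_get_cd()) and
 * fall back to the SSP CARD_DETECT status bit, flipping the result for
 * active-high CD slots.
 */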
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	int present, ret;

	if (host->broken_cd)
		return -ENOSYS;

	ret = mmc_gpio_get_cd(mmc);
	if (ret >= 0)
		return ret;

	present = mmc->caps & MMC_CAP_NEEDS_POLL ||
		!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_CARD_DETECT);

	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		present = !present;

	return present;
}

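/*
 * Soft-reset the SSP block and restore the register state this driver
 * expects: the SSP_MODE/WORD_LENGTH values used for SD/MMC operation, DMA
 * enabled, the error and timeout interrupt sources armed, and a default
 * TIMING divider. The SDIO IRQ bits are re-applied so an enabled SDIO
 * interrupt survives the reset.
 */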
static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	u32 ctrl0, ctrl1;
	int ret;

	ret = stmp_reset_block(ssp->base);
	if (ret)
		return ret;

	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
		BM_SSP_CTRL1_DMA_ENABLE |
		BM_SSP_CTRL1_POLARITY |
		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;

	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
	       BF_SSP(0, TIMING_CLOCK_RATE),
	       ssp->base + HW_SSP_TIMING(ssp));

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
	}

	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
	return 0;
}

static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd);

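/*
 * Completion path shared by all command types: latch the (short or 136-bit)
 * response from the SDRESP registers, unmap any data scatterlist, and then
 * either chain the next command (the real command after CMD23, or a STOP
 * after a data error / non-sbc transfer) or hand the request back to the
 * MMC core.
 */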
static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mmc_request *mrq = host->mrq;
	struct mxs_ssp *ssp = &host->ssp;

	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
		} else {
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
		}
	}

	if (cmd == mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		mxs_mmc_start_cmd(host, mrq->cmd);
		return;
	} else if (data) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, ssp->dma_dir);
		/*
		 * If there was an error on any block, we mark all
		 * data blocks as being in error.
		 */
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			data->bytes_xfered = 0;

		host->data = NULL;
		if (data->stop && (data->error || !mrq->sbc)) {
			mxs_mmc_start_cmd(host, mrq->stop);
			return;
		}
	}

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void mxs_mmc_dma_irq_callback(void *param)
{
	struct mxs_mmc_host *host = param;

	mxs_mmc_request_done(host);
}

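/*
 * SSP interrupt handler: acknowledge the status bits, forward a pending
 * SDIO card interrupt to the core, and translate response/data error bits
 * into errno values on the current command and data transfer. Request
 * completion itself is driven by the DMA descriptor callback, not here.
 */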
static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
{
	struct mxs_mmc_host *host = dev_id;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mxs_ssp *ssp = &host->ssp;
	u32 stat;

	spin_lock(&host->lock);

	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
	writel(stat & MXS_MMC_IRQ_BITS,
	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);

	spin_unlock(&host->lock);

	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
		mmc_signal_sdio_irq(host->mmc);

	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
		cmd->error = -ETIMEDOUT;
	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
		cmd->error = -EIO;

	if (data) {
		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
			data->error = -ETIMEDOUT;
		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
			data->error = -EILSEQ;
		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
			data->error = -EIO;
	}

	return IRQ_HANDLED;
}

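/*
 * Build a slave DMA descriptor. With host->data set, the data scatterlist
 * is mapped and transferred; otherwise the three ssp_pio_words (CTRL0,
 * CMD0, CMD1) are handed to the channel as a pseudo-scatterlist, which the
 * mxs DMA engine writes into the SSP PIO registers to issue the command.
 */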
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned long flags)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, ssp->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* pio */
		sgl = (struct scatterlist *) ssp->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = dmaengine_prep_slave_sg(ssp->dmach,
				sgl, sg_len, ssp->slave_dirn, flags);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, ssp->dma_dir);
	}

	return desc;
}

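/* Issue a broadcast command (no response, no data), e.g. GO_IDLE_STATE. */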
static void mxs_mmc_bc(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ctrl0, cmd0, cmd1;

	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

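/*
 * Issue an addressed command without data (broadcast-with-response commands
 * take the same path): build CTRL0/CMD0/CMD1 from the response type and
 * push them via the PIO descriptor.
 */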
static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ignore_crc, get_resp, long_resp;
	u32 ctrl0, cmd0, cmd1;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
	cmd1 = cmd->arg;

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

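/*
 * Scale a data timeout in nanoseconds to the units of the SSP TIMING
 * timeout field (which appears to count in blocks of 4096 SSP clocks,
 * hence ssp_timeout_mul); the arithmetic is done on divided-down values
 * to keep the intermediate products inside 32 bits.
 */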
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	/*
	 * Calculate ticks in ms since ns are large numbers
	 * and might overflow
	 */
	const unsigned int clock_per_ms = clock_rate / 1000;
	const unsigned int ms = ns / 1000;
	const unsigned int ticks = ms * clock_per_ms;
	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;

	WARN_ON(ssp_ticks == 0);
	return ssp_ticks;
}

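/*
 * Issue an addressed data-transfer command: derive the DMA direction from
 * the request, program the transfer size and block size/count (inline in
 * CTRL0/CMD0 on older SSP revisions, in dedicated registers on newer ones),
 * update the data timeout, then submit a PIO descriptor for the command
 * followed by the data scatterlist descriptor whose completion callback
 * finishes the request.
 */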
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if (cmd->opcode == SD_IO_RW_EXTENDED)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

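/* Dispatch a command by its MMC command class (bc, bcr, ac or adtc). */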
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd)
{
	host->cmd = cmd;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		mxs_mmc_bc(host);
		break;
	case MMC_CMD_BCR:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_AC:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_ADTC:
		mxs_mmc_adtc(host);
		break;
	default:
		dev_warn(mmc_dev(host->mmc),
			 "%s: unknown MMC command\n", __func__);
		break;
	}
}

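/*
 * ->request() entry point from the MMC core. If the request carries a
 * set-block-count command (CMD23 in mrq->sbc), it is sent first and the
 * real command is chained from the completion path.
 */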
static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;

	if (mrq->sbc)
		mxs_mmc_start_cmd(host, mrq->sbc);
	else
		mxs_mmc_start_cmd(host, mrq->cmd);
}

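/*
 * ->set_ios(): cache the bus width in the encoding used for CTRL0_BUS_WIDTH
 * (0 = 1-bit, 1 = 4-bit, 2 = 8-bit) and reprogram the SSP clock whenever a
 * clock rate is requested.
 */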
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		host->bus_width = 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->bus_width = 1;
	else
		host->bus_width = 0;

	if (ios->clock)
		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
}

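/*
 * ->enable_sdio_irq(): set or clear the SDIO interrupt check/enable bits
 * under the host lock, then re-check the status register so an interrupt
 * that became pending while disabled is not lost.
 */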
static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->sdio_irq_en = enable;

	if (enable) {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
	} else {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_SDIO_IRQ)
		mmc_signal_sdio_irq(host->mmc);
}

static const struct mmc_host_ops mxs_mmc_ops = {
	.request = mxs_mmc_request,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mxs_mmc_get_cd,
	.set_ios = mxs_mmc_set_ios,
	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};

static const struct platform_device_id mxs_ssp_ids[] = {
	{
		.name = "imx23-mmc",
		.driver_data = IMX23_SSP,
	}, {
		.name = "imx28-mmc",
		.driver_data = IMX28_SSP,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);

static const struct of_device_id mxs_mmc_dt_ids[] = {
	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);

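/*
 * Probe: map the SSP registers, optionally enable a vmmc regulator, turn on
 * the SSP clock, reset the block, grab the "rx-tx" DMA channel, fill in the
 * mmc_host capabilities and limits (older SSP revisions have smaller
 * block-count and request-size limits), then request the IRQ and register
 * the host with the MMC core.
 */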
static int mxs_mmc_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	struct resource *iores;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	struct mxs_ssp *ssp;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ssp->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id) of_id->data;

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_mmc_free;

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!ssp->dmach) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = -ENODEV;
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_clk_disable;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	spin_lock_init(&host->lock);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}

static int mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	mmc_remove_host(mmc);

	if (ssp->dmach)
		dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	mmc_free_host(mmc);

	return 0;
}

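/*
 * System sleep support only gates and ungates the SSP clock; no controller
 * register state is saved or restored here.
 */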
#ifdef CONFIG_PM_SLEEP
static int mxs_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	clk_disable_unprepare(ssp->clk);
	return 0;
}

static int mxs_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	return clk_prepare_enable(ssp->clk);
}
#endif

static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);

static struct platform_driver mxs_mmc_driver = {
	.probe		= mxs_mmc_probe,
	.remove		= mxs_mmc_remove,
	.id_table	= mxs_ssp_ids,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &mxs_mmc_pm_ops,
		.of_match_table = mxs_mmc_dt_ids,
	},
};

module_platform_driver(mxs_mmc_driver);

MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);