/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16
#define DW_MCI_FREQ_MAX	200000000	/* unit: Hz */
#define DW_MCI_FREQ_MIN	400000		/* unit: Hz */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

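/*
 * Tuning block patterns as defined by the SD UHS-I (CMD19) and eMMC HS200
 * (CMD21) specifications; during tuning the data the card sends back is
 * compared against these expected blocks to find a working sample phase.
 */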
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

static inline bool dw_mci_fifo_reset(struct dw_mci *host);
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	/* Dump the live register contents, not the register offsets */
	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

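	/*
	 * Stop/abort commands (CMD12, CMD0, CMD15, and CMD52 writes to the
	 * CCCR ABORT register) must carry the STOP flag so the CIU issues
	 * them even while a data transfer is in flight; any other command
	 * carrying data first waits for the previous transfer to finish.
	 */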
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
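		/*
		 * To abort an in-flight CMD53, issue a CMD52 write to the
		 * CCCR ABORT register: bit 31 selects a write, the register
		 * address sits in bits 25:9, and the data byte carries the
		 * function number (bits 30:28 of the CMD53 argument) of the
		 * transfer being aborted.
		 */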
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
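	/* drain the write buffer so CMDARG is stable before the start bit is set */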
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && !data->host_cookie)
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);

	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

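	/* ensure all descriptor writes hit memory before the IDMAC polls them */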
	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
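	/* e.g. 256 descriptors with 4 KiB pages and 16-byte descriptors */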

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;
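	/*
	 * Illustrative example: with a 64-word FIFO, a 32-bit data bus and
	 * 512-byte blocks, blksz_depth is 128 and tx_wmark_invers is 32; the
	 * largest burst size dividing both is 32, so the loop below picks
	 * msize index 4 (mszs[4] == 32) with rx_wmark 31.
	 */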

	/*
	 * MSIZE is '1' (a single transfer)
	 * if blksz is not a multiple of the FIFO width.
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx reaches '0', the loop exhausted every burst size;
	 * the initial values (msize = 0, rx_wmark = 1) are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}

static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

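	/*
	 * In HS200/SDR104 the card clock must not be stopped mid-block, so
	 * the card read threshold makes the controller start a block read
	 * only once the FIFO has room for the whole block.
	 */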
	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If blksz_depth >= (fifo_depth >> 1), any thld_size <= blksz works;
	 * if blksz_depth <  (fifo_depth >> 1), thld_size must equal blksz.
	 * Simply use blksz in both cases.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX watermark.
	 * If the current block size is the same as the previous one,
	 * there is no need to update FIFOTH.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;

		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next request ends up being transferred by DMA,
		 * prev_blksz must be invalidated so FIFOTH is reprogrammed.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * the FIFOTH register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
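	/* drain the write buffer: CMDARG must land before the command starts */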
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * Round the divider up rather than down so we
			 * never over-clock the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
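		/*
		 * CLKDIV divides bus_hz by 2 * div (div == 0 bypasses the
		 * divider). For example, bus_hz = 100 MHz and clock = 400 kHz
		 * gives div = DIV_ROUND_UP(250, 2) = 125, i.e. an actual card
		 * clock of 100 MHz / (2 * 125) = 400 kHz.
		 */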

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* remember the clock, adjusted by the divider, to detect changes */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle.  According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled.  This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command (%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);

	return err;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status == DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status == DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

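	/*
	 * Run the request state machine until it settles: each pass consumes
	 * pending events and may advance the state (SENDING_CMD ->
	 * SENDING_DATA -> DATA_BUSY -> SENDING_STOP); the loop exits once a
	 * pass completes without a state transition.
	 */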
	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for an open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued (by the DATA_ERROR path above).
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

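/*
 * part_buf staging: PIO moves data through the FIFO one full FIFO word
 * (2, 4 or 8 bytes, per host->data_shift) at a time. When a buffer is not
 * a multiple of the FIFO width, the leftover bytes are staged in
 * host->part_buf and merged with the next chunk, or flushed once the
 * expected transfer length has been reached.
 */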
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if RXDR is still set, read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR, write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

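	/* order the cmd_status write before EVENT_CMD_COMPLETE becomes visible */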
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
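	/*
	 * STATUS[29:17] is the FIFO count: data left in the FIFO with no
	 * interrupt pending means the data-over interrupt was lost.
	 */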
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error, report DATA_ERROR */
1842 			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1843 			host->data_status = pending;
1844 			smp_wmb();
1845 			set_bit(EVENT_DATA_ERROR, &host->pending_events);
1846 			tasklet_schedule(&host->tasklet);
1847 		}
1848 
1849 		if (pending & SDMMC_INT_DATA_OVER) {
1850 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1851 			if (!host->data_status)
1852 				host->data_status = pending;
1853 			smp_wmb();
1854 			if (host->dir_status == DW_MCI_RECV_STATUS) {
1855 				if (host->sg != NULL)
1856 					dw_mci_read_data_pio(host, true);
1857 			}
1858 			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1859 			tasklet_schedule(&host->tasklet);
1860 		}
1861 
1862 		if (pending & SDMMC_INT_RXDR) {
1863 			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1864 			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1865 				dw_mci_read_data_pio(host, false);
1866 		}
1867 
1868 		if (pending & SDMMC_INT_TXDR) {
1869 			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1870 			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1871 				dw_mci_write_data_pio(host);
1872 		}
1873 
1874 		if (pending & SDMMC_INT_CMD_DONE) {
1875 			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1876 			dw_mci_cmd_interrupt(host, pending);
1877 		}
1878 
1879 		if (pending & SDMMC_INT_CD) {
1880 			mci_writel(host, RINTSTS, SDMMC_INT_CD);
1881 			queue_work(host->card_workqueue, &host->card_work);
1882 		}
1883 
1884 		/* Handle SDIO Interrupts */
1885 		for (i = 0; i < host->num_slots; i++) {
1886 			struct dw_mci_slot *slot = host->slot[i];
1887 			if (pending & SDMMC_INT_SDIO(i)) {
1888 				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1889 				mmc_signal_sdio_irq(slot->mmc);
1890 			}
1891 		}
1892 
1893 	}
1894 
1895 #ifdef CONFIG_MMC_DW_IDMAC
1896 	/* Handle DMA interrupts */
1897 	pending = mci_readl(host, IDSTS);
1898 	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1899 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1900 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1901 		host->dma_ops->complete(host);
1902 	}
1903 #endif
1904 
1905 	return IRQ_HANDLED;
1906 }
1907 
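/*
 * Workqueue handler for card-detect events: for each slot, loop until the
 * detect state is stable, failing any request that was in flight when the
 * card went away with -ENOMEDIUM, then ask the core to rescan the slot
 * via mmc_detect_change().
 */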
1908 static void dw_mci_work_routine_card(struct work_struct *work)
1909 {
1910 	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1911 	int i;
1912 
1913 	for (i = 0; i < host->num_slots; i++) {
1914 		struct dw_mci_slot *slot = host->slot[i];
1915 		struct mmc_host *mmc = slot->mmc;
1916 		struct mmc_request *mrq;
1917 		int present;
1918 
1919 		present = dw_mci_get_cd(mmc);
1920 		while (present != slot->last_detect_state) {
1921 			dev_dbg(&slot->mmc->class_dev, "card %s\n",
1922 				present ? "inserted" : "removed");
1923 
1924 			spin_lock_bh(&host->lock);
1925 
1926 			/* Card change detected */
1927 			slot->last_detect_state = present;
1928 
1929 			/* Mark card as present if applicable */
1930 			if (present != 0)
1931 				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1932 
1933 			/* Clean up queue if present */
1934 			mrq = slot->mrq;
1935 			if (mrq) {
1936 				if (mrq == host->mrq) {
1937 					host->data = NULL;
1938 					host->cmd = NULL;
1939 
1940 					switch (host->state) {
1941 					case STATE_IDLE:
1942 						break;
1943 					case STATE_SENDING_CMD:
1944 						mrq->cmd->error = -ENOMEDIUM;
1945 						if (!mrq->data)
1946 							break;
1947 						/* fall through */
1948 					case STATE_SENDING_DATA:
1949 						mrq->data->error = -ENOMEDIUM;
1950 						dw_mci_stop_dma(host);
1951 						break;
1952 					case STATE_DATA_BUSY:
1953 					case STATE_DATA_ERROR:
1954 						if (mrq->data->error == -EINPROGRESS)
1955 							mrq->data->error = -ENOMEDIUM;
1956 						/* fall through */
1957 					case STATE_SENDING_STOP:
1958 						if (mrq->stop)
1959 							mrq->stop->error = -ENOMEDIUM;
1960 						break;
1961 					}
1962 
1963 					dw_mci_request_end(host, mrq);
1964 				} else {
1965 					list_del(&slot->queue_node);
1966 					mrq->cmd->error = -ENOMEDIUM;
1967 					if (mrq->data)
1968 						mrq->data->error = -ENOMEDIUM;
1969 					if (mrq->stop)
1970 						mrq->stop->error = -ENOMEDIUM;
1971 
1972 					spin_unlock(&host->lock);
1973 					mmc_request_done(slot->mmc, mrq);
1974 					spin_lock(&host->lock);
1975 				}
1976 			}
1977 
1978 			/* Power down slot */
1979 			if (present == 0) {
1980 				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1981 
1982 				/* Clear down the FIFO */
1983 				dw_mci_fifo_reset(host);
1984 #ifdef CONFIG_MMC_DW_IDMAC
1985 				dw_mci_idmac_reset(host);
1986 #endif
1987 
1988 			}
1989 
1990 			spin_unlock_bh(&host->lock);
1991 
1992 			present = dw_mci_get_cd(mmc);
1993 		}
1994 
1995 		mmc_detect_change(slot->mmc,
1996 			msecs_to_jiffies(host->pdata->detect_delay_ms));
1997 	}
1998 }
1999 
2000 #ifdef CONFIG_OF
/* given a slot id, find the device node representing that slot */
2002 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2003 {
2004 	struct device_node *np;
2005 	const __be32 *addr;
2006 	int len;
2007 
2008 	if (!dev || !dev->of_node)
2009 		return NULL;
2010 
2011 	for_each_child_of_node(dev->of_node, np) {
2012 		addr = of_get_property(np, "reg", &len);
2013 		if (!addr || (len < sizeof(int)))
2014 			continue;
2015 		if (be32_to_cpup(addr) == slot)
2016 			return np;
2017 	}
2018 	return NULL;
2019 }
2020 
2021 static struct dw_mci_of_slot_quirks {
2022 	char *quirk;
2023 	int id;
2024 } of_slot_quirks[] = {
2025 	{
2026 		.quirk	= "disable-wp",
2027 		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2028 	},
2029 };
2030 
2031 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2032 {
2033 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2034 	int quirks = 0;
2035 	int idx;
2036 
2037 	/* get quirks */
2038 	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2039 		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2040 			quirks |= of_slot_quirks[idx].id;
2041 
2042 	return quirks;
2043 }
2044 
/* find the bus width for a given slot */
2046 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2047 {
2048 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2049 	u32 bus_wd = 1;
2050 
2051 	if (!np)
2052 		return 1;
2053 
2054 	if (of_property_read_u32(np, "bus-width", &bus_wd))
		dev_err(dev, "bus-width property not found, assuming a width of 1\n");
2057 	return bus_wd;
2058 }
2059 
/* find the write-protect gpio for a given slot; -EINVAL if none specified */
2061 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2062 {
2063 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2064 	int gpio;
2065 
2066 	if (!np)
2067 		return -EINVAL;
2068 
2069 	gpio = of_get_named_gpio(np, "wp-gpios", 0);
2070 
2071 	/* Having a missing entry is valid; return silently */
2072 	if (!gpio_is_valid(gpio))
2073 		return -EINVAL;
2074 
2075 	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2076 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2077 		return -EINVAL;
2078 	}
2079 
2080 	return gpio;
2081 }
2082 #else /* CONFIG_OF */
2083 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2084 {
2085 	return 0;
2086 }
2087 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2088 {
2089 	return 1;
2090 }
2091 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2092 {
2093 	return NULL;
2094 }
2095 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2096 {
2097 	return -EINVAL;
2098 }
2099 #endif /* CONFIG_OF */
2100 
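/*
 * Allocate and register one mmc_host for the given slot: derive the
 * frequency limits, OCR mask, capabilities and bus width from platform
 * data or the device tree, then hand the slot to the MMC core.
 */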
2101 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2102 {
2103 	struct mmc_host *mmc;
2104 	struct dw_mci_slot *slot;
2105 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2106 	int ctrl_id, ret;
2107 	u32 freq[2];
2108 	u8 bus_width;
2109 
2110 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2111 	if (!mmc)
2112 		return -ENOMEM;
2113 
2114 	slot = mmc_priv(mmc);
2115 	slot->id = id;
2116 	slot->mmc = mmc;
2117 	slot->host = host;
2118 	host->slot[id] = slot;
2119 
2120 	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2121 
2122 	mmc->ops = &dw_mci_ops;
2123 	if (of_property_read_u32_array(host->dev->of_node,
2124 				       "clock-freq-min-max", freq, 2)) {
2125 		mmc->f_min = DW_MCI_FREQ_MIN;
2126 		mmc->f_max = DW_MCI_FREQ_MAX;
2127 	} else {
2128 		mmc->f_min = freq[0];
2129 		mmc->f_max = freq[1];
2130 	}
2131 
2132 	if (host->pdata->get_ocr)
2133 		mmc->ocr_avail = host->pdata->get_ocr(id);
2134 	else
2135 		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2136 
2137 	/*
2138 	 * Start with slot power disabled, it will be enabled when a card
2139 	 * is detected.
2140 	 */
2141 	if (host->pdata->setpower)
2142 		host->pdata->setpower(id, 0);
2143 
2144 	if (host->pdata->caps)
2145 		mmc->caps = host->pdata->caps;
2146 
2147 	if (host->pdata->pm_caps)
2148 		mmc->pm_caps = host->pdata->pm_caps;
2149 
2150 	if (host->dev->of_node) {
2151 		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2152 		if (ctrl_id < 0)
2153 			ctrl_id = 0;
2154 	} else {
2155 		ctrl_id = to_platform_device(host->dev)->id;
2156 	}
2157 	if (drv_data && drv_data->caps)
2158 		mmc->caps |= drv_data->caps[ctrl_id];
2159 
2160 	if (host->pdata->caps2)
2161 		mmc->caps2 = host->pdata->caps2;
2162 
2163 	if (host->pdata->get_bus_wd)
2164 		bus_width = host->pdata->get_bus_wd(slot->id);
2165 	else if (host->dev->of_node)
2166 		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2167 	else
2168 		bus_width = 1;
2169 
	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through: an 8-bit slot can also do 4-bit transfers */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}
2176 
2177 	if (host->pdata->blk_settings) {
2178 		mmc->max_segs = host->pdata->blk_settings->max_segs;
2179 		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2180 		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2181 		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2182 		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2183 	} else {
2184 		/* Useful defaults if platform data is unset. */
2185 #ifdef CONFIG_MMC_DW_IDMAC
2186 		mmc->max_segs = host->ring_size;
2187 		mmc->max_blk_size = 65536;
2188 		mmc->max_blk_count = host->ring_size;
2189 		mmc->max_seg_size = 0x1000;
2190 		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2191 #else
2192 		mmc->max_segs = 64;
2193 		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2194 		mmc->max_blk_count = 512;
2195 		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2196 		mmc->max_seg_size = mmc->max_req_size;
2197 #endif /* CONFIG_MMC_DW_IDMAC */
2198 	}
2199 
2200 	if (dw_mci_get_cd(mmc))
2201 		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2202 	else
2203 		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2204 
2205 	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2206 
2207 	ret = mmc_add_host(mmc);
2208 	if (ret)
2209 		goto err_setup_bus;
2210 
2211 #if defined(CONFIG_DEBUG_FS)
2212 	dw_mci_init_debugfs(slot);
2213 #endif
2214 
2215 	/* Card initially undetected */
2216 	slot->last_detect_state = 0;
2217 
2218 	return 0;
2219 
err_setup_bus:
	/* Clear the dangling slot pointer before freeing the host */
	host->slot[id] = NULL;
	mmc_free_host(mmc);
	return -EINVAL;
2223 }
2224 
2225 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2226 {
2227 	/* Shutdown detect IRQ */
2228 	if (slot->host->pdata->exit)
2229 		slot->host->pdata->exit(id);
2230 
2231 	/* Debugfs stuff is cleaned up by mmc core */
2232 	mmc_remove_host(slot->mmc);
2233 	slot->host->slot[id] = NULL;
2234 	mmc_free_host(slot->mmc);
2235 }
2236 
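/*
 * Set up DMA for the host: the internal DMAC takes precedence when it is
 * compiled in; otherwise any dma_ops supplied via platform data are used.
 * Fall back to PIO if no implementation is usable or its init fails.
 */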
2237 static void dw_mci_init_dma(struct dw_mci *host)
2238 {
2239 	/* Alloc memory for sg translation */
2240 	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2241 					  &host->sg_dma, GFP_KERNEL);
2242 	if (!host->sg_cpu) {
2243 		dev_err(host->dev, "%s: could not alloc DMA memory\n",
2244 			__func__);
2245 		goto no_dma;
2246 	}
2247 
2248 	/* Determine which DMA interface to use */
2249 #ifdef CONFIG_MMC_DW_IDMAC
2250 	host->dma_ops = &dw_mci_idmac_ops;
2251 	dev_info(host->dev, "Using internal DMA controller.\n");
2252 #endif
2253 
2254 	if (!host->dma_ops)
2255 		goto no_dma;
2256 
2257 	if (host->dma_ops->init && host->dma_ops->start &&
2258 	    host->dma_ops->stop && host->dma_ops->cleanup) {
2259 		if (host->dma_ops->init(host)) {
			dev_err(host->dev,
				"%s: Unable to initialize DMA Controller.\n",
				__func__);
2262 			goto no_dma;
2263 		}
2264 	} else {
		dev_err(host->dev, "DMA ops incomplete, unable to use DMA.\n");
2266 		goto no_dma;
2267 	}
2268 
2269 	host->use_dma = 1;
2270 	return;
2271 
2272 no_dma:
2273 	dev_info(host->dev, "Using PIO mode.\n");
2274 	host->use_dma = 0;
2275 	return;
2276 }
2277 
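/*
 * Assert the requested reset bits in the CTRL register and poll for the
 * controller to clear them, giving up after 500ms.
 */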
2278 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2279 {
2280 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2281 	u32 ctrl;
2282 
2283 	ctrl = mci_readl(host, CTRL);
2284 	ctrl |= reset;
2285 	mci_writel(host, CTRL, ctrl);
2286 
2287 	/* wait till resets clear */
2288 	do {
2289 		ctrl = mci_readl(host, CTRL);
2290 		if (!(ctrl & reset))
2291 			return true;
2292 	} while (time_before(jiffies, timeout));
2293 
2294 	dev_err(host->dev,
2295 		"Timeout resetting block (ctrl reset %#x)\n",
2296 		ctrl & reset);
2297 
2298 	return false;
2299 }
2300 
2301 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2302 {
	/*
	 * Resetting the FIFO can raise a data-transfer interrupt while a
	 * transfer is in flight, so stop the scatter-gather iteration and
	 * clear the pointer before issuing the reset.
	 */
2307 	if (host->sg) {
2308 		sg_miter_stop(&host->sg_miter);
2309 		host->sg = NULL;
2310 	}
2311 
2312 	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2313 }
2314 
2315 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2316 {
2317 	return dw_mci_ctrl_reset(host,
2318 				 SDMMC_CTRL_FIFO_RESET |
2319 				 SDMMC_CTRL_RESET |
2320 				 SDMMC_CTRL_DMA_RESET);
2321 }
2322 
2323 #ifdef CONFIG_OF
2324 static struct dw_mci_of_quirks {
2325 	char *quirk;
2326 	int id;
2327 } of_quirks[] = {
2328 	{
2329 		.quirk	= "broken-cd",
2330 		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2331 	},
2332 };
2333 
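/*
 * Build a dw_mci_board from the device tree: slot count, quirks, FIFO
 * depth, card-detect debounce, bus clock and capability flags.
 *
 * For illustration only, a node this parser would accept might look like
 * the following (a hypothetical example; the exact properties and values
 * depend on the SoC binding):
 *
 *	mshc0: mshc@12200000 {
 *		compatible = "snps,dw-mshc";
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		broken-cd;
 *		supports-highspeed;
 *	};
 */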
2334 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2335 {
2336 	struct dw_mci_board *pdata;
2337 	struct device *dev = host->dev;
2338 	struct device_node *np = dev->of_node;
2339 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2340 	int idx, ret;
2341 	u32 clock_frequency;
2342 
2343 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2344 	if (!pdata) {
2345 		dev_err(dev, "could not allocate memory for pdata\n");
2346 		return ERR_PTR(-ENOMEM);
2347 	}
2348 
2349 	/* find out number of slots supported */
2350 	if (of_property_read_u32(dev->of_node, "num-slots",
2351 				&pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
2354 		pdata->num_slots = 1;
2355 	}
2356 
2357 	/* get quirks */
2358 	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2359 		if (of_get_property(np, of_quirks[idx].quirk, NULL))
2360 			pdata->quirks |= of_quirks[idx].id;
2361 
2362 	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");
2365 
2366 	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2367 
2368 	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2369 		pdata->bus_hz = clock_frequency;
2370 
2371 	if (drv_data && drv_data->parse_dt) {
2372 		ret = drv_data->parse_dt(host);
2373 		if (ret)
2374 			return ERR_PTR(ret);
2375 	}
2376 
2377 	if (of_find_property(np, "keep-power-in-suspend", NULL))
2378 		pdata->pm_caps |= MMC_PM_KEEP_POWER;
2379 
2380 	if (of_find_property(np, "enable-sdio-wakeup", NULL))
2381 		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2382 
2383 	if (of_find_property(np, "supports-highspeed", NULL))
2384 		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2385 
2386 	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2387 		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2388 
2389 	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2390 		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2391 
2392 	return pdata;
2393 }
2394 
2395 #else /* CONFIG_OF */
2396 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2397 {
2398 	return ERR_PTR(-EINVAL);
2399 }
2400 #endif /* CONFIG_OF */
2401 
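/*
 * Common probe path shared by the bus glue drivers: resolve platform data
 * (from DT if necessary), enable the biu/ciu clocks and the vmmc
 * regulator, size the FIFO, request the IRQ and register every slot.
 */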
2402 int dw_mci_probe(struct dw_mci *host)
2403 {
2404 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2405 	int width, i, ret = 0;
2406 	u32 fifo_size;
2407 	int init_slots = 0;
2408 
2409 	if (!host->pdata) {
2410 		host->pdata = dw_mci_parse_dt(host);
2411 		if (IS_ERR(host->pdata)) {
2412 			dev_err(host->dev, "platform data not available\n");
2413 			return -EINVAL;
2414 		}
2415 	}
2416 
2417 	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2418 		dev_err(host->dev,
2419 			"Platform data must supply select_slot function\n");
2420 		return -ENODEV;
2421 	}
2422 
2423 	host->biu_clk = devm_clk_get(host->dev, "biu");
2424 	if (IS_ERR(host->biu_clk)) {
2425 		dev_dbg(host->dev, "biu clock not available\n");
2426 	} else {
2427 		ret = clk_prepare_enable(host->biu_clk);
2428 		if (ret) {
2429 			dev_err(host->dev, "failed to enable biu clock\n");
2430 			return ret;
2431 		}
2432 	}
2433 
2434 	host->ciu_clk = devm_clk_get(host->dev, "ciu");
2435 	if (IS_ERR(host->ciu_clk)) {
2436 		dev_dbg(host->dev, "ciu clock not available\n");
2437 		host->bus_hz = host->pdata->bus_hz;
2438 	} else {
2439 		ret = clk_prepare_enable(host->ciu_clk);
2440 		if (ret) {
2441 			dev_err(host->dev, "failed to enable ciu clock\n");
2442 			goto err_clk_biu;
2443 		}
2444 
2445 		if (host->pdata->bus_hz) {
2446 			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2447 			if (ret)
2448 				dev_warn(host->dev,
2449 					 "Unable to set bus rate to %ul\n",
					 "Unable to set bus rate to %u Hz\n",
2451 		}
2452 		host->bus_hz = clk_get_rate(host->ciu_clk);
2453 	}
2454 
2455 	if (drv_data && drv_data->init) {
2456 		ret = drv_data->init(host);
2457 		if (ret) {
2458 			dev_err(host->dev,
2459 				"implementation specific init failed\n");
2460 			goto err_clk_ciu;
2461 		}
2462 	}
2463 
2464 	if (drv_data && drv_data->setup_clock) {
2465 		ret = drv_data->setup_clock(host);
2466 		if (ret) {
2467 			dev_err(host->dev,
2468 				"implementation specific clock setup failed\n");
2469 			goto err_clk_ciu;
2470 		}
2471 	}
2472 
2473 	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2474 	if (IS_ERR(host->vmmc)) {
2475 		ret = PTR_ERR(host->vmmc);
2476 		if (ret == -EPROBE_DEFER)
2477 			goto err_clk_ciu;
2478 
2479 		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2480 		host->vmmc = NULL;
2481 	} else {
2482 		ret = regulator_enable(host->vmmc);
2483 		if (ret) {
2484 			if (ret != -EPROBE_DEFER)
2485 				dev_err(host->dev,
2486 					"regulator_enable fail: %d\n", ret);
2487 			goto err_clk_ciu;
2488 		}
2489 	}
2490 
2491 	if (!host->bus_hz) {
2492 		dev_err(host->dev,
2493 			"Platform data must supply bus speed\n");
2494 		ret = -ENODEV;
2495 		goto err_regulator;
2496 	}
2497 
2498 	host->quirks = host->pdata->quirks;
2499 
2500 	spin_lock_init(&host->lock);
2501 	INIT_LIST_HEAD(&host->queue);
2502 
2503 	/*
2504 	 * Get the host data width - this assumes that HCON has been set with
2505 	 * the correct values.
2506 	 */
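	/*
	 * HCON bits [9:7] encode the host data width:
	 * 0 = 16-bit, 1 = 32-bit, 2 = 64-bit.
	 */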
2507 	i = (mci_readl(host, HCON) >> 7) & 0x7;
2508 	if (!i) {
2509 		host->push_data = dw_mci_push_data16;
2510 		host->pull_data = dw_mci_pull_data16;
2511 		width = 16;
2512 		host->data_shift = 1;
2513 	} else if (i == 2) {
2514 		host->push_data = dw_mci_push_data64;
2515 		host->pull_data = dw_mci_pull_data64;
2516 		width = 64;
2517 		host->data_shift = 3;
2518 	} else {
2519 		/* Check for a reserved value, and warn if it is */
2520 		WARN((i != 1),
2521 		     "HCON reports a reserved host data width!\n"
2522 		     "Defaulting to 32-bit access.\n");
2523 		host->push_data = dw_mci_push_data32;
2524 		host->pull_data = dw_mci_pull_data32;
2525 		width = 32;
2526 		host->data_shift = 2;
2527 	}
2528 
2529 	/* Reset all blocks */
	if (!dw_mci_ctrl_all_reset(host)) {
		/* Unwind the regulator and clocks enabled above */
		ret = -ENODEV;
		goto err_regulator;
	}
2532 
2533 	host->dma_ops = host->pdata->dma_ops;
2534 	dw_mci_init_dma(host);
2535 
2536 	/* Clear the interrupts for the host controller */
2537 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2538 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2539 
2540 	/* Put in max timeout */
2541 	mci_writel(host, TMOUT, 0xFFFFFFFF);
2542 
	/*
	 * FIFO threshold settings:
	 *   RX mark  = fifo_size / 2 - 1
	 *   TX mark  = fifo_size / 2
	 *   DMA size = 8 (MSIZE encoding 0x2, i.e. bursts of 8 transfers)
	 */
2547 	if (!host->pdata->fifo_depth) {
2548 		/*
2549 		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2550 		 * have been overwritten by the bootloader, just like we're
2551 		 * about to do, so if you know the value for your hardware, you
2552 		 * should put it in the platform data.
2553 		 */
2554 		fifo_size = mci_readl(host, FIFOTH);
2555 		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2556 	} else {
2557 		fifo_size = host->pdata->fifo_depth;
2558 	}
2559 	host->fifo_depth = fifo_size;
2560 	host->fifoth_val =
2561 		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2562 	mci_writel(host, FIFOTH, host->fifoth_val);
2563 
2564 	/* disable clock to CIU */
2565 	mci_writel(host, CLKENA, 0);
2566 	mci_writel(host, CLKSRC, 0);
2567 
	/*
	 * The DATA register offset changed in the 2.40a version of the spec,
	 * so check the version ID and set the offset accordingly.
	 */
2572 	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2573 	dev_info(host->dev, "Version ID is %04x\n", host->verid);
2574 
2575 	if (host->verid < DW_MMC_240A)
2576 		host->data_offset = DATA_OFFSET;
2577 	else
2578 		host->data_offset = DATA_240A_OFFSET;
2579 
2580 	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	/* workqueues are non-reentrant by default, so WQ_NON_REENTRANT is not needed */
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM, 1);
2583 	if (!host->card_workqueue) {
2584 		ret = -ENOMEM;
2585 		goto err_dmaunmap;
2586 	}
2587 	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2588 	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2589 			       host->irq_flags, "dw-mci", host);
2590 	if (ret)
2591 		goto err_workqueue;
2592 
2593 	if (host->pdata->num_slots)
2594 		host->num_slots = host->pdata->num_slots;
2595 	else
2596 		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2597 
	/*
	 * Enable interrupts for command done, data over, data empty, card
	 * detect, receive/transmit ready, and errors such as response or
	 * data timeout and CRC failure.
	 */
2602 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2603 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2604 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2605 		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2606 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2607 
	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);
2612 
2613 	/* We need at least one slot to succeed */
2614 	for (i = 0; i < host->num_slots; i++) {
2615 		ret = dw_mci_init_slot(host, i);
2616 		if (ret)
2617 			dev_dbg(host->dev, "slot %d init failed\n", i);
2618 		else
2619 			init_slots++;
2620 	}
2621 
2622 	if (init_slots) {
2623 		dev_info(host->dev, "%d slots initialized\n", init_slots);
2624 	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
2627 		goto err_workqueue;
2628 	}
2629 
2630 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2631 		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2632 
2633 	return 0;
2634 
2635 err_workqueue:
2636 	destroy_workqueue(host->card_workqueue);
2637 
2638 err_dmaunmap:
2639 	if (host->use_dma && host->dma_ops->exit)
2640 		host->dma_ops->exit(host);
2641 
2642 err_regulator:
2643 	if (host->vmmc)
2644 		regulator_disable(host->vmmc);
2645 
2646 err_clk_ciu:
2647 	if (!IS_ERR(host->ciu_clk))
2648 		clk_disable_unprepare(host->ciu_clk);
2649 
2650 err_clk_biu:
2651 	if (!IS_ERR(host->biu_clk))
2652 		clk_disable_unprepare(host->biu_clk);
2653 
2654 	return ret;
2655 }
2656 EXPORT_SYMBOL(dw_mci_probe);
2657 
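/*
 * Tear down in reverse probe order: mask and clear all interrupts, remove
 * each slot, gate the card clock, and release the DMA, regulator and
 * clock resources.
 */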
2658 void dw_mci_remove(struct dw_mci *host)
2659 {
2660 	int i;
2661 
2662 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2663 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2664 
2665 	for (i = 0; i < host->num_slots; i++) {
2666 		dev_dbg(host->dev, "remove slot %d\n", i);
2667 		if (host->slot[i])
2668 			dw_mci_cleanup_slot(host->slot[i], i);
2669 	}
2670 
2671 	/* disable clock to CIU */
2672 	mci_writel(host, CLKENA, 0);
2673 	mci_writel(host, CLKSRC, 0);
2674 
2675 	destroy_workqueue(host->card_workqueue);
2676 
2677 	if (host->use_dma && host->dma_ops->exit)
2678 		host->dma_ops->exit(host);
2679 
2680 	if (host->vmmc)
2681 		regulator_disable(host->vmmc);
2682 
2683 	if (!IS_ERR(host->ciu_clk))
2684 		clk_disable_unprepare(host->ciu_clk);
2685 
2686 	if (!IS_ERR(host->biu_clk))
2687 		clk_disable_unprepare(host->biu_clk);
2688 }
2689 EXPORT_SYMBOL(dw_mci_remove);
2690 
2693 #ifdef CONFIG_PM_SLEEP
2694 /*
2695  * TODO: we should probably disable the clock to the card in the suspend path.
2696  */
2697 int dw_mci_suspend(struct dw_mci *host)
2698 {
2699 	if (host->vmmc)
2700 		regulator_disable(host->vmmc);
2701 
2702 	return 0;
2703 }
2704 EXPORT_SYMBOL(dw_mci_suspend);
2705 
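/*
 * Resume: re-enable vmmc, reset the controller, re-initialize DMA and
 * restore FIFOTH/INTMASK, then re-run set_ios for any slot that kept
 * power across suspend.
 */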
2706 int dw_mci_resume(struct dw_mci *host)
2707 {
2708 	int i, ret;
2709 
2710 	if (host->vmmc) {
2711 		ret = regulator_enable(host->vmmc);
2712 		if (ret) {
2713 			dev_err(host->dev,
2714 				"failed to enable regulator: %d\n", ret);
2715 			return ret;
2716 		}
2717 	}
2718 
	if (!dw_mci_ctrl_all_reset(host))
		return -ENODEV;
2723 
2724 	if (host->use_dma && host->dma_ops->init)
2725 		host->dma_ops->init(host);
2726 
	/*
	 * Restore the initial value of the FIFOTH register,
	 * and invalidate prev_blksz by zeroing it.
	 */
2731 	mci_writel(host, FIFOTH, host->fifoth_val);
2732 	host->prev_blksz = 0;
2733 
2734 	/* Put in max timeout */
2735 	mci_writel(host, TMOUT, 0xFFFFFFFF);
2736 
2737 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2738 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2739 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2740 		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2741 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2742 
2743 	for (i = 0; i < host->num_slots; i++) {
2744 		struct dw_mci_slot *slot = host->slot[i];
2745 		if (!slot)
2746 			continue;
2747 		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2748 			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2749 			dw_mci_setup_bus(slot, true);
2750 		}
2751 	}
2752 	return 0;
2753 }
2754 EXPORT_SYMBOL(dw_mci_resume);
2755 #endif /* CONFIG_PM_SLEEP */
2756 
2757 static int __init dw_mci_init(void)
2758 {
	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
2760 	return 0;
2761 }
2762 
2763 static void __exit dw_mci_exit(void)
2764 {
2765 }
2766 
2767 module_init(dw_mci_init);
2768 module_exit(dw_mci_exit);
2769 
2770 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2771 MODULE_AUTHOR("NXP Semiconductor VietNam");
2772 MODULE_AUTHOR("Imagination Technologies Ltd");
2773 MODULE_LICENSE("GPL v2");
2774