xref: /linux/drivers/mmc/host/omap.c (revision c145211d1f9e2ef19e7b4c2b943f68366daa97af)
1 /*
2  *  linux/drivers/mmc/host/omap.c
3  *
4  *  Copyright (C) 2004 Nokia Corporation
 *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6  *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7  *  Other hacks (DMA, SD, etc) by David Brownell
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/init.h>
17 #include <linux/ioport.h>
18 #include <linux/platform_device.h>
19 #include <linux/interrupt.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/delay.h>
22 #include <linux/spinlock.h>
23 #include <linux/timer.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/card.h>
26 #include <linux/clk.h>
27 #include <linux/scatterlist.h>
28 #include <linux/i2c/tps65010.h>
29 #include <linux/slab.h>
30 
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 
34 #include <plat/board.h>
35 #include <plat/mmc.h>
36 #include <mach/gpio.h>
37 #include <plat/dma.h>
38 #include <plat/mux.h>
39 #include <plat/fpga.h>
40 
41 #define	OMAP_MMC_REG_CMD	0x00
42 #define	OMAP_MMC_REG_ARGL	0x04
43 #define	OMAP_MMC_REG_ARGH	0x08
44 #define	OMAP_MMC_REG_CON	0x0c
45 #define	OMAP_MMC_REG_STAT	0x10
46 #define	OMAP_MMC_REG_IE		0x14
47 #define	OMAP_MMC_REG_CTO	0x18
48 #define	OMAP_MMC_REG_DTO	0x1c
49 #define	OMAP_MMC_REG_DATA	0x20
50 #define	OMAP_MMC_REG_BLEN	0x24
51 #define	OMAP_MMC_REG_NBLK	0x28
52 #define	OMAP_MMC_REG_BUF	0x2c
53 #define OMAP_MMC_REG_SDIO	0x34
54 #define	OMAP_MMC_REG_REV	0x3c
55 #define	OMAP_MMC_REG_RSP0	0x40
56 #define	OMAP_MMC_REG_RSP1	0x44
57 #define	OMAP_MMC_REG_RSP2	0x48
58 #define	OMAP_MMC_REG_RSP3	0x4c
59 #define	OMAP_MMC_REG_RSP4	0x50
60 #define	OMAP_MMC_REG_RSP5	0x54
61 #define	OMAP_MMC_REG_RSP6	0x58
62 #define	OMAP_MMC_REG_RSP7	0x5c
63 #define	OMAP_MMC_REG_IOSR	0x60
64 #define	OMAP_MMC_REG_SYSC	0x64
65 #define	OMAP_MMC_REG_SYSS	0x68
66 
67 #define	OMAP_MMC_STAT_CARD_ERR		(1 << 14)
68 #define	OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
69 #define	OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
70 #define	OMAP_MMC_STAT_A_EMPTY		(1 << 11)
71 #define	OMAP_MMC_STAT_A_FULL		(1 << 10)
72 #define	OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
73 #define	OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
74 #define	OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
75 #define	OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
76 #define	OMAP_MMC_STAT_END_BUSY		(1 <<  4)
77 #define	OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
78 #define	OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
79 #define	OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)
80 
81 #define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
82 #define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
83 
84 /*
85  * Command types
86  */
87 #define OMAP_MMC_CMDTYPE_BC	0
88 #define OMAP_MMC_CMDTYPE_BCR	1
89 #define OMAP_MMC_CMDTYPE_AC	2
90 #define OMAP_MMC_CMDTYPE_ADTC	3
91 
92 
93 #define DRIVER_NAME "mmci-omap"
94 
95 /* Specifies how often in millisecs to poll for card status changes
96  * when the cover switch is open */
97 #define OMAP_MMC_COVER_POLL_DELAY	500
98 
99 struct mmc_omap_host;
100 
/* Per-slot state; one controller (mmc_omap_host) can serve several slots */
struct mmc_omap_slot {
	int			id;		/* slot index on this controller */
	unsigned int		vdd;		/* last requested vdd (setter not in view) */
	u16			saved_con;	/* CON value restored when this slot is selected */
	u16			bus_mode;	/* MMC_BUSMODE_* (open drain vs push-pull) */
	unsigned int		fclk_freq;	/* current fclk rate, used for cycle-time math */
	unsigned		powered:1;

	/* Cover-switch polling machinery */
	struct tasklet_struct	cover_tasklet;
	struct timer_list       cover_timer;
	unsigned		cover_open;	/* last observed cover state */

	struct mmc_request      *mrq;		/* request queued while another slot owns the host */
	struct mmc_omap_host    *host;		/* owning controller */
	struct mmc_host		*mmc;		/* MMC core handle for this slot */
	struct omap_mmc_slot_data *pdata;	/* board hooks (cover state, slot name, ...) */
};
118 
/* Controller state shared by all slots */
struct mmc_omap_host {
	int			initialized;
	int			suspended;
	struct mmc_request *	mrq;		/* request being processed, NULL when idle */
	struct mmc_command *	cmd;		/* command in flight, NULL when idle */
	struct mmc_data *	data;		/* data phase in flight, NULL when idle */
	struct mmc_host *	mmc;		/* non-NULL marks controller ownership by a slot */
	struct device *		dev;
	unsigned char		id; /* 16xx chips have 2 MMC blocks */
	struct clk *		iclk;
	struct clk *		fclk;
	struct resource		*mem_res;
	void __iomem		*virt_base;	/* register base used by OMAP_MMC_READ/WRITE */
	unsigned int		phys_base;	/* physical base, used for DMA data address */
	int			irq;
	unsigned char		bus_mode;
	unsigned char		hw_bus_mode;

	/* Stuck-command recovery (see mmc_omap_cmd_timer/mmc_omap_abort_command) */
	struct work_struct	cmd_abort_work;
	unsigned		abort:1;
	struct timer_list	cmd_abort_timer;

	/* Hand-off of the controller to the next slot with a queued request */
	struct work_struct      slot_release_work;
	struct mmc_omap_slot    *next_slot;
	struct work_struct      send_stop_work;	/* issues data->stop after a transfer */
	struct mmc_data		*stop_data;

	/* PIO cursor state (see mmc_omap_sg_to_buf/mmc_omap_xfer_data) */
	unsigned int		sg_len;
	int			sg_idx;
	u16 *			buffer;
	u32			buffer_bytes_left;
	u32			total_bytes_left;

	unsigned		use_dma:1;
	unsigned		brs_received:1, dma_done:1;	/* both set => transfer complete */
	unsigned		dma_is_read:1;
	unsigned		dma_in_use:1;
	int			dma_ch;		/* allocated DMA channel, -1 when free */
	spinlock_t		dma_lock;
	struct timer_list	dma_timer;	/* lazily frees dma_ch */
	unsigned		dma_len;

	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
	struct mmc_omap_slot    *current_slot;
	spinlock_t              slot_lock;
	wait_queue_head_t       slot_wq;	/* waiters for controller ownership */
	int                     nr_slots;

	struct timer_list       clk_timer;	/* turns fclk off a while after release */
	spinlock_t		clk_lock;     /* for changing enabled state */
	unsigned int            fclk_enabled:1;

	struct omap_mmc_platform_data *pdata;
};
173 
174 static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
175 {
176 	unsigned long tick_ns;
177 
178 	if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
179 		tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
180 		ndelay(8 * tick_ns);
181 	}
182 }
183 
184 static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
185 {
186 	unsigned long flags;
187 
188 	spin_lock_irqsave(&host->clk_lock, flags);
189 	if (host->fclk_enabled != enable) {
190 		host->fclk_enabled = enable;
191 		if (enable)
192 			clk_enable(host->fclk);
193 		else
194 			clk_disable(host->fclk);
195 	}
196 	spin_unlock_irqrestore(&host->clk_lock, flags);
197 }
198 
/*
 * Route the controller to @slot. With @claimed == 0 the caller does not
 * yet own the controller, so we first wait for host->mmc (the ownership
 * marker) to become free; with @claimed != 0 ownership was already taken
 * and the slot's clock is brought up.
 */
static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	if (claimed)
		goto no_claim;
	spin_lock_irqsave(&host->slot_lock, flags);
	/* Sleep until the previous owner clears host->mmc, re-checking
	 * under the lock each time we are woken. */
	while (host->mmc != NULL) {
		spin_unlock_irqrestore(&host->slot_lock, flags);
		wait_event(host->slot_wq, host->mmc == NULL);
		spin_lock_irqsave(&host->slot_lock, flags);
	}
	host->mmc = slot->mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
no_claim:
	del_timer(&host->clk_timer);
	if (host->current_slot != slot || !claimed)
		/* Give the outgoing slot its trailing 8 clock cycles */
		mmc_omap_fclk_offdelay(host->current_slot);

	if (host->current_slot != slot) {
		/* Program the new slot's CON (upper bits only for now) and
		 * ask the board code to flip any external mux to this slot. */
		OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
		if (host->pdata->switch_slot != NULL)
			host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
		host->current_slot = slot;
	}

	if (claimed) {
		mmc_omap_fclk_enable(host, 1);

		/* Doing the dummy read here seems to work around some bug
		 * at least in OMAP24xx silicon where the command would not
		 * start after writing the CMD register. Sigh. */
		OMAP_MMC_READ(host, CON);

		OMAP_MMC_WRITE(host, CON, slot->saved_con);
	} else
		mmc_omap_fclk_enable(host, 0);
}
238 
239 static void mmc_omap_start_request(struct mmc_omap_host *host,
240 				   struct mmc_request *req);
241 
242 static void mmc_omap_slot_release_work(struct work_struct *work)
243 {
244 	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
245 						  slot_release_work);
246 	struct mmc_omap_slot *next_slot = host->next_slot;
247 	struct mmc_request *rq;
248 
249 	host->next_slot = NULL;
250 	mmc_omap_select_slot(next_slot, 1);
251 
252 	rq = next_slot->mrq;
253 	next_slot->mrq = NULL;
254 	mmc_omap_start_request(host, rq);
255 }
256 
/*
 * Give up controller ownership. With @clk_enabled the fclk keeps running
 * for a short while (clk_timer gates it later); otherwise it is gated
 * immediately. If another slot queued a request in the meantime,
 * ownership is handed straight to it via slot_release_work instead of
 * being released.
 */
static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;
	int i;

	BUG_ON(slot == NULL || host->mmc == NULL);

	if (clk_enabled)
		/* Keeps clock running for at least 8 cycles on valid freq */
		mod_timer(&host->clk_timer, jiffies  + HZ/10);
	else {
		del_timer(&host->clk_timer);
		mmc_omap_fclk_offdelay(slot);
		mmc_omap_fclk_enable(host, 0);
	}

	spin_lock_irqsave(&host->slot_lock, flags);
	/* Check for any pending requests */
	for (i = 0; i < host->nr_slots; i++) {
		struct mmc_omap_slot *new_slot;

		if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
			continue;

		BUG_ON(host->next_slot != NULL);
		new_slot = host->slots[i];
		/* The current slot should not have a request in queue */
		BUG_ON(new_slot == host->current_slot);

		/* Transfer ownership directly; the worker starts the request */
		host->next_slot = new_slot;
		host->mmc = new_slot->mmc;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		schedule_work(&host->slot_release_work);
		return;
	}

	/* Nobody waiting with a queued request: free the controller and
	 * wake any sleepers in mmc_omap_select_slot(). */
	host->mmc = NULL;
	wake_up(&host->slot_wq);
	spin_unlock_irqrestore(&host->slot_lock, flags);
}
298 
299 static inline
300 int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
301 {
302 	if (slot->pdata->get_cover_state)
303 		return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
304 						    slot->id);
305 	return 0;
306 }
307 
308 static ssize_t
309 mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
310 			   char *buf)
311 {
312 	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
313 	struct mmc_omap_slot *slot = mmc_priv(mmc);
314 
315 	return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
316 		       "closed");
317 }
318 
319 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
320 
321 static ssize_t
322 mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
323 			char *buf)
324 {
325 	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
326 	struct mmc_omap_slot *slot = mmc_priv(mmc);
327 
328 	return sprintf(buf, "%s\n", slot->pdata->name);
329 }
330 
331 static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
332 
/*
 * Program the controller registers for @cmd and fire it. Also arms the
 * cmd_abort_timer so a command that never completes is recovered by
 * mmc_omap_cmd_timer/mmc_omap_abort_command.
 */
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	/* Map the MMC core's command class onto the controller's encoding */
	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	/* Bit 6: open-drain CMD line for MMC_BUSMODE_OPENDRAIN */
	if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	/* Bit 11: a busy signal is expected after the response */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	/* Bit 15: data direction is card-to-host (read) */
	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);

	OMAP_MMC_WRITE(host, CTO, 200);
	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
	/* Unmask all events the IRQ handler cares about */
	OMAP_MMC_WRITE(host, IE,
		       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
		       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
		       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
		       OMAP_MMC_STAT_END_OF_DATA);
	/* Writing CMD starts the command */
	OMAP_MMC_WRITE(host, CMD, cmdreg);
}
399 
400 static void
401 mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
402 		     int abort)
403 {
404 	enum dma_data_direction dma_data_dir;
405 
406 	BUG_ON(host->dma_ch < 0);
407 	if (data->error)
408 		omap_stop_dma(host->dma_ch);
409 	/* Release DMA channel lazily */
410 	mod_timer(&host->dma_timer, jiffies + HZ);
411 	if (data->flags & MMC_DATA_WRITE)
412 		dma_data_dir = DMA_TO_DEVICE;
413 	else
414 		dma_data_dir = DMA_FROM_DEVICE;
415 	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
416 		     dma_data_dir);
417 }
418 
419 static void mmc_omap_send_stop_work(struct work_struct *work)
420 {
421 	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
422 						  send_stop_work);
423 	struct mmc_omap_slot *slot = host->current_slot;
424 	struct mmc_data *data = host->stop_data;
425 	unsigned long tick_ns;
426 
427 	tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
428 	ndelay(8*tick_ns);
429 
430 	mmc_omap_start_command(host, data->stop);
431 }
432 
/*
 * Data phase finished (successfully or not): release DMA resources,
 * then either complete the request or queue the stop command.
 */
static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, data->error);

	host->data = NULL;
	host->sg_len = 0;

	/* NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		struct mmc_host *mmc;

		/* No stop command needed: the whole request is done */
		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, data->mrq);
		return;
	}

	/* A stop command follows; send it from process context */
	host->stop_data = data;
	schedule_work(&host->send_stop_work);
}
460 
461 static void
462 mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
463 {
464 	struct mmc_omap_slot *slot = host->current_slot;
465 	unsigned int restarts, passes, timeout;
466 	u16 stat = 0;
467 
468 	/* Sending abort takes 80 clocks. Have some extra and round up */
469 	timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
470 	restarts = 0;
471 	while (restarts < maxloops) {
472 		OMAP_MMC_WRITE(host, STAT, 0xFFFF);
473 		OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
474 
475 		passes = 0;
476 		while (passes < timeout) {
477 			stat = OMAP_MMC_READ(host, STAT);
478 			if (stat & OMAP_MMC_STAT_END_OF_CMD)
479 				goto out;
480 			udelay(1);
481 			passes++;
482 		}
483 
484 		restarts++;
485 	}
486 out:
487 	OMAP_MMC_WRITE(host, STAT, stat);
488 }
489 
/*
 * Abort an in-progress data transfer: tear down DMA (if any), drop the
 * data state and send the abort command sequence to the card.
 */
static void
mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, 1);

	host->data = NULL;
	host->sg_len = 0;

	mmc_omap_send_abort(host, 10000);
}
501 
502 static void
503 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
504 {
505 	unsigned long flags;
506 	int done;
507 
508 	if (!host->dma_in_use) {
509 		mmc_omap_xfer_done(host, data);
510 		return;
511 	}
512 	done = 0;
513 	spin_lock_irqsave(&host->dma_lock, flags);
514 	if (host->dma_done)
515 		done = 1;
516 	else
517 		host->brs_received = 1;
518 	spin_unlock_irqrestore(&host->dma_lock, flags);
519 	if (done)
520 		mmc_omap_xfer_done(host, data);
521 }
522 
/*
 * Timer callback: lazily free the DMA channel that
 * mmc_omap_release_dma() left allocated for potential reuse.
 */
static void
mmc_omap_dma_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	BUG_ON(host->dma_ch < 0);
	omap_free_dma(host->dma_ch);
	host->dma_ch = -1;
}
532 
533 static void
534 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
535 {
536 	unsigned long flags;
537 	int done;
538 
539 	done = 0;
540 	spin_lock_irqsave(&host->dma_lock, flags);
541 	if (host->brs_received)
542 		done = 1;
543 	else
544 		host->dma_done = 1;
545 	spin_unlock_irqrestore(&host->dma_lock, flags);
546 	if (done)
547 		mmc_omap_xfer_done(host, data);
548 }
549 
/*
 * Command phase finished: read back the response registers and, when
 * there is no data phase (or the command failed), complete the whole
 * request.
 */
static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	host->cmd = NULL;

	/* Command completed; the stuck-command watchdog is no longer needed */
	del_timer(&host->cmd_abort_timer);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				OMAP_MMC_READ(host, RSP0) |
				(OMAP_MMC_READ(host, RSP1) << 16);
			cmd->resp[2] =
				OMAP_MMC_READ(host, RSP2) |
				(OMAP_MMC_READ(host, RSP3) << 16);
			cmd->resp[1] =
				OMAP_MMC_READ(host, RSP4) |
				(OMAP_MMC_READ(host, RSP5) << 16);
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		}
	}

	/* With no data phase, or on a command error (which aborts any data
	 * phase), release the slot and complete the request now. */
	if (host->data == NULL || cmd->error) {
		struct mmc_host *mmc;

		if (host->data != NULL)
			mmc_omap_abort_xfer(host, host->data);
		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	}
}
591 
/*
 * Abort stuck command. Can occur when card is removed while it is being
 * read.
 *
 * Runs from the cmd_abort_work queued by mmc_omap_cmd_timer() or the
 * IRQ handler; the IRQ was disabled by the scheduler of this work and
 * is re-enabled here once recovery is done.
 */
static void mmc_omap_abort_command(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  cmd_abort_work);
	BUG_ON(!host->cmd);

	dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
		host->cmd->opcode);

	if (host->cmd->error == 0)
		host->cmd->error = -ETIMEDOUT;

	if (host->data == NULL) {
		/* No data phase: send the abort sequence and complete the
		 * request directly. */
		struct mmc_command *cmd;
		struct mmc_host    *mmc;

		cmd = host->cmd;
		host->cmd = NULL;
		mmc_omap_send_abort(host, 10000);

		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	} else
		/* Data phase pending: cmd_done aborts it and completes */
		mmc_omap_cmd_done(host, host->cmd);

	host->abort = 0;
	enable_irq(host->irq);
}
626 
627 static void
628 mmc_omap_cmd_timer(unsigned long data)
629 {
630 	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
631 	unsigned long flags;
632 
633 	spin_lock_irqsave(&host->slot_lock, flags);
634 	if (host->cmd != NULL && !host->abort) {
635 		OMAP_MMC_WRITE(host, IE, 0);
636 		disable_irq(host->irq);
637 		host->abort = 1;
638 		schedule_work(&host->cmd_abort_work);
639 	}
640 	spin_unlock_irqrestore(&host->slot_lock, flags);
641 }
642 
643 /* PIO only */
644 static void
645 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
646 {
647 	struct scatterlist *sg;
648 
649 	sg = host->data->sg + host->sg_idx;
650 	host->buffer_bytes_left = sg->length;
651 	host->buffer = sg_virt(sg);
652 	if (host->buffer_bytes_left > host->total_bytes_left)
653 		host->buffer_bytes_left = host->total_bytes_left;
654 }
655 
/*
 * Timer callback armed by mmc_omap_release_slot(): gate the function
 * clock once it has run for a while after the slot was released.
 */
static void
mmc_omap_clk_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	mmc_omap_fclk_enable(host, 0);
}
663 
664 /* PIO only */
665 static void
666 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
667 {
668 	int n;
669 
670 	if (host->buffer_bytes_left == 0) {
671 		host->sg_idx++;
672 		BUG_ON(host->sg_idx == host->sg_len);
673 		mmc_omap_sg_to_buf(host);
674 	}
675 	n = 64;
676 	if (n > host->buffer_bytes_left)
677 		n = host->buffer_bytes_left;
678 	host->buffer_bytes_left -= n;
679 	host->total_bytes_left -= n;
680 	host->data->bytes_xfered += n;
681 
682 	if (write) {
683 		__raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
684 	} else {
685 		__raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
686 	}
687 }
688 
689 static inline void mmc_omap_report_irq(u16 status)
690 {
691 	static const char *mmc_omap_status_bits[] = {
692 		"EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
693 		"CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
694 	};
695 	int i, c = 0;
696 
697 	for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
698 		if (status & (1 << i)) {
699 			if (c)
700 				printk(" ");
701 			printk("%s", mmc_omap_status_bits[i]);
702 			c++;
703 		}
704 }
705 
/*
 * Interrupt handler: drain and acknowledge STAT, feed the PIO FIFO,
 * record command/data completion and errors, then dispatch completion
 * (or hand recovery to the abort worker).
 */
static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
{
	struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
	u16 status;
	int end_command;
	int end_transfer;
	int transfer_error, cmd_error;

	/* Nothing in flight: ack whatever the controller latched and mask
	 * further interrupts. */
	if (host->cmd == NULL && host->data == NULL) {
		status = OMAP_MMC_READ(host, STAT);
		dev_info(mmc_dev(host->slots[0]->mmc),
			 "Spurious IRQ 0x%04x\n", status);
		if (status != 0) {
			OMAP_MMC_WRITE(host, STAT, status);
			OMAP_MMC_WRITE(host, IE, 0);
		}
		return IRQ_HANDLED;
	}

	end_command = 0;
	end_transfer = 0;
	transfer_error = 0;
	cmd_error = 0;

	/* Read STAT until it drains to zero; each batch of bits is acked
	 * by writing the same bits back. */
	while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
		int cmd;

		OMAP_MMC_WRITE(host, STAT, status);
		if (host->cmd != NULL)
			cmd = host->cmd->opcode;
		else
			cmd = -1;
#ifdef CONFIG_MMC_DEBUG
		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
			status, cmd);
		mmc_omap_report_irq(status);
		printk("\n");
#endif
		/* PIO: move data when the FIFO fills (read) or drains (write) */
		if (host->total_bytes_left) {
			if ((status & OMAP_MMC_STAT_A_FULL) ||
			    (status & OMAP_MMC_STAT_END_OF_DATA))
				mmc_omap_xfer_data(host, 0);
			if (status & OMAP_MMC_STAT_A_EMPTY)
				mmc_omap_xfer_data(host, 1);
		}

		if (status & OMAP_MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & OMAP_MMC_STAT_DATA_TOUT) {
			dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
				cmd);
			if (host->data) {
				host->data->error = -ETIMEDOUT;
				transfer_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_DATA_CRC) {
			if (host->data) {
				host->data->error = -EILSEQ;
				dev_dbg(mmc_dev(host->mmc),
					 "data CRC error, bytes left %d\n",
					host->total_bytes_left);
				transfer_error = 1;
			} else {
				dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
			}
		}

		if (status & OMAP_MMC_STAT_CMD_TOUT) {
			/* Timeouts are routine with some commands */
			if (host->cmd) {
				struct mmc_omap_slot *slot =
					host->current_slot;
				/* Expected while the cover is open, so only
				 * complain when it is closed. */
				if (slot == NULL ||
				    !mmc_omap_cover_is_open(slot))
					dev_err(mmc_dev(host->mmc),
						"command timeout (CMD%d)\n",
						cmd);
				host->cmd->error = -ETIMEDOUT;
				end_command = 1;
				cmd_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_CMD_CRC) {
			if (host->cmd) {
				dev_err(mmc_dev(host->mmc),
					"command CRC error (CMD%d, arg 0x%08x)\n",
					cmd, host->cmd->arg);
				host->cmd->error = -EILSEQ;
				end_command = 1;
				cmd_error = 1;
			} else
				dev_err(mmc_dev(host->mmc),
					"command CRC error without cmd?\n");
		}

		if (status & OMAP_MMC_STAT_CARD_ERR) {
			dev_dbg(mmc_dev(host->mmc),
				"ignoring card status error (CMD%d)\n",
				cmd);
			end_command = 1;
		}

		/*
		 * NOTE: On 1610 the END_OF_CMD may come too early when
		 * starting a write
		 */
		if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
		    (!(status & OMAP_MMC_STAT_A_EMPTY))) {
			end_command = 1;
		}
	}

	/* A failed command with a data phase pending cannot be recovered
	 * from IRQ context: mask everything and defer to the abort worker. */
	if (cmd_error && host->data) {
		del_timer(&host->cmd_abort_timer);
		host->abort = 1;
		OMAP_MMC_WRITE(host, IE, 0);
		disable_irq_nosync(host->irq);
		schedule_work(&host->cmd_abort_work);
		return IRQ_HANDLED;
	}

	if (end_command)
		mmc_omap_cmd_done(host, host->cmd);
	if (host->data != NULL) {
		if (transfer_error)
			mmc_omap_xfer_done(host, host->data);
		else if (end_transfer)
			mmc_omap_end_of_data(host, host->data);
	}

	return IRQ_HANDLED;
}
842 
843 void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
844 {
845 	int cover_open;
846 	struct mmc_omap_host *host = dev_get_drvdata(dev);
847 	struct mmc_omap_slot *slot = host->slots[num];
848 
849 	BUG_ON(num >= host->nr_slots);
850 
851 	/* Other subsystems can call in here before we're initialised. */
852 	if (host->nr_slots == 0 || !host->slots[num])
853 		return;
854 
855 	cover_open = mmc_omap_cover_is_open(slot);
856 	if (cover_open != slot->cover_open) {
857 		slot->cover_open = cover_open;
858 		sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
859 	}
860 
861 	tasklet_hi_schedule(&slot->cover_tasklet);
862 }
863 
/*
 * Cover poll timer: defer the actual work to the cover tasklet.
 */
static void mmc_omap_cover_timer(unsigned long arg)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
	tasklet_schedule(&slot->cover_tasklet);
}
869 
/*
 * Cover tasklet: ask the MMC core to rescan the slot, and keep polling
 * the cover while it is open with a card present.
 */
static void mmc_omap_cover_handler(unsigned long param)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
	int cover_open = mmc_omap_cover_is_open(slot);

	mmc_detect_change(slot->mmc, 0);
	if (!cover_open)
		return;

	/*
	 * If no card is inserted, we postpone polling until
	 * the cover has been closed.
	 */
	if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
		return;

	mod_timer(&slot->cover_timer,
		  jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
}
889 
/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	/* DMA targets the controller's DATA register (physical address) */
	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
	frame = data->blksz;
	count = sg_dma_len(sg);

	/* Single-block request on a larger segment: cap at one block */
	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	/* count: bytes -> frames; frame: bytes -> 16-bit words */
	count /= frame;
	frame >>= 1;

	if (!(data->flags & MMC_DATA_WRITE)) {
		/* Card -> memory: constant source (FIFO), incrementing dest */
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		/* Memory -> card: incrementing source, constant dest (FIFO) */
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
969 
970 /* A scatterlist segment completed */
971 static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
972 {
973 	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
974 	struct mmc_data *mmcdat = host->data;
975 
976 	if (unlikely(host->dma_ch < 0)) {
977 		dev_err(mmc_dev(host->mmc),
978 			"DMA callback while DMA not enabled\n");
979 		return;
980 	}
981 	/* FIXME: We really should do something to _handle_ the errors */
982 	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
983 		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
984 		return;
985 	}
986 	if (ch_status & OMAP_DMA_DROP_IRQ) {
987 		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
988 		return;
989 	}
990 	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
991 		return;
992 	}
993 	mmcdat->bytes_xfered += host->dma_len;
994 	host->sg_idx++;
995 	if (host->sg_idx < host->sg_len) {
996 		mmc_omap_prepare_dma(host, host->data);
997 		omap_start_dma(host->dma_ch);
998 	} else
999 		mmc_omap_dma_done(host, host->data);
1000 }
1001 
1002 static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
1003 {
1004 	const char *dma_dev_name;
1005 	int sync_dev, dma_ch, is_read, r;
1006 
1007 	is_read = !(data->flags & MMC_DATA_WRITE);
1008 	del_timer_sync(&host->dma_timer);
1009 	if (host->dma_ch >= 0) {
1010 		if (is_read == host->dma_is_read)
1011 			return 0;
1012 		omap_free_dma(host->dma_ch);
1013 		host->dma_ch = -1;
1014 	}
1015 
1016 	if (is_read) {
1017 		if (host->id == 0) {
1018 			sync_dev = OMAP_DMA_MMC_RX;
1019 			dma_dev_name = "MMC1 read";
1020 		} else {
1021 			sync_dev = OMAP_DMA_MMC2_RX;
1022 			dma_dev_name = "MMC2 read";
1023 		}
1024 	} else {
1025 		if (host->id == 0) {
1026 			sync_dev = OMAP_DMA_MMC_TX;
1027 			dma_dev_name = "MMC1 write";
1028 		} else {
1029 			sync_dev = OMAP_DMA_MMC2_TX;
1030 			dma_dev_name = "MMC2 write";
1031 		}
1032 	}
1033 	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
1034 			     host, &dma_ch);
1035 	if (r != 0) {
1036 		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
1037 		return r;
1038 	}
1039 	host->dma_ch = dma_ch;
1040 	host->dma_is_read = is_read;
1041 
1042 	return 0;
1043 }
1044 
1045 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1046 {
1047 	u16 reg;
1048 
1049 	reg = OMAP_MMC_READ(host, SDIO);
1050 	reg &= ~(1 << 5);
1051 	OMAP_MMC_WRITE(host, SDIO, reg);
1052 	/* Set maximum timeout */
1053 	OMAP_MMC_WRITE(host, CTO, 0xff);
1054 }
1055 
1056 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1057 {
1058 	unsigned int timeout, cycle_ns;
1059 	u16 reg;
1060 
1061 	cycle_ns = 1000000000 / host->current_slot->fclk_freq;
1062 	timeout = req->data->timeout_ns / cycle_ns;
1063 	timeout += req->data->timeout_clks;
1064 
1065 	/* Check if we need to use timeout multiplier register */
1066 	reg = OMAP_MMC_READ(host, SDIO);
1067 	if (timeout > 0xffff) {
1068 		reg |= (1 << 5);
1069 		timeout /= 1024;
1070 	} else
1071 		reg &= ~(1 << 5);
1072 	OMAP_MMC_WRITE(host, SDIO, reg);
1073 	OMAP_MMC_WRITE(host, DTO, timeout);
1074 }
1075 
/*
 * Set up the data phase of @req: program block registers and timeouts,
 * then configure either DMA or PIO for the transfer. Falls back to PIO
 * when DMA cannot be used (partial blocks, no channel available).
 */
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		/* Command-only request: zero the block registers */
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	/* NBLK/BLEN hold (count - 1) / (size - 1) */
	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						sg_len, dma_data_dir);
			/* total_bytes_left == 0 keeps the IRQ handler's PIO
			 * path out of the way during a DMA transfer */
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}
1145 
/*
 * Kick off a request on the host.  Data setup must happen before the
 * command is issued, and the DMA channel is started only after the
 * command, once dma_in_use has been decided by mmc_omap_prepare_data().
 * Exactly one request may be in flight (host->mrq must be NULL here).
 */
static void mmc_omap_start_request(struct mmc_omap_host *host,
				   struct mmc_request *req)
{
	BUG_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use)
		omap_start_dma(host->dma_ch);
	BUG_ON(irqs_disabled());
}
1160 
/*
 * mmc_host_ops .request entry point.  Hosts are multiplexed across
 * slots: if another slot currently owns the controller (host->mmc set),
 * the request is parked on slot->mrq and will be started when the slot
 * is selected later; otherwise this slot claims the controller and the
 * request starts immediately.  slot_lock serializes the ownership check.
 */
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	spin_lock_irqsave(&host->slot_lock, flags);
	if (host->mmc != NULL) {
		/* Controller busy with another slot: defer this request */
		BUG_ON(slot->mrq != NULL);
		slot->mrq = req;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		return;
	} else
		host->mmc = mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
	mmc_omap_select_slot(slot, 1);
	mmc_omap_start_request(host, req);
}
1179 
1180 static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
1181 				int vdd)
1182 {
1183 	struct mmc_omap_host *host;
1184 
1185 	host = slot->host;
1186 
1187 	if (slot->pdata->set_power != NULL)
1188 		slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
1189 					vdd);
1190 
1191 	if (cpu_is_omap24xx()) {
1192 		u16 w;
1193 
1194 		if (power_on) {
1195 			w = OMAP_MMC_READ(host, CON);
1196 			OMAP_MMC_WRITE(host, CON, w | (1 << 11));
1197 		} else {
1198 			w = OMAP_MMC_READ(host, CON);
1199 			OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
1200 		}
1201 	}
1202 }
1203 
1204 static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
1205 {
1206 	struct mmc_omap_slot *slot = mmc_priv(mmc);
1207 	struct mmc_omap_host *host = slot->host;
1208 	int func_clk_rate = clk_get_rate(host->fclk);
1209 	int dsor;
1210 
1211 	if (ios->clock == 0)
1212 		return 0;
1213 
1214 	dsor = func_clk_rate / ios->clock;
1215 	if (dsor < 1)
1216 		dsor = 1;
1217 
1218 	if (func_clk_rate / dsor > ios->clock)
1219 		dsor++;
1220 
1221 	if (dsor > 250)
1222 		dsor = 250;
1223 
1224 	slot->fclk_freq = func_clk_rate / dsor;
1225 
1226 	if (ios->bus_width == MMC_BUS_WIDTH_4)
1227 		dsor |= 1 << 15;
1228 
1229 	return dsor;
1230 }
1231 
/*
 * mmc_host_ops .set_ios entry point: apply clock, vdd, power mode and
 * bus mode.  The slot must be selected for the register accesses, and
 * is released (with the fclk state it acquired) on every exit path.
 */
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	int i, dsor;
	int clk_enabled;

	mmc_omap_select_slot(slot, 0);

	dsor = mmc_omap_calc_divisor(mmc, ios);

	if (ios->vdd != slot->vdd)
		slot->vdd = ios->vdd;

	clk_enabled = 0;
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		mmc_omap_set_power(slot, 0, ios->vdd);
		break;
	case MMC_POWER_UP:
		/* Cannot touch dsor yet, just power up MMC */
		mmc_omap_set_power(slot, 1, ios->vdd);
		goto exit;
	case MMC_POWER_ON:
		mmc_omap_fclk_enable(host, 1);
		clk_enabled = 1;
		dsor |= 1 << 11;	/* POW bit: enable the clock output */
		break;
	}

	if (slot->bus_mode != ios->bus_mode) {
		if (slot->pdata->set_bus_mode != NULL)
			slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
						  ios->bus_mode);
		slot->bus_mode = ios->bus_mode;
	}

	/* On insanely high arm_per frequencies something sometimes
	 * goes somehow out of sync, and the POW bit is not being set,
	 * which results in the while loop below getting stuck.
	 * Writing to the CON register twice seems to do the trick. */
	for (i = 0; i < 2; i++)
		OMAP_MMC_WRITE(host, CON, dsor);
	slot->saved_con = dsor;
	if (ios->power_mode == MMC_POWER_ON) {
		/* worst case at 400kHz, 80 cycles makes 200 microsecs */
		int usecs = 250;

		/* Send clock cycles, poll completion */
		OMAP_MMC_WRITE(host, IE, 0);
		OMAP_MMC_WRITE(host, STAT, 0xffff);
		OMAP_MMC_WRITE(host, CMD, 1 << 7);
		/* Busy-wait (bounded) for the end-of-command status bit */
		while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
			udelay(1);
			usecs--;
		}
		OMAP_MMC_WRITE(host, STAT, 1);	/* ack the status bit */
	}

exit:
	mmc_omap_release_slot(slot, clk_enabled);
}
1294 
/* Host controller operations handed to the MMC core for each slot. */
static const struct mmc_host_ops mmc_omap_ops = {
	.request	= mmc_omap_request,
	.set_ios	= mmc_omap_set_ios,
};
1299 
1300 static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1301 {
1302 	struct mmc_omap_slot *slot = NULL;
1303 	struct mmc_host *mmc;
1304 	int r;
1305 
1306 	mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
1307 	if (mmc == NULL)
1308 		return -ENOMEM;
1309 
1310 	slot = mmc_priv(mmc);
1311 	slot->host = host;
1312 	slot->mmc = mmc;
1313 	slot->id = id;
1314 	slot->pdata = &host->pdata->slots[id];
1315 
1316 	host->slots[id] = slot;
1317 
1318 	mmc->caps = 0;
1319 	if (host->pdata->slots[id].wires >= 4)
1320 		mmc->caps |= MMC_CAP_4_BIT_DATA;
1321 
1322 	mmc->ops = &mmc_omap_ops;
1323 	mmc->f_min = 400000;
1324 
1325 	if (cpu_class_is_omap2())
1326 		mmc->f_max = 48000000;
1327 	else
1328 		mmc->f_max = 24000000;
1329 	if (host->pdata->max_freq)
1330 		mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
1331 	mmc->ocr_avail = slot->pdata->ocr_mask;
1332 
1333 	/* Use scatterlist DMA to reduce per-transfer costs.
1334 	 * NOTE max_seg_size assumption that small blocks aren't
1335 	 * normally used (except e.g. for reading SD registers).
1336 	 */
1337 	mmc->max_phys_segs = 32;
1338 	mmc->max_hw_segs = 32;
1339 	mmc->max_blk_size = 2048;	/* BLEN is 11 bits (+1) */
1340 	mmc->max_blk_count = 2048;	/* NBLK is 11 bits (+1) */
1341 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1342 	mmc->max_seg_size = mmc->max_req_size;
1343 
1344 	r = mmc_add_host(mmc);
1345 	if (r < 0)
1346 		goto err_remove_host;
1347 
1348 	if (slot->pdata->name != NULL) {
1349 		r = device_create_file(&mmc->class_dev,
1350 					&dev_attr_slot_name);
1351 		if (r < 0)
1352 			goto err_remove_host;
1353 	}
1354 
1355 	if (slot->pdata->get_cover_state != NULL) {
1356 		r = device_create_file(&mmc->class_dev,
1357 					&dev_attr_cover_switch);
1358 		if (r < 0)
1359 			goto err_remove_slot_name;
1360 
1361 		setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
1362 			    (unsigned long)slot);
1363 		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
1364 			     (unsigned long)slot);
1365 		tasklet_schedule(&slot->cover_tasklet);
1366 	}
1367 
1368 	return 0;
1369 
1370 err_remove_slot_name:
1371 	if (slot->pdata->name != NULL)
1372 		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1373 err_remove_host:
1374 	mmc_remove_host(mmc);
1375 	mmc_free_host(mmc);
1376 	return r;
1377 }
1378 
/*
 * Tear down one slot: remove its sysfs attributes, stop the
 * cover-detection machinery and unregister the mmc_host.
 *
 * NOTE(review): the tasklet is killed before the timer is deleted; if
 * the timer callback re-schedules the tasklet, a timer firing between
 * these two calls could leave the tasklet pending — confirm against
 * mmc_omap_cover_timer()/mmc_omap_cover_handler() (outside this view).
 */
static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;

	if (slot->pdata->name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
	if (slot->pdata->get_cover_state != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);

	tasklet_kill(&slot->cover_tasklet);
	del_timer_sync(&slot->cover_timer);
	flush_scheduled_work();

	mmc_remove_host(mmc);
	mmc_free_host(mmc);
}
1395 
1396 static int __init mmc_omap_probe(struct platform_device *pdev)
1397 {
1398 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1399 	struct mmc_omap_host *host = NULL;
1400 	struct resource *res;
1401 	int i, ret = 0;
1402 	int irq;
1403 
1404 	if (pdata == NULL) {
1405 		dev_err(&pdev->dev, "platform data missing\n");
1406 		return -ENXIO;
1407 	}
1408 	if (pdata->nr_slots == 0) {
1409 		dev_err(&pdev->dev, "no slots\n");
1410 		return -ENXIO;
1411 	}
1412 
1413 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1414 	irq = platform_get_irq(pdev, 0);
1415 	if (res == NULL || irq < 0)
1416 		return -ENXIO;
1417 
1418 	res = request_mem_region(res->start, res->end - res->start + 1,
1419 				 pdev->name);
1420 	if (res == NULL)
1421 		return -EBUSY;
1422 
1423 	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
1424 	if (host == NULL) {
1425 		ret = -ENOMEM;
1426 		goto err_free_mem_region;
1427 	}
1428 
1429 	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
1430 	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
1431 
1432 	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
1433 	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
1434 		    (unsigned long) host);
1435 
1436 	spin_lock_init(&host->clk_lock);
1437 	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
1438 
1439 	spin_lock_init(&host->dma_lock);
1440 	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
1441 	spin_lock_init(&host->slot_lock);
1442 	init_waitqueue_head(&host->slot_wq);
1443 
1444 	host->pdata = pdata;
1445 	host->dev = &pdev->dev;
1446 	platform_set_drvdata(pdev, host);
1447 
1448 	host->id = pdev->id;
1449 	host->mem_res = res;
1450 	host->irq = irq;
1451 
1452 	host->use_dma = 1;
1453 	host->dev->dma_mask = &pdata->dma_mask;
1454 	host->dma_ch = -1;
1455 
1456 	host->irq = irq;
1457 	host->phys_base = host->mem_res->start;
1458 	host->virt_base = ioremap(res->start, res->end - res->start + 1);
1459 	if (!host->virt_base)
1460 		goto err_ioremap;
1461 
1462 	host->iclk = clk_get(&pdev->dev, "ick");
1463 	if (IS_ERR(host->iclk)) {
1464 		ret = PTR_ERR(host->iclk);
1465 		goto err_free_mmc_host;
1466 	}
1467 	clk_enable(host->iclk);
1468 
1469 	host->fclk = clk_get(&pdev->dev, "fck");
1470 	if (IS_ERR(host->fclk)) {
1471 		ret = PTR_ERR(host->fclk);
1472 		goto err_free_iclk;
1473 	}
1474 
1475 	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1476 	if (ret)
1477 		goto err_free_fclk;
1478 
1479 	if (pdata->init != NULL) {
1480 		ret = pdata->init(&pdev->dev);
1481 		if (ret < 0)
1482 			goto err_free_irq;
1483 	}
1484 
1485 	host->nr_slots = pdata->nr_slots;
1486 	for (i = 0; i < pdata->nr_slots; i++) {
1487 		ret = mmc_omap_new_slot(host, i);
1488 		if (ret < 0) {
1489 			while (--i >= 0)
1490 				mmc_omap_remove_slot(host->slots[i]);
1491 
1492 			goto err_plat_cleanup;
1493 		}
1494 	}
1495 
1496 	return 0;
1497 
1498 err_plat_cleanup:
1499 	if (pdata->cleanup)
1500 		pdata->cleanup(&pdev->dev);
1501 err_free_irq:
1502 	free_irq(host->irq, host);
1503 err_free_fclk:
1504 	clk_put(host->fclk);
1505 err_free_iclk:
1506 	clk_disable(host->iclk);
1507 	clk_put(host->iclk);
1508 err_free_mmc_host:
1509 	iounmap(host->virt_base);
1510 err_ioremap:
1511 	kfree(host);
1512 err_free_mem_region:
1513 	release_mem_region(res->start, res->end - res->start + 1);
1514 	return ret;
1515 }
1516 
/*
 * Platform remove: tear everything down in reverse order of probe —
 * slots first, then the board cleanup hook, clocks, IRQ, register
 * mapping and memory region, and finally the host structure itself.
 */
static int mmc_omap_remove(struct platform_device *pdev)
{
	struct mmc_omap_host *host = platform_get_drvdata(pdev);
	int i;

	platform_set_drvdata(pdev, NULL);

	BUG_ON(host == NULL);

	for (i = 0; i < host->nr_slots; i++)
		mmc_omap_remove_slot(host->slots[i]);

	if (host->pdata->cleanup)
		host->pdata->cleanup(&pdev->dev);

	mmc_omap_fclk_enable(host, 0);
	free_irq(host->irq, host);
	clk_put(host->fclk);
	clk_disable(host->iclk);
	clk_put(host->iclk);

	iounmap(host->virt_base);
	release_mem_region(pdev->resource[0].start,
			   pdev->resource[0].end - pdev->resource[0].start + 1);

	kfree(host);

	return 0;
}
1546 
1547 #ifdef CONFIG_PM
1548 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1549 {
1550 	int i, ret = 0;
1551 	struct mmc_omap_host *host = platform_get_drvdata(pdev);
1552 
1553 	if (host == NULL || host->suspended)
1554 		return 0;
1555 
1556 	for (i = 0; i < host->nr_slots; i++) {
1557 		struct mmc_omap_slot *slot;
1558 
1559 		slot = host->slots[i];
1560 		ret = mmc_suspend_host(slot->mmc, mesg);
1561 		if (ret < 0) {
1562 			while (--i >= 0) {
1563 				slot = host->slots[i];
1564 				mmc_resume_host(slot->mmc);
1565 			}
1566 			return ret;
1567 		}
1568 	}
1569 	host->suspended = 1;
1570 	return 0;
1571 }
1572 
1573 static int mmc_omap_resume(struct platform_device *pdev)
1574 {
1575 	int i, ret = 0;
1576 	struct mmc_omap_host *host = platform_get_drvdata(pdev);
1577 
1578 	if (host == NULL || !host->suspended)
1579 		return 0;
1580 
1581 	for (i = 0; i < host->nr_slots; i++) {
1582 		struct mmc_omap_slot *slot;
1583 		slot = host->slots[i];
1584 		ret = mmc_resume_host(slot->mmc);
1585 		if (ret < 0)
1586 			return ret;
1587 
1588 		host->suspended = 0;
1589 	}
1590 	return 0;
1591 }
1592 #else
1593 #define mmc_omap_suspend	NULL
1594 #define mmc_omap_resume		NULL
1595 #endif
1596 
/*
 * Platform driver definition.  No .probe member: mmc_omap_probe() is
 * registered via platform_driver_probe() in mmc_omap_init(), which
 * allows the probe routine to live in __init memory.
 */
static struct platform_driver mmc_omap_driver = {
	.remove		= mmc_omap_remove,
	.suspend	= mmc_omap_suspend,
	.resume		= mmc_omap_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1606 
/* Module init: register the driver, binding mmc_omap_probe() once. */
static int __init mmc_omap_init(void)
{
	return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
}
1611 
/* Module exit: unregister the platform driver. */
static void __exit mmc_omap_exit(void)
{
	platform_driver_unregister(&mmc_omap_driver);
}
1616 
1617 module_init(mmc_omap_init);
1618 module_exit(mmc_omap_exit);
1619 
1620 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1621 MODULE_LICENSE("GPL");
1622 MODULE_ALIAS("platform:" DRIVER_NAME);
1623 MODULE_AUTHOR("Juha Yrj�l�");
1624