xref: /linux/drivers/mmc/host/sdhci.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * Thanks to the following companies for their support:
12  *
13  *     - JMicron (hardware and technical support)
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
18 #include <linux/io.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <linux/regulator/consumer.h>
23 
24 #include <linux/leds.h>
25 
26 #include <linux/mmc/host.h>
27 
28 #include "sdhci.h"
29 
#define DRIVER_NAME "sdhci"

/* Debug printout helper: prefixes the message with driver and function name. */
#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

/*
 * Use the LED class framework only when it is guaranteed to be reachable:
 * either the LED class is built in, or both it and this driver are modules.
 * (A built-in driver cannot call into a modular LED class.)
 */
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

/* Extra quirk bits; presumably wired to a module parameter elsewhere in the file. */
static unsigned int debug_quirks = 0;

/* Forward declarations for the request/data state machine. */
static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
47 
48 static void sdhci_dumpregs(struct sdhci_host *host)
49 {
50 	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
51 
52 	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
53 		sdhci_readl(host, SDHCI_DMA_ADDRESS),
54 		sdhci_readw(host, SDHCI_HOST_VERSION));
55 	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
56 		sdhci_readw(host, SDHCI_BLOCK_SIZE),
57 		sdhci_readw(host, SDHCI_BLOCK_COUNT));
58 	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
59 		sdhci_readl(host, SDHCI_ARGUMENT),
60 		sdhci_readw(host, SDHCI_TRANSFER_MODE));
61 	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
62 		sdhci_readl(host, SDHCI_PRESENT_STATE),
63 		sdhci_readb(host, SDHCI_HOST_CONTROL));
64 	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
65 		sdhci_readb(host, SDHCI_POWER_CONTROL),
66 		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
67 	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
68 		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
69 		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
70 	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
71 		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
72 		sdhci_readl(host, SDHCI_INT_STATUS));
73 	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
74 		sdhci_readl(host, SDHCI_INT_ENABLE),
75 		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
76 	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
77 		sdhci_readw(host, SDHCI_ACMD12_ERR),
78 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
79 	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
80 		sdhci_readl(host, SDHCI_CAPABILITIES),
81 		sdhci_readl(host, SDHCI_MAX_CURRENT));
82 
83 	if (host->flags & SDHCI_USE_ADMA)
84 		printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
85 		       readl(host->ioaddr + SDHCI_ADMA_ERROR),
86 		       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
87 
88 	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
89 }
90 
91 /*****************************************************************************\
92  *                                                                           *
93  * Low level functions                                                       *
94  *                                                                           *
95 \*****************************************************************************/
96 
97 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
98 {
99 	u32 ier;
100 
101 	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
102 	ier &= ~clear;
103 	ier |= set;
104 	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
105 	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
106 }
107 
/* Enable (unmask) the given interrupt bits without touching any others. */
static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, 0, irqs);
}
112 
/* Disable (mask) the given interrupt bits without touching any others. */
static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, irqs, 0);
}
117 
118 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
119 {
120 	u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
121 
122 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
123 		return;
124 
125 	if (enable)
126 		sdhci_unmask_irqs(host, irqs);
127 	else
128 		sdhci_mask_irqs(host, irqs);
129 }
130 
/* Convenience wrapper: enable card insert/remove interrupts. */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}
135 
/* Convenience wrapper: disable card insert/remove interrupts. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
140 
/*
 * Issue a software reset for the register blocks selected by @mask
 * (SDHCI_RESET_CMD/DATA/ALL) and busy-wait up to 100 ms for the
 * hardware to clear the reset bits.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;
	u32 uninitialized_var(ier);

	/* Some controllers hang if reset while no card is inserted. */
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	/* Save the interrupt enables; reset clears them on some hosts. */
	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
		ier = sdhci_readl(host, SDHCI_INT_ENABLE);

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	/* A full reset kills the clock too; force a reprogram later. */
	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* 'ier' is only read on the same quirk path that initialized it. */
	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
}
178 
179 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
180 
181 static void sdhci_init(struct sdhci_host *host, int soft)
182 {
183 	if (soft)
184 		sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
185 	else
186 		sdhci_reset(host, SDHCI_RESET_ALL);
187 
188 	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
189 		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
190 		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
191 		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
192 		SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
193 
194 	if (soft) {
195 		/* force clock reconfiguration */
196 		host->clock = 0;
197 		sdhci_set_ios(host->mmc, &host->mmc->ios);
198 	}
199 }
200 
/* Full reinitialization: hard reset, then re-arm card detect interrupts. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
206 
207 static void sdhci_activate_led(struct sdhci_host *host)
208 {
209 	u8 ctrl;
210 
211 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
212 	ctrl |= SDHCI_CTRL_LED;
213 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
214 }
215 
216 static void sdhci_deactivate_led(struct sdhci_host *host)
217 {
218 	u8 ctrl;
219 
220 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
221 	ctrl &= ~SDHCI_CTRL_LED;
222 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
223 }
224 
#ifdef SDHCI_USE_LEDS_CLASS
/*
 * LED class callback: map brightness to the slot LED bit.
 * Takes host->lock since the LED bit lives in a shared register.
 */
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
242 
243 /*****************************************************************************\
244  *                                                                           *
245  * Core functions                                                            *
246  *                                                                           *
247 \*****************************************************************************/
248 
/*
 * Read one block from the controller's data port (PIO mode).
 *
 * The 32-bit buffer register is drained in 4-byte chunks and unpacked
 * byte-by-byte (LSB first) into the scatterlist via the sg mapping
 * iterator. IRQs are disabled around the copy because the atomic sg
 * iterator may use an atomic kmap.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		/* The scatterlist must cover the whole block. */
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			/* Refill the 4-byte scratch word when drained. */
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
293 
/*
 * Write one block to the controller's data port (PIO mode).
 *
 * Bytes from the scatterlist are packed LSB-first into a 32-bit
 * scratch word which is flushed to the buffer register every 4 bytes
 * (or at the very end of the block, possibly partially filled).
 * IRQs are disabled around the copy because the atomic sg iterator
 * may use an atomic kmap.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		/* The scatterlist must cover the whole block. */
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush on a full word or at end of block. */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
339 
/*
 * Pump as many blocks as the controller currently has buffer space /
 * data available for, in the direction of the active transfer. Stops
 * when either the hardware de-asserts the availability bit or all
 * blocks of the request have been transferred.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	/* Nothing left to do for this request. */
	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
379 
/*
 * Atomically map a scatterlist entry's page and return a pointer to
 * its data (page address + offset). IRQs are disabled via *flags and
 * must be restored by the matching sdhci_kunmap_atomic().
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
385 
/* Undo sdhci_kmap_atomic(): unmap the page and restore IRQ state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
391 
392 static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
393 {
394 	__le32 *dataddr = (__le32 __force *)(desc + 4);
395 	__le16 *cmdlen = (__le16 __force *)desc;
396 
397 	/* SDHCI specification says ADMA descriptors should be 4 byte
398 	 * aligned, so using 16 or 32bit operations should be safe. */
399 
400 	cmdlen[0] = cpu_to_le16(cmd);
401 	cmdlen[1] = cpu_to_le16(len);
402 
403 	dataddr[0] = cpu_to_le32(addr);
404 }
405 
/*
 * Build the ADMA2 descriptor table for @data and DMA-map everything
 * the hardware will touch: the alignment bounce buffer, the data
 * scatterlist, and finally the descriptor table itself.
 *
 * Returns 0 on success, -EINVAL on any mapping failure (with all
 * earlier mappings undone via the goto cleanup chain).
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, 128 * 4, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & 0x3);

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			/* For writes, pre-fill the bounce buffer slot. */
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_set_adma_desc(desc, align_addr, offset, 0x21);

			/* NOTE(review): offset is always <= 3 here, so this
			 * check can never trigger — likely a copy of the
			 * len check below. */
			BUG_ON(offset > 65536);

			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		/* NOTE(review): len == 65536 passes this check but would
		 * truncate to 0 in the 16-bit descriptor length field —
		 * presumably sg entries never reach that size; verify. */
		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_set_adma_desc(desc, addr, len, 0x21);
		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		* Mark the last descriptor as the terminating descriptor
		*/
		if (desc != host->adma_desc) {
			desc -= 8;
			desc[0] |= 0x2; /* end */
		}
	} else {
		/*
		* Add a terminating entry.
		*/

		/* nop, end, valid */
		sdhci_set_adma_desc(desc, 0, 0, 0x3);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 4, direction);
	}

	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
		goto unmap_entries;
	BUG_ON(host->adma_addr & 0x3);

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);
fail:
	return -EINVAL;
}
542 
/*
 * Tear down the mappings created by sdhci_adma_table_pre(). For reads,
 * additionally copy the bytes that landed in the alignment bounce
 * buffer back into the (unaligned) scatterlist entries they belong to.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
		(128 * 2 + 1) * 4, DMA_TO_DEVICE);

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	if (data->flags & MMC_DATA_READ) {
		/* Make the DMA'd data visible to the CPU before copying. */
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		/* Each unaligned sg entry consumed one 4-byte bounce slot. */
		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}
588 
/*
 * Convert the request's timeout (ns + clock cycles) into the 4-bit
 * exponent expected by the timeout control register, where count N
 * means a timeout of 2^(13+N) timeout-clock cycles. Caps at 0xE with
 * a warning if the requested timeout does not fit.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* timeout in us */
	/* NOTE(review): divides by host->clock — assumes the clock is
	 * non-zero whenever a data request is prepared; verify callers. */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
		host->timeout_clk = host->clock / 1000;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;
	}

	return count;
}
637 
638 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
639 {
640 	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
641 	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
642 
643 	if (host->flags & SDHCI_REQ_USE_DMA)
644 		sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
645 	else
646 		sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
647 }
648 
/*
 * Program the controller for the data phase of a request: timeout,
 * transfer method (ADMA / SDMA / PIO, downgrading around quirks),
 * DMA mappings or the PIO sg iterator, transfer interrupts, and the
 * block size/count registers. A NULL @data (command-only request) is
 * a no-op.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	u8 ctrl;
	int ret;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	count = sdhci_calc_timeout(host, data);
	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);

	/* Start optimistic: prefer DMA if the host supports any form. */
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	/* First quirk pass: sg entry *lengths* must be 32-bit multiples. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	/* Second quirk pass: sg entry *offsets* must be 32-bit aligned. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/* Set up the chosen DMA method; fall back to PIO on failure. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				/* SDMA can only handle a single contiguous region. */
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	/* PIO path: set up the sg mapping iterator and block counter. */
	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
809 
810 static void sdhci_set_transfer_mode(struct sdhci_host *host,
811 	struct mmc_data *data)
812 {
813 	u16 mode;
814 
815 	if (data == NULL)
816 		return;
817 
818 	WARN_ON(!host->data);
819 
820 	mode = SDHCI_TRNS_BLK_CNT_EN;
821 	if (data->blocks > 1) {
822 		if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
823 			mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
824 		else
825 			mode |= SDHCI_TRNS_MULTI;
826 	}
827 	if (data->flags & MMC_DATA_READ)
828 		mode |= SDHCI_TRNS_READ;
829 	if (host->flags & SDHCI_REQ_USE_DMA)
830 		mode |= SDHCI_TRNS_DMA;
831 
832 	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
833 }
834 
/*
 * Complete the data phase: undo DMA mappings, account transferred
 * bytes (all-or-nothing, see below), and either issue the stop
 * command or schedule request completion.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
880 
/*
 * Issue @cmd to the controller: wait for the inhibit bits to clear
 * (max 10 ms), arm the software timeout timer, prepare any data
 * phase, then write argument, transfer mode, and the command register
 * (which starts execution). On failure the request is completed with
 * an error via the finish tasklet. Caller holds host->lock.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Software watchdog in case the controller never interrupts. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	/* NOTE(review): this validation happens after prepare_data has
	 * already set up host->data/DMA state for the rejected command. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/* Translate the MMC response flags into SDHCI command flags. */
	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register kicks off execution. */
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
950 
/*
 * Handle command completion: read back the response registers (with
 * the byte-shift dance for 136-bit responses, since the controller
 * strips the CRC), finish any data phase that completed early, and
 * schedule request completion for command-only requests.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Data may have finished before the command (data_early set by IRQ). */
	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}
983 
/*
 * Program the SD clock to the highest power-of-two divisor of max_clk
 * that does not exceed @clock, waiting (max 20 ms) for the internal
 * clock to stabilise before enabling the card clock. A @clock of 0
 * leaves the clock disabled. Hosts with their own set_clock op are
 * called first and may take over entirely (nonstandard-clock quirk).
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	if (host->ops->set_clock) {
		host->ops->set_clock(host, clock);
		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
			return;
	}

	/* Always stop the clock before reprogramming it. */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	/* Smallest power-of-two divisor that gets us at or below @clock. */
	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	/* Register encodes divide-by-2N, hence the halving. */
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
1034 
/*
 * Set the bus power. @power is an MMC_VDD_* bit number (as found in
 * ios->vdd), or (unsigned short)-1 to power off. Only 1.8V, 3.0V and
 * 3.3V ranges are supported; anything else is a caller bug (BUG()).
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (power == (unsigned short)-1)
		pwr = 0;
	else {
		/* Map the MMC voltage bit to the SDHCI power register value. */
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and set turn on power at the same time, so set the voltage first.
	 */
	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	/*
	 * Some controllers need an extra 10ms delay before they
	 * can apply clock after applying power
	 */
	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
		mdelay(10);
}
1094 
1095 /*****************************************************************************\
1096  *                                                                           *
1097  * MMC callbacks                                                             *
1098  *                                                                           *
1099 \*****************************************************************************/
1100 
/*
 * mmc_host_ops.request callback: start processing a new request.
 * Completes immediately with -ENOMEDIUM if no card is present (or the
 * device is dead); otherwise kicks off the first command. Runs under
 * host->lock with IRQs disabled.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	bool present;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif
	/*
	 * Drop the explicit stop command when the controller issues
	 * auto-CMD12 itself (see sdhci_set_transfer_mode).
	 * NOTE(review): dereferences mrq->data without a NULL check —
	 * assumes the core only sets mrq->stop alongside mrq->data; verify.
	 */
	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		present = true;
	else
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				SDHCI_CARD_PRESENT;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1141 
/*
 * mmc_host_ops .set_ios callback: apply the clock, power, bus-width
 * and timing settings requested by the MMC core.
 *
 * All register accesses happen under host->lock with IRQs disabled.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Nothing to program once the controller has been declared dead. */
	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	sdhci_set_clock(host, ios->clock);

	/* -1 selects "power off" in sdhci_set_power(). */
	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	/*
	 * Read-modify-write of the host control register for bus width
	 * and high-speed mode.
	 */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		ctrl |= SDHCI_CTRL_8BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_8BITBUS;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

out:
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1202 
1203 static int sdhci_get_ro(struct mmc_host *mmc)
1204 {
1205 	struct sdhci_host *host;
1206 	unsigned long flags;
1207 	int present;
1208 
1209 	host = mmc_priv(mmc);
1210 
1211 	spin_lock_irqsave(&host->lock, flags);
1212 
1213 	if (host->flags & SDHCI_DEVICE_DEAD)
1214 		present = 0;
1215 	else
1216 		present = sdhci_readl(host, SDHCI_PRESENT_STATE);
1217 
1218 	spin_unlock_irqrestore(&host->lock, flags);
1219 
1220 	if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
1221 		return !!(present & SDHCI_WRITE_PROTECT);
1222 	return !(present & SDHCI_WRITE_PROTECT);
1223 }
1224 
1225 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1226 {
1227 	struct sdhci_host *host;
1228 	unsigned long flags;
1229 
1230 	host = mmc_priv(mmc);
1231 
1232 	spin_lock_irqsave(&host->lock, flags);
1233 
1234 	if (host->flags & SDHCI_DEVICE_DEAD)
1235 		goto out;
1236 
1237 	if (enable)
1238 		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1239 	else
1240 		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1241 out:
1242 	mmiowb();
1243 
1244 	spin_unlock_irqrestore(&host->lock, flags);
1245 }
1246 
/* Host operations exported to the MMC core. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};
1253 
1254 /*****************************************************************************\
1255  *                                                                           *
1256  * Tasklets                                                                  *
1257  *                                                                           *
1258 \*****************************************************************************/
1259 
/*
 * Card-detect tasklet: if the card has gone away while a request was
 * in flight, reset the command/data state machines and fail the
 * request with -ENOMEDIUM, then notify the MMC core of the (possible)
 * card change after a short debounce delay.
 */
static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			/* Complete the request through the finish tasklet. */
			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	/* 200ms debounce before the core rescans the slot. */
	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
1288 
/*
 * Bottom half that completes the current request: stops the timeout
 * timer, resets the controller state machines if the request failed
 * (or the RESET_AFTER_REQUEST quirk demands it), clears the in-flight
 * pointers and hands the request back to the MMC core.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
		(mrq->cmd->error ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	/* No request is in flight any more. */
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Completion callback runs after the lock is dropped. */
	mmc_request_done(host->mmc, mrq);
}
1342 
/*
 * Software timeout handler: fires when the hardware never raised the
 * expected interrupt for the current request.  Dumps the registers for
 * diagnosis, marks the in-flight data/command/request with -ETIMEDOUT
 * and kicks the finish tasklet to complete it.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			/* Data phase timed out; finish it directly. */
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			/* Flag whichever command is still outstanding. */
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1373 
1374 /*****************************************************************************\
1375  *                                                                           *
1376  * Interrupt handling                                                        *
1377  *                                                                           *
1378 \*****************************************************************************/
1379 
/*
 * Handle command-related interrupt bits (already acknowledged by the
 * caller).  Translates error bits into an errno on the current
 * command, or finishes the command on a response interrupt.
 * Called from sdhci_irq() with host->lock held.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	/* Spurious interrupt: no command in flight. */
	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
			/* Wait for the end-of-busy (data end) interrupt. */
			return;

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
1428 
#ifdef DEBUG
/*
 * Dump the ADMA descriptor table after an ADMA error interrupt.
 * Each descriptor is 8 bytes: attribute byte at offset 0, 16-bit
 * length at offset 2, 32-bit DMA address at offset 4.  The walk stops
 * at the descriptor whose END attribute (bit 1) is set.
 */
static void sdhci_show_adma_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	u8 *desc = host->adma_desc;
	__le32 *dma;
	__le16 *len;
	u8 attr;

	sdhci_dumpregs(host);

	while (true) {
		dma = (__le32 *)(desc + 4);
		len = (__le16 *)(desc + 2);
		attr = *desc;

		DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
		    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);

		desc += 8;

		/* END attribute terminates the descriptor table. */
		if (attr & 2)
			break;
	}
}
#else
/* No-op stub when DEBUG is off. */
static void sdhci_show_adma_error(struct sdhci_host *host) { }
#endif
1457 
/*
 * Handle data-related interrupt bits (already acknowledged by the
 * caller).  Handles the end-of-busy special case, translates error
 * bits into an errno on the current transfer, services PIO and DMA
 * progress interrupts, and finishes the transfer on data end.
 * Called from sdhci_irq() with host->lock held.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_END) {
				sdhci_finish_command(host);
				return;
			}
		}

		/* Otherwise this is a spurious data interrupt. */
		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_show_adma_error(host);
		host->data->error = -EIO;
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		/* PIO: move more data to/from the buffer register. */
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
				SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
1522 
/*
 * Top-level interrupt handler.  Reads the interrupt status register,
 * acknowledges each group of bits by writing them back, and dispatches
 * to the card-detect tasklet, the command handler and the data
 * handler.  Card interrupts (SDIO) are only flagged here and signalled
 * to the MMC core after the lock is dropped.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;
	int cardint = 0;

	spin_lock(&host->lock);

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);

	/* 0xffffffff typically means the device is gone (e.g. ejected). */
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n",
		mmc_hostname(host->mmc), intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		/* Ack insert/remove and defer handling to the tasklet. */
		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		/* Ack before handling so new events are not lost. */
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
			SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
			SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)
		cardint = 1;

	intmask &= ~SDHCI_INT_CARD_INT;

	/* Anything left over is unexpected; ack it so it won't re-fire. */
	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}
1601 
1602 /*****************************************************************************\
1603  *                                                                           *
1604  * Suspend/resume                                                            *
1605  *                                                                           *
1606 \*****************************************************************************/
1607 
1608 #ifdef CONFIG_PM
1609 
/*
 * Suspend the host controller: stop card-detect handling, let the MMC
 * core suspend the card, release the interrupt line and cut the vmmc
 * supply (when one was found at probe time).
 *
 * Returns 0 on success or a negative errno from the core or the
 * regulator.
 */
int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
{
	int ret;

	sdhci_disable_card_detection(host);

	ret = mmc_suspend_host(host->mmc);
	if (ret)
		return ret;

	free_irq(host->irq, host);

	/* ret stays 0 from mmc_suspend_host() if there is no regulator. */
	if (host->vmmc)
		ret = regulator_disable(host->vmmc);

	return ret;
}
1627 
1628 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1629 
1630 int sdhci_resume_host(struct sdhci_host *host)
1631 {
1632 	int ret;
1633 
1634 	if (host->vmmc) {
1635 		int ret = regulator_enable(host->vmmc);
1636 		if (ret)
1637 			return ret;
1638 	}
1639 
1640 
1641 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1642 		if (host->ops->enable_dma)
1643 			host->ops->enable_dma(host);
1644 	}
1645 
1646 	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1647 			  mmc_hostname(host->mmc), host);
1648 	if (ret)
1649 		return ret;
1650 
1651 	sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
1652 	mmiowb();
1653 
1654 	ret = mmc_resume_host(host->mmc);
1655 	sdhci_enable_card_detection(host);
1656 
1657 	return ret;
1658 }
1659 
1660 EXPORT_SYMBOL_GPL(sdhci_resume_host);
1661 
1662 #endif /* CONFIG_PM */
1663 
1664 /*****************************************************************************\
1665  *                                                                           *
1666  * Device allocation/registration                                            *
1667  *                                                                           *
1668 \*****************************************************************************/
1669 
1670 struct sdhci_host *sdhci_alloc_host(struct device *dev,
1671 	size_t priv_size)
1672 {
1673 	struct mmc_host *mmc;
1674 	struct sdhci_host *host;
1675 
1676 	WARN_ON(dev == NULL);
1677 
1678 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
1679 	if (!mmc)
1680 		return ERR_PTR(-ENOMEM);
1681 
1682 	host = mmc_priv(mmc);
1683 	host->mmc = mmc;
1684 
1685 	return host;
1686 }
1687 
1688 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1689 
/*
 * Probe-time registration: reset the controller, read the version and
 * capabilities registers, pick a transfer mode (ADMA/SDMA/PIO), derive
 * clock limits, fill in the mmc_host parameters and register the host
 * with the MMC core.
 *
 * Returns 0 on success or a negative errno; on failure the resources
 * acquired here are released again via the error labels at the bottom.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	unsigned int caps;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/* The module parameter overrides the platform quirk mask. */
	if (debug_quirks)
		host->quirks = debug_quirks;

	sdhci_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_200) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}

	/*
	 * SDHCI_QUIRK_MISSING_CAPS lets the platform driver supply the
	 * capabilities in host->caps when the register is unreliable.
	 */
	caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	/* ADMA2 needs at least a spec 2.00 controller. */
	if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/* Let the platform set up DMA; fall back to PIO if it cannot. */
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				printk(KERN_WARNING "%s: No suitable DMA "
					"available. Falling back to PIO.\n",
					mmc_hostname(mmc));
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
			}
		}
	}

	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * We need to allocate descriptors for all sg entries
		 * (128) and potentially one alignment transfer for
		 * each of those entries.
		 */
		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
		if (!host->adma_desc || !host->align_buffer) {
			kfree(host->adma_desc);
			kfree(host->align_buffer);
			printk(KERN_WARNING "%s: Unable to allocate ADMA "
				"buffers. Falling back to standard DMA.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
	}

	/* Base clock is reported in MHz in the capabilities register. */
	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		if (host->ops->get_timeout_clock) {
			host->timeout_clk = host->ops->get_timeout_clock(host);
		} else if (!(host->quirks &
				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify timeout clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
	}
	/* Unit bit selects MHz rather than kHz for the timeout clock. */
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else
		mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps |= MMC_CAP_SDIO_IRQ;

	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* Translate voltage capability bits into the OCR mask. */
	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_hw_segs = 128;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_hw_segs = 1;
	else /* PIO */
		mmc->max_hw_segs = 128;
	mmc->max_phys_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_seg_size = 65536;
	else
		mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		/* Encoded value 2 becomes 512 << 2 = 2048 bytes below. */
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			printk(KERN_WARNING "%s: Invalid maximum block size, "
				"assuming 512 bytes\n", mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		mmc_hostname(mmc), host);
	if (ret)
		goto untasklet;

	/* vmmc is optional; a missing regulator is only informational. */
	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
	if (IS_ERR(host->vmmc)) {
		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
		host->vmmc = NULL;
	} else {
		/* NOTE(review): regulator_enable() return value is ignored
		 * here — confirm whether a failure should abort the probe. */
		regulator_enable(host->vmmc);
	}

	sdhci_init(host, 0);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret)
		goto reset;
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
1964 
1965 EXPORT_SYMBOL_GPL(sdhci_add_host);
1966 
1967 void sdhci_remove_host(struct sdhci_host *host, int dead)
1968 {
1969 	unsigned long flags;
1970 
1971 	if (dead) {
1972 		spin_lock_irqsave(&host->lock, flags);
1973 
1974 		host->flags |= SDHCI_DEVICE_DEAD;
1975 
1976 		if (host->mrq) {
1977 			printk(KERN_ERR "%s: Controller removed during "
1978 				" transfer!\n", mmc_hostname(host->mmc));
1979 
1980 			host->mrq->cmd->error = -ENOMEDIUM;
1981 			tasklet_schedule(&host->finish_tasklet);
1982 		}
1983 
1984 		spin_unlock_irqrestore(&host->lock, flags);
1985 	}
1986 
1987 	sdhci_disable_card_detection(host);
1988 
1989 	mmc_remove_host(host->mmc);
1990 
1991 #ifdef SDHCI_USE_LEDS_CLASS
1992 	led_classdev_unregister(&host->led);
1993 #endif
1994 
1995 	if (!dead)
1996 		sdhci_reset(host, SDHCI_RESET_ALL);
1997 
1998 	free_irq(host->irq, host);
1999 
2000 	del_timer_sync(&host->timer);
2001 
2002 	tasklet_kill(&host->card_tasklet);
2003 	tasklet_kill(&host->finish_tasklet);
2004 
2005 	if (host->vmmc) {
2006 		regulator_disable(host->vmmc);
2007 		regulator_put(host->vmmc);
2008 	}
2009 
2010 	kfree(host->adma_desc);
2011 	kfree(host->align_buffer);
2012 
2013 	host->adma_desc = NULL;
2014 	host->align_buffer = NULL;
2015 }
2016 
2017 EXPORT_SYMBOL_GPL(sdhci_remove_host);
2018 
/* Free a host allocated with sdhci_alloc_host(). */
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
2023 
2024 EXPORT_SYMBOL_GPL(sdhci_free_host);
2025 
2026 /*****************************************************************************\
2027  *                                                                           *
2028  * Driver init/exit                                                          *
2029  *                                                                           *
2030 \*****************************************************************************/
2031 
/* Module init: only announces the core driver; hosts register later. */
static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}
2040 
/* Module exit: nothing to do; hosts unregister themselves. */
static void __exit sdhci_drv_exit(void)
{
}
2044 
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Read-only module parameter to force a quirk mask (debug aid). */
module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
2055