xref: /linux/drivers/mmc/host/sdhci.c (revision 55f3538c4923e9dfca132e99ebec370e8094afda)
1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * Thanks to the following companies for their support:
12  *
13  *     - JMicron (hardware and technical support)
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/ktime.h>
18 #include <linux/highmem.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <linux/swiotlb.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/of.h>
28 
29 #include <linux/leds.h>
30 
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/slot-gpio.h>
36 
37 #include "sdhci.h"
38 
39 #define DRIVER_NAME "sdhci"
40 
41 #define DBG(f, x...) \
42 	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
43 
44 #define SDHCI_DUMP(f, x...) \
45 	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
46 
47 #define MAX_TUNING_LOOP 40
48 
49 static unsigned int debug_quirks = 0;
50 static unsigned int debug_quirks2;
51 
52 static void sdhci_finish_data(struct sdhci_host *);
53 
54 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
55 
56 void sdhci_dumpregs(struct sdhci_host *host)
57 {
58 	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
59 
60 	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
61 		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
62 		   sdhci_readw(host, SDHCI_HOST_VERSION));
63 	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
64 		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
65 		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
66 	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
67 		   sdhci_readl(host, SDHCI_ARGUMENT),
68 		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
69 	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
70 		   sdhci_readl(host, SDHCI_PRESENT_STATE),
71 		   sdhci_readb(host, SDHCI_HOST_CONTROL));
72 	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
73 		   sdhci_readb(host, SDHCI_POWER_CONTROL),
74 		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
75 	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
76 		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
77 		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
78 	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
79 		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
80 		   sdhci_readl(host, SDHCI_INT_STATUS));
81 	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
82 		   sdhci_readl(host, SDHCI_INT_ENABLE),
83 		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
84 	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
85 		   sdhci_readw(host, SDHCI_ACMD12_ERR),
86 		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
87 	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
88 		   sdhci_readl(host, SDHCI_CAPABILITIES),
89 		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
90 	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
91 		   sdhci_readw(host, SDHCI_COMMAND),
92 		   sdhci_readl(host, SDHCI_MAX_CURRENT));
93 	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
94 		   sdhci_readl(host, SDHCI_RESPONSE),
95 		   sdhci_readl(host, SDHCI_RESPONSE + 4));
96 	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
97 		   sdhci_readl(host, SDHCI_RESPONSE + 8),
98 		   sdhci_readl(host, SDHCI_RESPONSE + 12));
99 	SDHCI_DUMP("Host ctl2: 0x%08x\n",
100 		   sdhci_readw(host, SDHCI_HOST_CONTROL2));
101 
102 	if (host->flags & SDHCI_USE_ADMA) {
103 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
104 			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
105 				   sdhci_readl(host, SDHCI_ADMA_ERROR),
106 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
107 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
108 		} else {
109 			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
110 				   sdhci_readl(host, SDHCI_ADMA_ERROR),
111 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
112 		}
113 	}
114 
115 	SDHCI_DUMP("============================================\n");
116 }
117 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
118 
119 /*****************************************************************************\
120  *                                                                           *
121  * Low level functions                                                       *
122  *                                                                           *
123 \*****************************************************************************/
124 
125 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
126 {
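	/*
	 * A command occupies the data lines either because it carries a
	 * data stage or because it signals busy on DAT0 (MMC_RSP_BUSY).
	 */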
127 	return cmd->data || cmd->flags & MMC_RSP_BUSY;
128 }
129 
130 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
131 {
132 	u32 present;
133 
134 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
135 	    !mmc_card_is_removable(host->mmc))
136 		return;
137 
138 	if (enable) {
139 		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
140 				      SDHCI_CARD_PRESENT;
141 
142 		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
143 				       SDHCI_INT_CARD_INSERT;
144 	} else {
145 		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
146 	}
147 
148 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
149 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
150 }
151 
152 static void sdhci_enable_card_detection(struct sdhci_host *host)
153 {
154 	sdhci_set_card_detection(host, true);
155 }
156 
157 static void sdhci_disable_card_detection(struct sdhci_host *host)
158 {
159 	sdhci_set_card_detection(host, false);
160 }
161 
162 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
163 {
164 	if (host->bus_on)
165 		return;
166 	host->bus_on = true;
167 	pm_runtime_get_noresume(host->mmc->parent);
168 }
169 
170 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
171 {
172 	if (!host->bus_on)
173 		return;
174 	host->bus_on = false;
175 	pm_runtime_put_noidle(host->mmc->parent);
176 }
177 
178 void sdhci_reset(struct sdhci_host *host, u8 mask)
179 {
180 	ktime_t timeout;
181 
182 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
183 
184 	if (mask & SDHCI_RESET_ALL) {
185 		host->clock = 0;
186 		/* Reset-all turns off SD Bus Power */
187 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
188 			sdhci_runtime_pm_bus_off(host);
189 	}
190 
191 	/* Wait max 100 ms */
192 	timeout = ktime_add_ms(ktime_get(), 100);
193 
194 	/* hw clears the bit when it's done */
195 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
196 		if (ktime_after(ktime_get(), timeout)) {
197 			pr_err("%s: Reset 0x%x never completed.\n",
198 				mmc_hostname(host->mmc), (int)mask);
199 			sdhci_dumpregs(host);
200 			return;
201 		}
202 		udelay(10);
203 	}
204 }
205 EXPORT_SYMBOL_GPL(sdhci_reset);
206 
207 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
208 {
209 	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
210 		struct mmc_host *mmc = host->mmc;
211 
212 		if (!mmc->ops->get_cd(mmc))
213 			return;
214 	}
215 
216 	host->ops->reset(host, mask);
217 
218 	if (mask & SDHCI_RESET_ALL) {
219 		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
220 			if (host->ops->enable_dma)
221 				host->ops->enable_dma(host);
222 		}
223 
224 		/* Resetting the controller clears many settings */
225 		host->preset_enabled = false;
226 	}
227 }
228 
229 static void sdhci_set_default_irqs(struct sdhci_host *host)
230 {
231 	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
232 		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
233 		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
234 		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
235 		    SDHCI_INT_RESPONSE;
236 
237 	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
238 	    host->tuning_mode == SDHCI_TUNING_MODE_3)
239 		host->ier |= SDHCI_INT_RETUNE;
240 
241 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
242 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
243 }
244 
245 static void sdhci_init(struct sdhci_host *host, int soft)
246 {
247 	struct mmc_host *mmc = host->mmc;
248 
249 	if (soft)
250 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
251 	else
252 		sdhci_do_reset(host, SDHCI_RESET_ALL);
253 
254 	sdhci_set_default_irqs(host);
255 
256 	host->cqe_on = false;
257 
258 	if (soft) {
259 		/* force clock reconfiguration */
260 		host->clock = 0;
261 		mmc->ops->set_ios(mmc, &mmc->ios);
262 	}
263 }
264 
265 static void sdhci_reinit(struct sdhci_host *host)
266 {
267 	sdhci_init(host, 0);
268 	sdhci_enable_card_detection(host);
269 }
270 
271 static void __sdhci_led_activate(struct sdhci_host *host)
272 {
273 	u8 ctrl;
274 
275 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
276 	ctrl |= SDHCI_CTRL_LED;
277 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
278 }
279 
280 static void __sdhci_led_deactivate(struct sdhci_host *host)
281 {
282 	u8 ctrl;
283 
284 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
285 	ctrl &= ~SDHCI_CTRL_LED;
286 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
287 }
288 
289 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
290 static void sdhci_led_control(struct led_classdev *led,
291 			      enum led_brightness brightness)
292 {
293 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
294 	unsigned long flags;
295 
296 	spin_lock_irqsave(&host->lock, flags);
297 
298 	if (host->runtime_suspended)
299 		goto out;
300 
301 	if (brightness == LED_OFF)
302 		__sdhci_led_deactivate(host);
303 	else
304 		__sdhci_led_activate(host);
305 out:
306 	spin_unlock_irqrestore(&host->lock, flags);
307 }
308 
309 static int sdhci_led_register(struct sdhci_host *host)
310 {
311 	struct mmc_host *mmc = host->mmc;
312 
313 	snprintf(host->led_name, sizeof(host->led_name),
314 		 "%s::", mmc_hostname(mmc));
315 
316 	host->led.name = host->led_name;
317 	host->led.brightness = LED_OFF;
318 	host->led.default_trigger = mmc_hostname(mmc);
319 	host->led.brightness_set = sdhci_led_control;
320 
321 	return led_classdev_register(mmc_dev(mmc), &host->led);
322 }
323 
324 static void sdhci_led_unregister(struct sdhci_host *host)
325 {
326 	led_classdev_unregister(&host->led);
327 }
328 
329 static inline void sdhci_led_activate(struct sdhci_host *host)
330 {
331 }
332 
333 static inline void sdhci_led_deactivate(struct sdhci_host *host)
334 {
335 }
336 
337 #else
338 
339 static inline int sdhci_led_register(struct sdhci_host *host)
340 {
341 	return 0;
342 }
343 
344 static inline void sdhci_led_unregister(struct sdhci_host *host)
345 {
346 }
347 
348 static inline void sdhci_led_activate(struct sdhci_host *host)
349 {
350 	__sdhci_led_activate(host);
351 }
352 
353 static inline void sdhci_led_deactivate(struct sdhci_host *host)
354 {
355 	__sdhci_led_deactivate(host);
356 }
357 
358 #endif
359 
360 /*****************************************************************************\
361  *                                                                           *
362  * Core functions                                                            *
363  *                                                                           *
364 \*****************************************************************************/
365 
366 static void sdhci_read_block_pio(struct sdhci_host *host)
367 {
368 	unsigned long flags;
369 	size_t blksize, len, chunk;
370 	u32 uninitialized_var(scratch);
371 	u8 *buf;
372 
373 	DBG("PIO reading\n");
374 
375 	blksize = host->data->blksz;
376 	chunk = 0;
377 
378 	local_irq_save(flags);
379 
380 	while (blksize) {
381 		BUG_ON(!sg_miter_next(&host->sg_miter));
382 
383 		len = min(host->sg_miter.length, blksize);
384 
385 		blksize -= len;
386 		host->sg_miter.consumed = len;
387 
388 		buf = host->sg_miter.addr;
389 
390 		while (len) {
391 			if (chunk == 0) {
392 				scratch = sdhci_readl(host, SDHCI_BUFFER);
393 				chunk = 4;
394 			}
395 
396 			*buf = scratch & 0xFF;
397 
398 			buf++;
399 			scratch >>= 8;
400 			chunk--;
401 			len--;
402 		}
403 	}
404 
405 	sg_miter_stop(&host->sg_miter);
406 
407 	local_irq_restore(flags);
408 }
409 
410 static void sdhci_write_block_pio(struct sdhci_host *host)
411 {
412 	unsigned long flags;
413 	size_t blksize, len, chunk;
414 	u32 scratch;
415 	u8 *buf;
416 
417 	DBG("PIO writing\n");
418 
419 	blksize = host->data->blksz;
420 	chunk = 0;
421 	scratch = 0;
422 
423 	local_irq_save(flags);
424 
425 	while (blksize) {
426 		BUG_ON(!sg_miter_next(&host->sg_miter));
427 
428 		len = min(host->sg_miter.length, blksize);
429 
430 		blksize -= len;
431 		host->sg_miter.consumed = len;
432 
433 		buf = host->sg_miter.addr;
434 
435 		while (len) {
436 			scratch |= (u32)*buf << (chunk * 8);
437 
438 			buf++;
439 			chunk++;
440 			len--;
441 
442 			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
443 				sdhci_writel(host, scratch, SDHCI_BUFFER);
444 				chunk = 0;
445 				scratch = 0;
446 			}
447 		}
448 	}
449 
450 	sg_miter_stop(&host->sg_miter);
451 
452 	local_irq_restore(flags);
453 }
454 
455 static void sdhci_transfer_pio(struct sdhci_host *host)
456 {
457 	u32 mask;
458 
459 	if (host->blocks == 0)
460 		return;
461 
462 	if (host->data->flags & MMC_DATA_READ)
463 		mask = SDHCI_DATA_AVAILABLE;
464 	else
465 		mask = SDHCI_SPACE_AVAILABLE;
466 
467 	/*
468 	 * Some controllers (JMicron JMB38x) mess up the buffer bits
469 	 * for transfers < 4 bytes. As long as it is just one block,
470 	 * we can ignore the bits.
471 	 */
472 	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
473 		(host->data->blocks == 1))
474 		mask = ~0;
475 
476 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
477 		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
478 			udelay(100);
479 
480 		if (host->data->flags & MMC_DATA_READ)
481 			sdhci_read_block_pio(host);
482 		else
483 			sdhci_write_block_pio(host);
484 
485 		host->blocks--;
486 		if (host->blocks == 0)
487 			break;
488 	}
489 
490 	DBG("PIO transfer complete.\n");
491 }
492 
493 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
494 				  struct mmc_data *data, int cookie)
495 {
496 	int sg_count;
497 
498 	/*
499 	 * If the data buffers are already mapped, return the previous
500 	 * dma_map_sg() result.
501 	 */
502 	if (data->host_cookie == COOKIE_PRE_MAPPED)
503 		return data->sg_count;
504 
505 	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
506 			      mmc_get_dma_dir(data));
507 
508 	if (sg_count == 0)
509 		return -ENOSPC;
510 
511 	data->sg_count = sg_count;
512 	data->host_cookie = cookie;
513 
514 	return sg_count;
515 }
516 
517 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
518 {
519 	local_irq_save(*flags);
520 	return kmap_atomic(sg_page(sg)) + sg->offset;
521 }
522 
523 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
524 {
525 	kunmap_atomic(buffer);
526 	local_irq_restore(*flags);
527 }
528 
529 static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
530 				  dma_addr_t addr, int len, unsigned cmd)
531 {
532 	struct sdhci_adma2_64_desc *dma_desc = desc;
533 
534 	/* 32-bit and 64-bit descriptors have these members in same position */
535 	dma_desc->cmd = cpu_to_le16(cmd);
536 	dma_desc->len = cpu_to_le16(len);
537 	dma_desc->addr_lo = cpu_to_le32((u32)addr);
538 
539 	if (host->flags & SDHCI_USE_64_BIT_DMA)
540 		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
541 }
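
/*
 * Illustrative example (hypothetical values): a 512-byte chunk at DMA
 * address 0x12345678 would be written as cmd = ADMA2_TRAN_VALID,
 * len = 512, addr_lo = 0x12345678, with addr_hi filled in only when
 * SDHCI_USE_64_BIT_DMA is set.
 */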
542 
543 static void sdhci_adma_mark_end(void *desc)
544 {
545 	struct sdhci_adma2_64_desc *dma_desc = desc;
546 
547 	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
548 	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
549 }
550 
551 static void sdhci_adma_table_pre(struct sdhci_host *host,
552 	struct mmc_data *data, int sg_count)
553 {
554 	struct scatterlist *sg;
555 	unsigned long flags;
556 	dma_addr_t addr, align_addr;
557 	void *desc, *align;
558 	char *buffer;
559 	int len, offset, i;
560 
561 	/*
562 	 * The spec does not specify the endianness of the descriptor
563 	 * table. We currently assume that it is little endian.
564 	 */
565 
566 	host->sg_count = sg_count;
567 
568 	desc = host->adma_table;
569 	align = host->align_buffer;
570 
571 	align_addr = host->align_addr;
572 
573 	for_each_sg(data->sg, sg, host->sg_count, i) {
574 		addr = sg_dma_address(sg);
575 		len = sg_dma_len(sg);
576 
577 		/*
578 		 * The SDHCI specification states that ADMA addresses must
579 		 * be 32-bit aligned. If they aren't, then we use a bounce
580 		 * buffer for the (up to three) bytes that screw up the
581 		 * alignment.
582 		 */
583 		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
584 			 SDHCI_ADMA2_MASK;
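		/*
		 * Example: for addr = 0x1002, offset = (4 - 2) & 3 = 2, so
		 * the first two bytes go through the bounce buffer and the
		 * main descriptor starts at the aligned address 0x1004.
		 */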
585 		if (offset) {
586 			if (data->flags & MMC_DATA_WRITE) {
587 				buffer = sdhci_kmap_atomic(sg, &flags);
588 				memcpy(align, buffer, offset);
589 				sdhci_kunmap_atomic(buffer, &flags);
590 			}
591 
592 			/* tran, valid */
593 			sdhci_adma_write_desc(host, desc, align_addr, offset,
594 					      ADMA2_TRAN_VALID);
595 
596 			BUG_ON(offset > 65536);
597 
598 			align += SDHCI_ADMA2_ALIGN;
599 			align_addr += SDHCI_ADMA2_ALIGN;
600 
601 			desc += host->desc_sz;
602 
603 			addr += offset;
604 			len -= offset;
605 		}
606 
607 		BUG_ON(len > 65536);
608 
609 		if (len) {
610 			/* tran, valid */
611 			sdhci_adma_write_desc(host, desc, addr, len,
612 					      ADMA2_TRAN_VALID);
613 			desc += host->desc_sz;
614 		}
615 
616 		/*
617 		 * If this triggers then we have a calculation bug
618 		 * somewhere. :/
619 		 */
620 		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
621 	}
622 
623 	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
624 		/* Mark the last descriptor as the terminating descriptor */
625 		if (desc != host->adma_table) {
626 			desc -= host->desc_sz;
627 			sdhci_adma_mark_end(desc);
628 		}
629 	} else {
630 		/* Add a terminating entry - nop, end, valid */
631 		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
632 	}
633 }
634 
635 static void sdhci_adma_table_post(struct sdhci_host *host,
636 	struct mmc_data *data)
637 {
638 	struct scatterlist *sg;
639 	int i, size;
640 	void *align;
641 	char *buffer;
642 	unsigned long flags;
643 
644 	if (data->flags & MMC_DATA_READ) {
645 		bool has_unaligned = false;
646 
647 		/* Do a quick scan of the SG list for any unaligned mappings */
648 		for_each_sg(data->sg, sg, host->sg_count, i)
649 			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
650 				has_unaligned = true;
651 				break;
652 			}
653 
654 		if (has_unaligned) {
655 			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
656 					    data->sg_len, DMA_FROM_DEVICE);
657 
658 			align = host->align_buffer;
659 
660 			for_each_sg(data->sg, sg, host->sg_count, i) {
661 				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
662 					size = SDHCI_ADMA2_ALIGN -
663 					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
664 
665 					buffer = sdhci_kmap_atomic(sg, &flags);
666 					memcpy(buffer, align, size);
667 					sdhci_kunmap_atomic(buffer, &flags);
668 
669 					align += SDHCI_ADMA2_ALIGN;
670 				}
671 			}
672 		}
673 	}
674 }
675 
676 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
677 {
678 	u8 count;
679 	struct mmc_data *data = cmd->data;
680 	unsigned target_timeout, current_timeout;
681 
682 	/*
683 	 * If the host controller provides us with an incorrect timeout
684 	 * value, just skip the check and use 0xE.  The hardware may take
685 	 * longer to time out, but that's much better than having a too-short
686 	 * timeout value.
687 	 */
688 	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
689 		return 0xE;
690 
691 	/* Unspecified timeout, assume max */
692 	if (!data && !cmd->busy_timeout)
693 		return 0xE;
694 
695 	/* timeout in us */
696 	if (!data)
697 		target_timeout = cmd->busy_timeout * 1000;
698 	else {
699 		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
700 		if (host->clock && data->timeout_clks) {
701 			unsigned long long val;
702 
703 			/*
704 			 * data->timeout_clks is in units of clock cycles.
705 			 * host->clock is in Hz.  target_timeout is in us.
706 			 * Hence, us = 1000000 * cycles / Hz.  Round up.
707 			 */
708 			val = 1000000ULL * data->timeout_clks;
709 			if (do_div(val, host->clock))
710 				target_timeout++;
711 			target_timeout += val;
712 		}
713 	}
714 
715 	/*
716 	 * Figure out needed cycles.
717 	 * We do this in steps in order to fit inside a 32 bit int.
718 	 * The first step is the minimum timeout, which will have a
719 	 * minimum resolution of 6 bits:
720 	 * (1) 2^13*1000 > 2^22,
721 	 * (2) host->timeout_clk < 2^16
722 	 *     =>
723 	 *     (1) / (2) > 2^6
724 	 */
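	/*
	 * Worked example (illustrative numbers): with timeout_clk =
	 * 48000 kHz the base timeout is (1 << 13) * 1000 / 48000 ~= 170 us,
	 * so a 500 ms target needs count = 12 doublings (170 us * 2^12 ~=
	 * 698 ms), i.e. a hardware timeout of 2^(13 + 12) clock cycles.
	 */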
725 	count = 0;
726 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
727 	while (current_timeout < target_timeout) {
728 		count++;
729 		current_timeout <<= 1;
730 		if (count >= 0xF)
731 			break;
732 	}
733 
734 	if (count >= 0xF) {
735 		DBG("Too large timeout 0x%x requested for CMD%d!\n",
736 		    count, cmd->opcode);
737 		count = 0xE;
738 	}
739 
740 	return count;
741 }
742 
743 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
744 {
745 	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
746 	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
747 
748 	if (host->flags & SDHCI_REQ_USE_DMA)
749 		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
750 	else
751 		host->ier = (host->ier & ~dma_irqs) | pio_irqs;
752 
753 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
754 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
755 }
756 
757 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
758 {
759 	u8 count;
760 
761 	if (host->ops->set_timeout) {
762 		host->ops->set_timeout(host, cmd);
763 	} else {
764 		count = sdhci_calc_timeout(host, cmd);
765 		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
766 	}
767 }
768 
769 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
770 {
771 	u8 ctrl;
772 	struct mmc_data *data = cmd->data;
773 
774 	if (sdhci_data_line_cmd(cmd))
775 		sdhci_set_timeout(host, cmd);
776 
777 	if (!data)
778 		return;
779 
780 	WARN_ON(host->data);
781 
782 	/* Sanity checks */
783 	BUG_ON(data->blksz * data->blocks > 524288);
784 	BUG_ON(data->blksz > host->mmc->max_blk_size);
785 	BUG_ON(data->blocks > 65535);
786 
787 	host->data = data;
788 	host->data_early = 0;
789 	host->data->bytes_xfered = 0;
790 
791 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
792 		struct scatterlist *sg;
793 		unsigned int length_mask, offset_mask;
794 		int i;
795 
796 		host->flags |= SDHCI_REQ_USE_DMA;
797 
798 		/*
799 		 * FIXME: This doesn't account for merging when mapping the
800 		 * scatterlist.
801 		 *
802 		 * The assumption here is that alignment and lengths are
803 		 * the same after DMA mapping to device address space.
804 		 */
805 		length_mask = 0;
806 		offset_mask = 0;
807 		if (host->flags & SDHCI_USE_ADMA) {
808 			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
809 				length_mask = 3;
810 				/*
811 				 * As we use up to 3 byte chunks to work
812 				 * around alignment problems, we need to
813 				 * check the offset as well.
814 				 */
815 				offset_mask = 3;
816 			}
817 		} else {
818 			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
819 				length_mask = 3;
820 			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
821 				offset_mask = 3;
822 		}
823 
824 		if (unlikely(length_mask | offset_mask)) {
825 			for_each_sg(data->sg, sg, data->sg_len, i) {
826 				if (sg->length & length_mask) {
827 					DBG("Reverting to PIO because of transfer size (%d)\n",
828 					    sg->length);
829 					host->flags &= ~SDHCI_REQ_USE_DMA;
830 					break;
831 				}
832 				if (sg->offset & offset_mask) {
833 					DBG("Reverting to PIO because of bad alignment\n");
834 					host->flags &= ~SDHCI_REQ_USE_DMA;
835 					break;
836 				}
837 			}
838 		}
839 	}
840 
841 	if (host->flags & SDHCI_REQ_USE_DMA) {
842 		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
843 
844 		if (sg_cnt <= 0) {
845 			/*
846 			 * This only happens when someone fed
847 			 * us an invalid request.
848 			 */
849 			WARN_ON(1);
850 			host->flags &= ~SDHCI_REQ_USE_DMA;
851 		} else if (host->flags & SDHCI_USE_ADMA) {
852 			sdhci_adma_table_pre(host, data, sg_cnt);
853 
854 			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
855 			if (host->flags & SDHCI_USE_64_BIT_DMA)
856 				sdhci_writel(host,
857 					     (u64)host->adma_addr >> 32,
858 					     SDHCI_ADMA_ADDRESS_HI);
859 		} else {
860 			WARN_ON(sg_cnt != 1);
861 			sdhci_writel(host, sg_dma_address(data->sg),
862 				SDHCI_DMA_ADDRESS);
863 		}
864 	}
865 
866 	/*
867 	 * Always adjust the DMA selection as some controllers
868 	 * (e.g. JMicron) can't do PIO properly when the selection
869 	 * is ADMA.
870 	 */
871 	if (host->version >= SDHCI_SPEC_200) {
872 		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
873 		ctrl &= ~SDHCI_CTRL_DMA_MASK;
874 		if ((host->flags & SDHCI_REQ_USE_DMA) &&
875 			(host->flags & SDHCI_USE_ADMA)) {
876 			if (host->flags & SDHCI_USE_64_BIT_DMA)
877 				ctrl |= SDHCI_CTRL_ADMA64;
878 			else
879 				ctrl |= SDHCI_CTRL_ADMA32;
880 		} else {
881 			ctrl |= SDHCI_CTRL_SDMA;
882 		}
883 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
884 	}
885 
886 	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
887 		int flags;
888 
889 		flags = SG_MITER_ATOMIC;
890 		if (host->data->flags & MMC_DATA_READ)
891 			flags |= SG_MITER_TO_SG;
892 		else
893 			flags |= SG_MITER_FROM_SG;
894 		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
895 		host->blocks = data->blocks;
896 	}
897 
898 	sdhci_set_transfer_irqs(host);
899 
900 	/* Set the DMA boundary value and block size */
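	/*
	 * SDHCI_MAKE_BLKSZ() packs the SDMA buffer boundary into bits
	 * 14:12 and the block size into bits 11:0 of the Block Size
	 * register; e.g. the default 512K boundary (field value 7) with
	 * blksz = 512 gives (7 << 12) | 512.
	 */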
901 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
902 		     SDHCI_BLOCK_SIZE);
903 	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
904 }
905 
906 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
907 				    struct mmc_request *mrq)
908 {
909 	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
910 	       !mrq->cap_cmd_during_tfr;
911 }
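
/*
 * Auto-CMD12 is only used for open-ended multiblock transfers: a
 * request with an sbc (CMD23) already has a known block count, and a
 * cap_cmd_during_tfr request issues its own stop command later.
 */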
912 
913 static void sdhci_set_transfer_mode(struct sdhci_host *host,
914 	struct mmc_command *cmd)
915 {
916 	u16 mode = 0;
917 	struct mmc_data *data = cmd->data;
918 
919 	if (data == NULL) {
920 		if (host->quirks2 &
921 			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
922 			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
923 		} else {
924 		/* clear Auto CMD settings for no data CMDs */
925 			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
926 			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
927 				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
928 		}
929 		return;
930 	}
931 
932 	WARN_ON(!host->data);
933 
934 	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
935 		mode = SDHCI_TRNS_BLK_CNT_EN;
936 
937 	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
938 		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
939 		/*
940 		 * If we are sending CMD23, CMD12 never gets sent
941 		 * on successful completion (so no Auto-CMD12).
942 		 */
943 		if (sdhci_auto_cmd12(host, cmd->mrq) &&
944 		    (cmd->opcode != SD_IO_RW_EXTENDED))
945 			mode |= SDHCI_TRNS_AUTO_CMD12;
946 		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
947 			mode |= SDHCI_TRNS_AUTO_CMD23;
948 			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
949 		}
950 	}
951 
952 	if (data->flags & MMC_DATA_READ)
953 		mode |= SDHCI_TRNS_READ;
954 	if (host->flags & SDHCI_REQ_USE_DMA)
955 		mode |= SDHCI_TRNS_DMA;
956 
957 	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
958 }
959 
960 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
961 {
962 	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
963 		((mrq->cmd && mrq->cmd->error) ||
964 		 (mrq->sbc && mrq->sbc->error) ||
965 		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
966 				(mrq->data->stop && mrq->data->stop->error))) ||
967 		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
968 }
969 
970 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
971 {
972 	int i;
973 
974 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
975 		if (host->mrqs_done[i] == mrq) {
976 			WARN_ON(1);
977 			return;
978 		}
979 	}
980 
981 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
982 		if (!host->mrqs_done[i]) {
983 			host->mrqs_done[i] = mrq;
984 			break;
985 		}
986 	}
987 
988 	WARN_ON(i >= SDHCI_MAX_MRQS);
989 
990 	tasklet_schedule(&host->finish_tasklet);
991 }
992 
993 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
994 {
995 	if (host->cmd && host->cmd->mrq == mrq)
996 		host->cmd = NULL;
997 
998 	if (host->data_cmd && host->data_cmd->mrq == mrq)
999 		host->data_cmd = NULL;
1000 
1001 	if (host->data && host->data->mrq == mrq)
1002 		host->data = NULL;
1003 
1004 	if (sdhci_needs_reset(host, mrq))
1005 		host->pending_reset = true;
1006 
1007 	__sdhci_finish_mrq(host, mrq);
1008 }
1009 
1010 static void sdhci_finish_data(struct sdhci_host *host)
1011 {
1012 	struct mmc_command *data_cmd = host->data_cmd;
1013 	struct mmc_data *data = host->data;
1014 
1015 	host->data = NULL;
1016 	host->data_cmd = NULL;
1017 
1018 	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1019 	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1020 		sdhci_adma_table_post(host, data);
1021 
1022 	/*
1023 	 * The specification states that the block count register must
1024 	 * be updated, but it does not specify at what point in the
1025 	 * data flow. That makes the register entirely useless to read
1026 	 * back so we have to assume that nothing made it to the card
1027 	 * in the event of an error.
1028 	 */
1029 	if (data->error)
1030 		data->bytes_xfered = 0;
1031 	else
1032 		data->bytes_xfered = data->blksz * data->blocks;
1033 
1034 	/*
1035 	 * Need to send CMD12 if -
1036 	 * a) open-ended multiblock transfer (no CMD23)
1037 	 * b) error in multiblock transfer
1038 	 */
1039 	if (data->stop &&
1040 	    (data->error ||
1041 	     !data->mrq->sbc)) {
1042 
1043 		/*
1044 		 * The controller needs a reset of internal state machines
1045 		 * upon error conditions.
1046 		 */
1047 		if (data->error) {
1048 			if (!host->cmd || host->cmd == data_cmd)
1049 				sdhci_do_reset(host, SDHCI_RESET_CMD);
1050 			sdhci_do_reset(host, SDHCI_RESET_DATA);
1051 		}
1052 
1053 		/*
1054 		 * 'cap_cmd_during_tfr' request must not use the command line
1055 		 * after mmc_command_done() has been called. It is upper layer's
1056 		 * responsibility to send the stop command if required.
1057 		 */
1058 		if (data->mrq->cap_cmd_during_tfr) {
1059 			sdhci_finish_mrq(host, data->mrq);
1060 		} else {
1061 			/* Avoid triggering warning in sdhci_send_command() */
1062 			host->cmd = NULL;
1063 			sdhci_send_command(host, data->stop);
1064 		}
1065 	} else {
1066 		sdhci_finish_mrq(host, data->mrq);
1067 	}
1068 }
1069 
1070 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
1071 			    unsigned long timeout)
1072 {
1073 	if (sdhci_data_line_cmd(mrq->cmd))
1074 		mod_timer(&host->data_timer, timeout);
1075 	else
1076 		mod_timer(&host->timer, timeout);
1077 }
1078 
1079 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
1080 {
1081 	if (sdhci_data_line_cmd(mrq->cmd))
1082 		del_timer(&host->data_timer);
1083 	else
1084 		del_timer(&host->timer);
1085 }
1086 
1087 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1088 {
1089 	int flags;
1090 	u32 mask;
1091 	unsigned long timeout;
1092 
1093 	WARN_ON(host->cmd);
1094 
1095 	/* Initially, a command has no error */
1096 	cmd->error = 0;
1097 
1098 	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1099 	    cmd->opcode == MMC_STOP_TRANSMISSION)
1100 		cmd->flags |= MMC_RSP_BUSY;
1101 
1102 	/* Wait max 10 ms */
1103 	timeout = 10;
1104 
1105 	mask = SDHCI_CMD_INHIBIT;
1106 	if (sdhci_data_line_cmd(cmd))
1107 		mask |= SDHCI_DATA_INHIBIT;
1108 
1109 	/* We shouldn't wait for data inhibit for stop commands, even
1110 	   though they might use busy signaling */
1111 	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1112 		mask &= ~SDHCI_DATA_INHIBIT;
1113 
1114 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1115 		if (timeout == 0) {
1116 			pr_err("%s: Controller never released inhibit bit(s).\n",
1117 			       mmc_hostname(host->mmc));
1118 			sdhci_dumpregs(host);
1119 			cmd->error = -EIO;
1120 			sdhci_finish_mrq(host, cmd->mrq);
1121 			return;
1122 		}
1123 		timeout--;
1124 		mdelay(1);
1125 	}
1126 
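	/*
	 * Arm a software watchdog well beyond the expected completion:
	 * commands with a long busy wait (over 9 s) get their busy_timeout
	 * (in ms) rounded up to seconds plus one second of slack, and
	 * everything else gets 10 seconds.
	 */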
1127 	timeout = jiffies;
1128 	if (!cmd->data && cmd->busy_timeout > 9000)
1129 		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1130 	else
1131 		timeout += 10 * HZ;
1132 	sdhci_mod_timer(host, cmd->mrq, timeout);
1133 
1134 	host->cmd = cmd;
1135 	if (sdhci_data_line_cmd(cmd)) {
1136 		WARN_ON(host->data_cmd);
1137 		host->data_cmd = cmd;
1138 	}
1139 
1140 	sdhci_prepare_data(host, cmd);
1141 
1142 	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1143 
1144 	sdhci_set_transfer_mode(host, cmd);
1145 
1146 	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1147 		pr_err("%s: Unsupported response type!\n",
1148 			mmc_hostname(host->mmc));
1149 		cmd->error = -EINVAL;
1150 		sdhci_finish_mrq(host, cmd->mrq);
1151 		return;
1152 	}
1153 
1154 	if (!(cmd->flags & MMC_RSP_PRESENT))
1155 		flags = SDHCI_CMD_RESP_NONE;
1156 	else if (cmd->flags & MMC_RSP_136)
1157 		flags = SDHCI_CMD_RESP_LONG;
1158 	else if (cmd->flags & MMC_RSP_BUSY)
1159 		flags = SDHCI_CMD_RESP_SHORT_BUSY;
1160 	else
1161 		flags = SDHCI_CMD_RESP_SHORT;
1162 
1163 	if (cmd->flags & MMC_RSP_CRC)
1164 		flags |= SDHCI_CMD_CRC;
1165 	if (cmd->flags & MMC_RSP_OPCODE)
1166 		flags |= SDHCI_CMD_INDEX;
1167 
1168 	/* CMD19 is special in that the Data Present Select should be set */
1169 	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1170 	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1171 		flags |= SDHCI_CMD_DATA;
1172 
1173 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1174 }
1175 EXPORT_SYMBOL_GPL(sdhci_send_command);
1176 
1177 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1178 {
1179 	int i, reg;
1180 
1181 	for (i = 0; i < 4; i++) {
1182 		reg = SDHCI_RESPONSE + (3 - i) * 4;
1183 		cmd->resp[i] = sdhci_readl(host, reg);
1184 	}
1185 
1186 	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1187 		return;
1188 
1189 	/* CRC is stripped so we need to do some shifting */
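	/*
	 * The response registers hold bits [127:8] of the 136-bit response
	 * (the CRC byte is dropped), so shift each word up by 8 and pull in
	 * the top byte of the next word; the low byte of resp[3] stays zero
	 * where the CRC would have been.
	 */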
1190 	for (i = 0; i < 4; i++) {
1191 		cmd->resp[i] <<= 8;
1192 		if (i != 3)
1193 			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1194 	}
1195 }
1196 
1197 static void sdhci_finish_command(struct sdhci_host *host)
1198 {
1199 	struct mmc_command *cmd = host->cmd;
1200 
1201 	host->cmd = NULL;
1202 
1203 	if (cmd->flags & MMC_RSP_PRESENT) {
1204 		if (cmd->flags & MMC_RSP_136) {
1205 			sdhci_read_rsp_136(host, cmd);
1206 		} else {
1207 			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1208 		}
1209 	}
1210 
1211 	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1212 		mmc_command_done(host->mmc, cmd->mrq);
1213 
1214 	/*
1215 	 * The host can send an interrupt when the busy state has
1216 	 * ended, allowing us to wait without wasting CPU cycles.
1217 	 * The busy signal uses DAT0 so this is similar to waiting
1218 	 * for data to complete.
1219 	 *
1220 	 * Note: The 1.0 specification is a bit ambiguous about this
1221 	 *       feature so there might be some problems with older
1222 	 *       controllers.
1223 	 */
1224 	if (cmd->flags & MMC_RSP_BUSY) {
1225 		if (cmd->data) {
1226 			DBG("Cannot wait for busy signal when also doing a data transfer");
1227 		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1228 			   cmd == host->data_cmd) {
1229 			/* Command complete before busy is ended */
1230 			return;
1231 		}
1232 	}
1233 
1234 	/* Finished CMD23, now send actual command. */
1235 	if (cmd == cmd->mrq->sbc) {
1236 		sdhci_send_command(host, cmd->mrq->cmd);
1237 	} else {
1238 
1239 		/* Processed actual command. */
1240 		if (host->data && host->data_early)
1241 			sdhci_finish_data(host);
1242 
1243 		if (!cmd->data)
1244 			sdhci_finish_mrq(host, cmd->mrq);
1245 	}
1246 }
1247 
1248 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1249 {
1250 	u16 preset = 0;
1251 
1252 	switch (host->timing) {
1253 	case MMC_TIMING_UHS_SDR12:
1254 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1255 		break;
1256 	case MMC_TIMING_UHS_SDR25:
1257 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1258 		break;
1259 	case MMC_TIMING_UHS_SDR50:
1260 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1261 		break;
1262 	case MMC_TIMING_UHS_SDR104:
1263 	case MMC_TIMING_MMC_HS200:
1264 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1265 		break;
1266 	case MMC_TIMING_UHS_DDR50:
1267 	case MMC_TIMING_MMC_DDR52:
1268 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1269 		break;
1270 	case MMC_TIMING_MMC_HS400:
1271 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1272 		break;
1273 	default:
1274 		pr_warn("%s: Invalid UHS-I mode selected\n",
1275 			mmc_hostname(host->mmc));
1276 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1277 		break;
1278 	}
1279 	return preset;
1280 }
1281 
1282 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1283 		   unsigned int *actual_clock)
1284 {
1285 	int div = 0; /* Initialized for compiler warning */
1286 	int real_div = div, clk_mul = 1;
1287 	u16 clk = 0;
1288 	bool switch_base_clk = false;
1289 
1290 	if (host->version >= SDHCI_SPEC_300) {
1291 		if (host->preset_enabled) {
1292 			u16 pre_val;
1293 
1294 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1295 			pre_val = sdhci_get_preset_value(host);
1296 			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1297 				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1298 			if (host->clk_mul &&
1299 				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1300 				clk = SDHCI_PROG_CLOCK_MODE;
1301 				real_div = div + 1;
1302 				clk_mul = host->clk_mul;
1303 			} else {
1304 				real_div = max_t(int, 1, div << 1);
1305 			}
1306 			goto clock_set;
1307 		}
1308 
1309 		/*
1310 		 * Check if the Host Controller supports Programmable Clock
1311 		 * Mode.
1312 		 */
1313 		if (host->clk_mul) {
1314 			for (div = 1; div <= 1024; div++) {
1315 				if ((host->max_clk * host->clk_mul / div)
1316 					<= clock)
1317 					break;
1318 			}
1319 			if ((host->max_clk * host->clk_mul / div) <= clock) {
1320 				/*
1321 				 * Set Programmable Clock Mode in the Clock
1322 				 * Control register.
1323 				 */
1324 				clk = SDHCI_PROG_CLOCK_MODE;
1325 				real_div = div;
1326 				clk_mul = host->clk_mul;
1327 				div--;
1328 			} else {
1329 				/*
1330 				 * The divisor can be too small to reach the
1331 				 * requested clock speed; then use the base clock.
1332 				 */
1333 				switch_base_clk = true;
1334 			}
1335 		}
1336 
1337 		if (!host->clk_mul || switch_base_clk) {
1338 			/* Version 3.00 divisors must be a multiple of 2. */
1339 			if (host->max_clk <= clock)
1340 				div = 1;
1341 			else {
1342 				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1343 				     div += 2) {
1344 					if ((host->max_clk / div) <= clock)
1345 						break;
1346 				}
1347 			}
1348 			real_div = div;
1349 			div >>= 1;
1350 			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1351 				&& !div && host->max_clk <= 25000000)
1352 				div = 1;
1353 		}
1354 	} else {
1355 		/* Version 2.00 divisors must be a power of 2. */
1356 		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1357 			if ((host->max_clk / div) <= clock)
1358 				break;
1359 		}
1360 		real_div = div;
1361 		div >>= 1;
1362 	}
1363 
1364 clock_set:
1365 	if (real_div)
1366 		*actual_clock = (host->max_clk * clk_mul) / real_div;
1367 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1368 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1369 		<< SDHCI_DIVIDER_HI_SHIFT;
1370 
1371 	return clk;
1372 }
1373 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
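
/*
 * Worked example (hypothetical values): on a v3.00 host with
 * host->max_clk = 200 MHz, no clk_mul and a 25 MHz request, the loop
 * stops at div = 8 (200 MHz / 8 = 25 MHz), so real_div = 8 and the
 * register field becomes div >> 1 = 4, i.e. SDCLK = base / (2 * 4).
 */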
1374 
1375 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1376 {
1377 	ktime_t timeout;
1378 
1379 	clk |= SDHCI_CLOCK_INT_EN;
1380 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1381 
1382 	/* Wait max 20 ms */
1383 	timeout = ktime_add_ms(ktime_get(), 20);
1384 	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1385 		& SDHCI_CLOCK_INT_STABLE)) {
1386 		if (ktime_after(ktime_get(), timeout)) {
1387 			pr_err("%s: Internal clock never stabilised.\n",
1388 			       mmc_hostname(host->mmc));
1389 			sdhci_dumpregs(host);
1390 			return;
1391 		}
1392 		udelay(10);
1393 	}
1394 
1395 	clk |= SDHCI_CLOCK_CARD_EN;
1396 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1397 }
1398 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1399 
1400 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1401 {
1402 	u16 clk;
1403 
1404 	host->mmc->actual_clock = 0;
1405 
1406 	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1407 
1408 	if (clock == 0)
1409 		return;
1410 
1411 	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1412 	sdhci_enable_clk(host, clk);
1413 }
1414 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1415 
1416 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1417 				unsigned short vdd)
1418 {
1419 	struct mmc_host *mmc = host->mmc;
1420 
1421 	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1422 
1423 	if (mode != MMC_POWER_OFF)
1424 		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1425 	else
1426 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1427 }
1428 
1429 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1430 			   unsigned short vdd)
1431 {
1432 	u8 pwr = 0;
1433 
1434 	if (mode != MMC_POWER_OFF) {
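		/*
		 * ios->vdd is an OCR bit number, so (1 << vdd) recovers the
		 * MMC_VDD_* voltage range mask (e.g. bit 17 selects the
		 * MMC_VDD_29_30 range).
		 */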
1435 		switch (1 << vdd) {
1436 		case MMC_VDD_165_195:
1437 		/*
1438 		 * Without a regulator, SDHCI does not support 2.0v
1439 		 * so we only get here if the driver deliberately
1440 		 * added the 2.0v range to ocr_avail. Map it to 1.8v
1441 		 * for the purpose of turning on the power.
1442 		 */
1443 		case MMC_VDD_20_21:
1444 			pwr = SDHCI_POWER_180;
1445 			break;
1446 		case MMC_VDD_29_30:
1447 		case MMC_VDD_30_31:
1448 			pwr = SDHCI_POWER_300;
1449 			break;
1450 		case MMC_VDD_32_33:
1451 		case MMC_VDD_33_34:
1452 			pwr = SDHCI_POWER_330;
1453 			break;
1454 		default:
1455 			WARN(1, "%s: Invalid vdd %#x\n",
1456 			     mmc_hostname(host->mmc), vdd);
1457 			break;
1458 		}
1459 	}
1460 
1461 	if (host->pwr == pwr)
1462 		return;
1463 
1464 	host->pwr = pwr;
1465 
1466 	if (pwr == 0) {
1467 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1468 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1469 			sdhci_runtime_pm_bus_off(host);
1470 	} else {
1471 		/*
1472 		 * Spec says that we should clear the power reg before setting
1473 		 * a new value. Some controllers don't seem to like this though.
1474 		 */
1475 		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1476 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1477 
1478 		/*
1479 		 * At least the Marvell CaFe chip gets confused if we set the
1480 		 * voltage and set turn on power at the same time, so set the
1481 		 * voltage first.
1482 		 */
1483 		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1484 			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1485 
1486 		pwr |= SDHCI_POWER_ON;
1487 
1488 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1489 
1490 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1491 			sdhci_runtime_pm_bus_on(host);
1492 
1493 		/*
1494 		 * Some controllers need an extra 10 ms delay before they
1495 		 * can apply clock after applying power.
1496 		 */
1497 		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1498 			mdelay(10);
1499 	}
1500 }
1501 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1502 
1503 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1504 		     unsigned short vdd)
1505 {
1506 	if (IS_ERR(host->mmc->supply.vmmc))
1507 		sdhci_set_power_noreg(host, mode, vdd);
1508 	else
1509 		sdhci_set_power_reg(host, mode, vdd);
1510 }
1511 EXPORT_SYMBOL_GPL(sdhci_set_power);
1512 
1513 /*****************************************************************************\
1514  *                                                                           *
1515  * MMC callbacks                                                             *
1516  *                                                                           *
1517 \*****************************************************************************/
1518 
1519 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1520 {
1521 	struct sdhci_host *host;
1522 	int present;
1523 	unsigned long flags;
1524 
1525 	host = mmc_priv(mmc);
1526 
1527 	/* Firstly check card presence */
1528 	present = mmc->ops->get_cd(mmc);
1529 
1530 	spin_lock_irqsave(&host->lock, flags);
1531 
1532 	sdhci_led_activate(host);
1533 
1534 	/*
1535 	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1536 	 * requests if Auto-CMD12 is enabled.
1537 	 */
1538 	if (sdhci_auto_cmd12(host, mrq)) {
1539 		if (mrq->stop) {
1540 			mrq->data->stop = NULL;
1541 			mrq->stop = NULL;
1542 		}
1543 	}
1544 
1545 	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1546 		mrq->cmd->error = -ENOMEDIUM;
1547 		sdhci_finish_mrq(host, mrq);
1548 	} else {
1549 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1550 			sdhci_send_command(host, mrq->sbc);
1551 		else
1552 			sdhci_send_command(host, mrq->cmd);
1553 	}
1554 
1555 	mmiowb();
1556 	spin_unlock_irqrestore(&host->lock, flags);
1557 }
1558 
1559 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1560 {
1561 	u8 ctrl;
1562 
1563 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1564 	if (width == MMC_BUS_WIDTH_8) {
1565 		ctrl &= ~SDHCI_CTRL_4BITBUS;
1566 		ctrl |= SDHCI_CTRL_8BITBUS;
1567 	} else {
1568 		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1569 			ctrl &= ~SDHCI_CTRL_8BITBUS;
1570 		if (width == MMC_BUS_WIDTH_4)
1571 			ctrl |= SDHCI_CTRL_4BITBUS;
1572 		else
1573 			ctrl &= ~SDHCI_CTRL_4BITBUS;
1574 	}
1575 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1576 }
1577 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1578 
1579 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1580 {
1581 	u16 ctrl_2;
1582 
1583 	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1584 	/* Select Bus Speed Mode for host */
1585 	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1586 	if ((timing == MMC_TIMING_MMC_HS200) ||
1587 	    (timing == MMC_TIMING_UHS_SDR104))
1588 		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1589 	else if (timing == MMC_TIMING_UHS_SDR12)
1590 		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1591 	else if (timing == MMC_TIMING_UHS_SDR25)
1592 		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1593 	else if (timing == MMC_TIMING_UHS_SDR50)
1594 		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1595 	else if ((timing == MMC_TIMING_UHS_DDR50) ||
1596 		 (timing == MMC_TIMING_MMC_DDR52))
1597 		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1598 	else if (timing == MMC_TIMING_MMC_HS400)
1599 		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1600 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1601 }
1602 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1603 
1604 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1605 {
1606 	struct sdhci_host *host = mmc_priv(mmc);
1607 	u8 ctrl;
1608 
1609 	if (ios->power_mode == MMC_POWER_UNDEFINED)
1610 		return;
1611 
1612 	if (host->flags & SDHCI_DEVICE_DEAD) {
1613 		if (!IS_ERR(mmc->supply.vmmc) &&
1614 		    ios->power_mode == MMC_POWER_OFF)
1615 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1616 		return;
1617 	}
1618 
1619 	/*
1620 	 * Reset the chip on each power off.
1621 	 * Should clear out any weird states.
1622 	 */
1623 	if (ios->power_mode == MMC_POWER_OFF) {
1624 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1625 		sdhci_reinit(host);
1626 	}
1627 
1628 	if (host->version >= SDHCI_SPEC_300 &&
1629 		(ios->power_mode == MMC_POWER_UP) &&
1630 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1631 		sdhci_enable_preset_value(host, false);
1632 
1633 	if (!ios->clock || ios->clock != host->clock) {
1634 		host->ops->set_clock(host, ios->clock);
1635 		host->clock = ios->clock;
1636 
1637 		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1638 		    host->clock) {
1639 			host->timeout_clk = host->mmc->actual_clock ?
1640 						host->mmc->actual_clock / 1000 :
1641 						host->clock / 1000;
1642 			host->mmc->max_busy_timeout =
1643 				host->ops->get_max_timeout_count ?
1644 				host->ops->get_max_timeout_count(host) :
1645 				1 << 27;
1646 			host->mmc->max_busy_timeout /= host->timeout_clk;
1647 		}
1648 	}
1649 
1650 	if (host->ops->set_power)
1651 		host->ops->set_power(host, ios->power_mode, ios->vdd);
1652 	else
1653 		sdhci_set_power(host, ios->power_mode, ios->vdd);
1654 
1655 	if (host->ops->platform_send_init_74_clocks)
1656 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1657 
1658 	host->ops->set_bus_width(host, ios->bus_width);
1659 
1660 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1661 
1662 	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1663 		if (ios->timing == MMC_TIMING_SD_HS ||
1664 		     ios->timing == MMC_TIMING_MMC_HS ||
1665 		     ios->timing == MMC_TIMING_MMC_HS400 ||
1666 		     ios->timing == MMC_TIMING_MMC_HS200 ||
1667 		     ios->timing == MMC_TIMING_MMC_DDR52 ||
1668 		     ios->timing == MMC_TIMING_UHS_SDR50 ||
1669 		     ios->timing == MMC_TIMING_UHS_SDR104 ||
1670 		     ios->timing == MMC_TIMING_UHS_DDR50 ||
1671 		     ios->timing == MMC_TIMING_UHS_SDR25)
1672 			ctrl |= SDHCI_CTRL_HISPD;
1673 		else
1674 			ctrl &= ~SDHCI_CTRL_HISPD;
1675 	}
1676 
1677 	if (host->version >= SDHCI_SPEC_300) {
1678 		u16 clk, ctrl_2;
1679 
1680 		if (!host->preset_enabled) {
1681 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1682 			/*
1683 			 * We only need to set Driver Strength if the
1684 			 * preset value enable is not set.
1685 			 */
1686 			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1687 			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1688 			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1689 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1690 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1691 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1692 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1693 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1694 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1695 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1696 			else {
1697 				pr_warn("%s: invalid driver type, default to driver type B\n",
1698 					mmc_hostname(mmc));
1699 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1700 			}
1701 
1702 			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1703 		} else {
1704 			/*
1705 			 * According to SDHC Spec v3.00, if the Preset Value
1706 			 * Enable in the Host Control 2 register is set, we
1707 			 * need to reset SD Clock Enable before changing High
1708 			 * Speed Enable to avoid generating clock glitches.
1709 			 */
1710 
1711 			/* Reset SD Clock Enable */
1712 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1713 			clk &= ~SDHCI_CLOCK_CARD_EN;
1714 			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1715 
1716 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1717 
1718 			/* Re-enable SD Clock */
1719 			host->ops->set_clock(host, host->clock);
1720 		}
1721 
1722 		/* Reset SD Clock Enable */
1723 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1724 		clk &= ~SDHCI_CLOCK_CARD_EN;
1725 		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1726 
1727 		host->ops->set_uhs_signaling(host, ios->timing);
1728 		host->timing = ios->timing;
1729 
1730 		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1731 				((ios->timing == MMC_TIMING_UHS_SDR12) ||
1732 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1733 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1734 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1735 				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1736 				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1737 			u16 preset;
1738 
1739 			sdhci_enable_preset_value(host, true);
1740 			preset = sdhci_get_preset_value(host);
1741 			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1742 				>> SDHCI_PRESET_DRV_SHIFT;
1743 		}
1744 
1745 		/* Re-enable SD Clock */
1746 		host->ops->set_clock(host, host->clock);
1747 	} else
1748 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1749 
1750 	/*
1751 	 * Some (ENE) controllers go apeshit on some ios operation,
1752 	 * signalling timeout and CRC errors even on CMD0. Resetting
1753 	 * it on each ios seems to solve the problem.
1754 	 */
1755 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1756 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1757 
1758 	mmiowb();
1759 }
1760 EXPORT_SYMBOL_GPL(sdhci_set_ios);
1761 
1762 static int sdhci_get_cd(struct mmc_host *mmc)
1763 {
1764 	struct sdhci_host *host = mmc_priv(mmc);
1765 	int gpio_cd = mmc_gpio_get_cd(mmc);
1766 
1767 	if (host->flags & SDHCI_DEVICE_DEAD)
1768 		return 0;
1769 
1770 	/* If nonremovable, assume that the card is always present. */
1771 	if (!mmc_card_is_removable(host->mmc))
1772 		return 1;
1773 
1774 	/*
1775 	 * Try slot gpio detect; if defined, it takes precedence
1776 	 * over built-in controller functionality.
1777 	 */
1778 	if (gpio_cd >= 0)
1779 		return !!gpio_cd;
1780 
1781 	/* If polling, assume that the card is always present. */
1782 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1783 		return 1;
1784 
1785 	/* Host native card detect */
1786 	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1787 }
1788 
1789 static int sdhci_check_ro(struct sdhci_host *host)
1790 {
1791 	unsigned long flags;
1792 	int is_readonly;
1793 
1794 	spin_lock_irqsave(&host->lock, flags);
1795 
1796 	if (host->flags & SDHCI_DEVICE_DEAD)
1797 		is_readonly = 0;
1798 	else if (host->ops->get_ro)
1799 		is_readonly = host->ops->get_ro(host);
1800 	else
1801 		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1802 				& SDHCI_WRITE_PROTECT);
1803 
1804 	spin_unlock_irqrestore(&host->lock, flags);
1805 
1806 	/* This quirk needs to be replaced by a callback-function later */
1807 	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1808 		!is_readonly : is_readonly;
1809 }
1810 
1811 #define SAMPLE_COUNT	5
1812 
1813 static int sdhci_get_ro(struct mmc_host *mmc)
1814 {
1815 	struct sdhci_host *host = mmc_priv(mmc);
1816 	int i, ro_count;
1817 
1818 	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1819 		return sdhci_check_ro(host);
1820 
1821 	ro_count = 0;
1822 	for (i = 0; i < SAMPLE_COUNT; i++) {
1823 		if (sdhci_check_ro(host)) {
1824 			if (++ro_count > SAMPLE_COUNT / 2)
1825 				return 1;
1826 		}
1827 		msleep(30);
1828 	}
1829 	return 0;
1830 }
1831 
1832 static void sdhci_hw_reset(struct mmc_host *mmc)
1833 {
1834 	struct sdhci_host *host = mmc_priv(mmc);
1835 
1836 	if (host->ops && host->ops->hw_reset)
1837 		host->ops->hw_reset(host);
1838 }
1839 
1840 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1841 {
1842 	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1843 		if (enable)
1844 			host->ier |= SDHCI_INT_CARD_INT;
1845 		else
1846 			host->ier &= ~SDHCI_INT_CARD_INT;
1847 
1848 		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1849 		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1850 		mmiowb();
1851 	}
1852 }
1853 
1854 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1855 {
1856 	struct sdhci_host *host = mmc_priv(mmc);
1857 	unsigned long flags;
1858 
1859 	if (enable)
1860 		pm_runtime_get_noresume(host->mmc->parent);
1861 
1862 	spin_lock_irqsave(&host->lock, flags);
1863 	if (enable)
1864 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1865 	else
1866 		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1867 
1868 	sdhci_enable_sdio_irq_nolock(host, enable);
1869 	spin_unlock_irqrestore(&host->lock, flags);
1870 
1871 	if (!enable)
1872 		pm_runtime_put_noidle(host->mmc->parent);
1873 }
1874 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
1875 
1876 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1877 				      struct mmc_ios *ios)
1878 {
1879 	struct sdhci_host *host = mmc_priv(mmc);
1880 	u16 ctrl;
1881 	int ret;
1882 
1883 	/*
1884 	 * Signal Voltage Switching is only applicable for Host Controllers
1885 	 * v3.00 and above.
1886 	 */
1887 	if (host->version < SDHCI_SPEC_300)
1888 		return 0;
1889 
1890 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1891 
1892 	switch (ios->signal_voltage) {
1893 	case MMC_SIGNAL_VOLTAGE_330:
1894 		if (!(host->flags & SDHCI_SIGNALING_330))
1895 			return -EINVAL;
1896 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1897 		ctrl &= ~SDHCI_CTRL_VDD_180;
1898 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1899 
1900 		if (!IS_ERR(mmc->supply.vqmmc)) {
1901 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1902 			if (ret) {
1903 				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1904 					mmc_hostname(mmc));
1905 				return -EIO;
1906 			}
1907 		}
1908 		/* Wait for 5ms */
1909 		usleep_range(5000, 5500);
1910 
1911 		/* 3.3V regulator output should be stable within 5 ms */
1912 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1913 		if (!(ctrl & SDHCI_CTRL_VDD_180))
1914 			return 0;
1915 
1916 		pr_warn("%s: 3.3V regulator output did not became stable\n",
1917 			mmc_hostname(mmc));
1918 
1919 		return -EAGAIN;
1920 	case MMC_SIGNAL_VOLTAGE_180:
1921 		if (!(host->flags & SDHCI_SIGNALING_180))
1922 			return -EINVAL;
1923 		if (!IS_ERR(mmc->supply.vqmmc)) {
1924 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1925 			if (ret) {
1926 				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1927 					mmc_hostname(mmc));
1928 				return -EIO;
1929 			}
1930 		}
1931 
1932 		/*
1933 		 * Enable 1.8V Signal Enable in the Host Control2
1934 		 * register
1935 		 */
1936 		ctrl |= SDHCI_CTRL_VDD_180;
1937 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1938 
		/* Some controllers need to do more when switching */
1940 		if (host->ops->voltage_switch)
1941 			host->ops->voltage_switch(host);
1942 
1943 		/* 1.8V regulator output should be stable within 5 ms */
1944 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1945 		if (ctrl & SDHCI_CTRL_VDD_180)
1946 			return 0;
1947 
1948 		pr_warn("%s: 1.8V regulator output did not became stable\n",
1949 			mmc_hostname(mmc));
1950 
1951 		return -EAGAIN;
1952 	case MMC_SIGNAL_VOLTAGE_120:
1953 		if (!(host->flags & SDHCI_SIGNALING_120))
1954 			return -EINVAL;
1955 		if (!IS_ERR(mmc->supply.vqmmc)) {
1956 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1957 			if (ret) {
1958 				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1959 					mmc_hostname(mmc));
1960 				return -EIO;
1961 			}
1962 		}
1963 		return 0;
1964 	default:
1965 		/* No signal voltage switch required */
1966 		return 0;
1967 	}
1968 }
1969 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
1970 
1971 static int sdhci_card_busy(struct mmc_host *mmc)
1972 {
1973 	struct sdhci_host *host = mmc_priv(mmc);
1974 	u32 present_state;
1975 
1976 	/* Check whether DAT[0] is 0 */
1977 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1978 
1979 	return !(present_state & SDHCI_DATA_0_LVL_MASK);
1980 }
1981 
1982 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1983 {
1984 	struct sdhci_host *host = mmc_priv(mmc);
1985 	unsigned long flags;
1986 
1987 	spin_lock_irqsave(&host->lock, flags);
1988 	host->flags |= SDHCI_HS400_TUNING;
1989 	spin_unlock_irqrestore(&host->lock, flags);
1990 
1991 	return 0;
1992 }
1993 
1994 static void sdhci_start_tuning(struct sdhci_host *host)
1995 {
1996 	u16 ctrl;
1997 
1998 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1999 	ctrl |= SDHCI_CTRL_EXEC_TUNING;
2000 	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2001 		ctrl |= SDHCI_CTRL_TUNED_CLK;
2002 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2003 
2004 	/*
2005 	 * As per the Host Controller spec v3.00, tuning command
2006 	 * generates Buffer Read Ready interrupt, so enable that.
2007 	 *
2008 	 * Note: The spec clearly says that when tuning sequence
2009 	 * is being performed, the controller does not generate
2010 	 * interrupts other than Buffer Read Ready interrupt. But
2011 	 * to make sure we don't hit a controller bug, we _only_
2012 	 * enable Buffer Read Ready interrupt here.
2013 	 */
2014 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2015 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2016 }
2017 
2018 static void sdhci_end_tuning(struct sdhci_host *host)
2019 {
2020 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2021 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2022 }
2023 
2024 static void sdhci_reset_tuning(struct sdhci_host *host)
2025 {
2026 	u16 ctrl;
2027 
2028 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2029 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2030 	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2031 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2032 }
2033 
2034 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2035 {
2036 	sdhci_reset_tuning(host);
2037 
2038 	sdhci_do_reset(host, SDHCI_RESET_CMD);
2039 	sdhci_do_reset(host, SDHCI_RESET_DATA);
2040 
2041 	sdhci_end_tuning(host);
2042 
2043 	mmc_abort_tuning(host->mmc, opcode);
2044 }
2045 
2046 /*
2047  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2048  * tuning command does not have a data payload (or rather the hardware does it
2049  * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2050  * interrupt setup is different to other commands and there is no timeout
2051  * interrupt so special handling is needed.
2052  */
2053 static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2054 {
2055 	struct mmc_host *mmc = host->mmc;
2056 	struct mmc_command cmd = {};
2057 	struct mmc_request mrq = {};
2058 	unsigned long flags;
2059 	u32 b = host->sdma_boundary;
2060 
2061 	spin_lock_irqsave(&host->lock, flags);
2062 
2063 	cmd.opcode = opcode;
2064 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2065 	cmd.mrq = &mrq;
2066 
2067 	mrq.cmd = &cmd;
2068 	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * data to the Host Controller, so the block size is set to
	 * 64.  For CMD21 (HS200) on an 8-bit bus the tuning block
	 * is 128 bytes instead.
2072 	 */
2073 	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2074 	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2075 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2076 	else
2077 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2078 
2079 	/*
2080 	 * The tuning block is sent by the card to the host controller.
2081 	 * So we set the TRNS_READ bit in the Transfer Mode register.
2082 	 * This also takes care of setting DMA Enable and Multi Block
2083 	 * Select in the same register to 0.
2084 	 */
2085 	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2086 
2087 	sdhci_send_command(host, &cmd);
2088 
2089 	host->cmd = NULL;
2090 
2091 	sdhci_del_timer(host, &mrq);
2092 
2093 	host->tuning_done = 0;
2094 
2095 	mmiowb();
2096 	spin_unlock_irqrestore(&host->lock, flags);
2097 
2098 	/* Wait for Buffer Read Ready interrupt */
2099 	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2100 			   msecs_to_jiffies(50));
2101 
2102 }
2103 
2104 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2105 {
2106 	int i;
2107 
2108 	/*
	 * Issue the tuning command repeatedly until Execute Tuning is cleared
	 * or the number of loops reaches MAX_TUNING_LOOP (40).
2111 	 */
2112 	for (i = 0; i < MAX_TUNING_LOOP; i++) {
2113 		u16 ctrl;
2114 
2115 		sdhci_send_tuning(host, opcode);
2116 
2117 		if (!host->tuning_done) {
2118 			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2119 				mmc_hostname(host->mmc));
2120 			sdhci_abort_tuning(host, opcode);
2121 			return;
2122 		}
2123 
2124 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2125 		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2126 			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2127 				return; /* Success! */
2128 			break;
2129 		}
2130 
2131 		/* Spec does not require a delay between tuning cycles */
2132 		if (host->tuning_delay > 0)
2133 			mdelay(host->tuning_delay);
2134 	}
2135 
2136 	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2137 		mmc_hostname(host->mmc));
2138 	sdhci_reset_tuning(host);
2139 }
2140 
2141 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2142 {
2143 	struct sdhci_host *host = mmc_priv(mmc);
2144 	int err = 0;
2145 	unsigned int tuning_count = 0;
2146 	bool hs400_tuning;
2147 
2148 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2149 
2150 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2151 		tuning_count = host->tuning_count;
2152 
2153 	/*
2154 	 * The Host Controller needs tuning in case of SDR104 and DDR50
2155 	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2156 	 * the Capabilities register.
2157 	 * If the Host Controller supports the HS200 mode then the
2158 	 * tuning function has to be executed.
2159 	 */
2160 	switch (host->timing) {
2161 	/* HS400 tuning is done in HS200 mode */
2162 	case MMC_TIMING_MMC_HS400:
2163 		err = -EINVAL;
2164 		goto out;
2165 
2166 	case MMC_TIMING_MMC_HS200:
2167 		/*
2168 		 * Periodic re-tuning for HS400 is not expected to be needed, so
2169 		 * disable it here.
2170 		 */
2171 		if (hs400_tuning)
2172 			tuning_count = 0;
2173 		break;
2174 
2175 	case MMC_TIMING_UHS_SDR104:
2176 	case MMC_TIMING_UHS_DDR50:
2177 		break;
2178 
2179 	case MMC_TIMING_UHS_SDR50:
2180 		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2181 			break;
2182 		/* FALLTHROUGH */
2183 
2184 	default:
2185 		goto out;
2186 	}
2187 
2188 	if (host->ops->platform_execute_tuning) {
2189 		err = host->ops->platform_execute_tuning(host, opcode);
2190 		goto out;
2191 	}
2192 
2193 	host->mmc->retune_period = tuning_count;
2194 
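	/*
	 * If no tuning delay was specified, default to 1 ms between tuning
	 * commands for SD CMD19 and no delay otherwise.
	 */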
2195 	if (host->tuning_delay < 0)
2196 		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2197 
2198 	sdhci_start_tuning(host);
2199 
2200 	__sdhci_execute_tuning(host, opcode);
2201 
2202 	sdhci_end_tuning(host);
2203 out:
2204 	host->flags &= ~SDHCI_HS400_TUNING;
2205 
2206 	return err;
2207 }
2208 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2209 
2210 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2211 {
2212 	/* Host Controller v3.00 defines preset value registers */
2213 	if (host->version < SDHCI_SPEC_300)
2214 		return;
2215 
2216 	/*
	 * Only enable or disable Preset Value if the state is actually
	 * changing; otherwise, bail out.
2219 	 */
2220 	if (host->preset_enabled != enable) {
2221 		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2222 
2223 		if (enable)
2224 			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2225 		else
2226 			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2227 
2228 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2229 
2230 		if (enable)
2231 			host->flags |= SDHCI_PV_ENABLED;
2232 		else
2233 			host->flags &= ~SDHCI_PV_ENABLED;
2234 
2235 		host->preset_enabled = enable;
2236 	}
2237 }
2238 
2239 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2240 				int err)
2241 {
2242 	struct sdhci_host *host = mmc_priv(mmc);
2243 	struct mmc_data *data = mrq->data;
2244 
2245 	if (data->host_cookie != COOKIE_UNMAPPED)
2246 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2247 			     mmc_get_dma_dir(data));
2248 
2249 	data->host_cookie = COOKIE_UNMAPPED;
2250 }
2251 
2252 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2253 {
2254 	struct sdhci_host *host = mmc_priv(mmc);
2255 
2256 	mrq->data->host_cookie = COOKIE_UNMAPPED;
2257 
2258 	if (host->flags & SDHCI_REQ_USE_DMA)
2259 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2260 }
2261 
2262 static inline bool sdhci_has_requests(struct sdhci_host *host)
2263 {
2264 	return host->cmd || host->data_cmd;
2265 }
2266 
2267 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2268 {
2269 	if (host->data_cmd) {
2270 		host->data_cmd->error = err;
2271 		sdhci_finish_mrq(host, host->data_cmd->mrq);
2272 	}
2273 
2274 	if (host->cmd) {
2275 		host->cmd->error = err;
2276 		sdhci_finish_mrq(host, host->cmd->mrq);
2277 	}
2278 }
2279 
2280 static void sdhci_card_event(struct mmc_host *mmc)
2281 {
2282 	struct sdhci_host *host = mmc_priv(mmc);
2283 	unsigned long flags;
2284 	int present;
2285 
2286 	/* First check if client has provided their own card event */
2287 	if (host->ops->card_event)
2288 		host->ops->card_event(host);
2289 
2290 	present = mmc->ops->get_cd(mmc);
2291 
2292 	spin_lock_irqsave(&host->lock, flags);
2293 
2294 	/* Check sdhci_has_requests() first in case we are runtime suspended */
2295 	if (sdhci_has_requests(host) && !present) {
2296 		pr_err("%s: Card removed during transfer!\n",
2297 			mmc_hostname(host->mmc));
2298 		pr_err("%s: Resetting controller.\n",
2299 			mmc_hostname(host->mmc));
2300 
2301 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2302 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2303 
2304 		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2305 	}
2306 
2307 	spin_unlock_irqrestore(&host->lock, flags);
2308 }
2309 
2310 static const struct mmc_host_ops sdhci_ops = {
2311 	.request	= sdhci_request,
2312 	.post_req	= sdhci_post_req,
2313 	.pre_req	= sdhci_pre_req,
2314 	.set_ios	= sdhci_set_ios,
2315 	.get_cd		= sdhci_get_cd,
2316 	.get_ro		= sdhci_get_ro,
2317 	.hw_reset	= sdhci_hw_reset,
2318 	.enable_sdio_irq = sdhci_enable_sdio_irq,
2319 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2320 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2321 	.execute_tuning			= sdhci_execute_tuning,
2322 	.card_event			= sdhci_card_event,
2323 	.card_busy	= sdhci_card_busy,
2324 };
2325 
2326 /*****************************************************************************\
2327  *                                                                           *
2328  * Tasklets                                                                  *
2329  *                                                                           *
2330 \*****************************************************************************/
2331 
2332 static bool sdhci_request_done(struct sdhci_host *host)
2333 {
2334 	unsigned long flags;
2335 	struct mmc_request *mrq;
2336 	int i;
2337 
2338 	spin_lock_irqsave(&host->lock, flags);
2339 
2340 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2341 		mrq = host->mrqs_done[i];
2342 		if (mrq)
2343 			break;
2344 	}
2345 
2346 	if (!mrq) {
2347 		spin_unlock_irqrestore(&host->lock, flags);
2348 		return true;
2349 	}
2350 
2351 	sdhci_del_timer(host, mrq);
2352 
2353 	/*
2354 	 * Always unmap the data buffers if they were mapped by
2355 	 * sdhci_prepare_data() whenever we finish with a request.
2356 	 * This avoids leaking DMA mappings on error.
2357 	 */
2358 	if (host->flags & SDHCI_REQ_USE_DMA) {
2359 		struct mmc_data *data = mrq->data;
2360 
2361 		if (data && data->host_cookie == COOKIE_MAPPED) {
2362 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2363 				     mmc_get_dma_dir(data));
2364 			data->host_cookie = COOKIE_UNMAPPED;
2365 		}
2366 	}
2367 
2368 	/*
2369 	 * The controller needs a reset of internal state machines
2370 	 * upon error conditions.
2371 	 */
2372 	if (sdhci_needs_reset(host, mrq)) {
2373 		/*
2374 		 * Do not finish until command and data lines are available for
2375 		 * reset. Note there can only be one other mrq, so it cannot
2376 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2377 		 * would both be null.
2378 		 */
2379 		if (host->cmd || host->data_cmd) {
2380 			spin_unlock_irqrestore(&host->lock, flags);
2381 			return true;
2382 		}
2383 
2384 		/* Some controllers need this kick or reset won't work here */
2385 		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2386 			/* This is to force an update */
2387 			host->ops->set_clock(host, host->clock);
2388 
		/*
		 * The spec says we should do both at the same time, but Ricoh
		 * controllers do not like that.
		 */
2391 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2392 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2393 
2394 		host->pending_reset = false;
2395 	}
2396 
2397 	if (!sdhci_has_requests(host))
2398 		sdhci_led_deactivate(host);
2399 
2400 	host->mrqs_done[i] = NULL;
2401 
2402 	mmiowb();
2403 	spin_unlock_irqrestore(&host->lock, flags);
2404 
2405 	mmc_request_done(host->mmc, mrq);
2406 
2407 	return false;
2408 }
2409 
2410 static void sdhci_tasklet_finish(unsigned long param)
2411 {
2412 	struct sdhci_host *host = (struct sdhci_host *)param;
2413 
2414 	while (!sdhci_request_done(host))
2415 		;
2416 }
2417 
2418 static void sdhci_timeout_timer(struct timer_list *t)
2419 {
2420 	struct sdhci_host *host;
2421 	unsigned long flags;
2422 
2423 	host = from_timer(host, t, timer);
2424 
2425 	spin_lock_irqsave(&host->lock, flags);
2426 
2427 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2428 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2429 		       mmc_hostname(host->mmc));
2430 		sdhci_dumpregs(host);
2431 
2432 		host->cmd->error = -ETIMEDOUT;
2433 		sdhci_finish_mrq(host, host->cmd->mrq);
2434 	}
2435 
2436 	mmiowb();
2437 	spin_unlock_irqrestore(&host->lock, flags);
2438 }
2439 
2440 static void sdhci_timeout_data_timer(struct timer_list *t)
2441 {
2442 	struct sdhci_host *host;
2443 	unsigned long flags;
2444 
2445 	host = from_timer(host, t, data_timer);
2446 
2447 	spin_lock_irqsave(&host->lock, flags);
2448 
2449 	if (host->data || host->data_cmd ||
2450 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2451 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2452 		       mmc_hostname(host->mmc));
2453 		sdhci_dumpregs(host);
2454 
2455 		if (host->data) {
2456 			host->data->error = -ETIMEDOUT;
2457 			sdhci_finish_data(host);
2458 		} else if (host->data_cmd) {
2459 			host->data_cmd->error = -ETIMEDOUT;
2460 			sdhci_finish_mrq(host, host->data_cmd->mrq);
2461 		} else {
2462 			host->cmd->error = -ETIMEDOUT;
2463 			sdhci_finish_mrq(host, host->cmd->mrq);
2464 		}
2465 	}
2466 
2467 	mmiowb();
2468 	spin_unlock_irqrestore(&host->lock, flags);
2469 }
2470 
2471 /*****************************************************************************\
2472  *                                                                           *
2473  * Interrupt handling                                                        *
2474  *                                                                           *
2475 \*****************************************************************************/
2476 
2477 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2478 {
2479 	if (!host->cmd) {
2480 		/*
2481 		 * SDHCI recovers from errors by resetting the cmd and data
2482 		 * circuits.  Until that is done, there very well might be more
2483 		 * interrupts, so ignore them in that case.
2484 		 */
2485 		if (host->pending_reset)
2486 			return;
2487 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2488 		       mmc_hostname(host->mmc), (unsigned)intmask);
2489 		sdhci_dumpregs(host);
2490 		return;
2491 	}
2492 
2493 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2494 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2495 		if (intmask & SDHCI_INT_TIMEOUT)
2496 			host->cmd->error = -ETIMEDOUT;
2497 		else
2498 			host->cmd->error = -EILSEQ;
2499 
2500 		/*
2501 		 * If this command initiates a data phase and a response
2502 		 * CRC error is signalled, the card can start transferring
2503 		 * data - the card may have received the command without
2504 		 * error.  We must not terminate the mmc_request early.
2505 		 *
2506 		 * If the card did not receive the command or returned an
2507 		 * error which prevented it sending data, the data phase
2508 		 * will time out.
2509 		 */
2510 		if (host->cmd->data &&
2511 		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2512 		     SDHCI_INT_CRC) {
2513 			host->cmd = NULL;
2514 			return;
2515 		}
2516 
2517 		sdhci_finish_mrq(host, host->cmd->mrq);
2518 		return;
2519 	}
2520 
2521 	if (intmask & SDHCI_INT_RESPONSE)
2522 		sdhci_finish_command(host);
2523 }
2524 
2525 static void sdhci_adma_show_error(struct sdhci_host *host)
2526 {
2527 	void *desc = host->adma_table;
2528 
2529 	sdhci_dumpregs(host);
2530 
2531 	while (true) {
2532 		struct sdhci_adma2_64_desc *dma_desc = desc;
2533 
2534 		if (host->flags & SDHCI_USE_64_BIT_DMA)
2535 			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2536 			    desc, le32_to_cpu(dma_desc->addr_hi),
2537 			    le32_to_cpu(dma_desc->addr_lo),
2538 			    le16_to_cpu(dma_desc->len),
2539 			    le16_to_cpu(dma_desc->cmd));
2540 		else
2541 			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2542 			    desc, le32_to_cpu(dma_desc->addr_lo),
2543 			    le16_to_cpu(dma_desc->len),
2544 			    le16_to_cpu(dma_desc->cmd));
2545 
2546 		desc += host->desc_sz;
2547 
2548 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2549 			break;
2550 	}
2551 }
2552 
2553 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2554 {
2555 	u32 command;
2556 
2557 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2558 	if (intmask & SDHCI_INT_DATA_AVAIL) {
2559 		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2560 		if (command == MMC_SEND_TUNING_BLOCK ||
2561 		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2562 			host->tuning_done = 1;
2563 			wake_up(&host->buf_ready_int);
2564 			return;
2565 		}
2566 	}
2567 
2568 	if (!host->data) {
2569 		struct mmc_command *data_cmd = host->data_cmd;
2570 
2571 		/*
2572 		 * The "data complete" interrupt is also used to
2573 		 * indicate that a busy state has ended. See comment
2574 		 * above in sdhci_cmd_irq().
2575 		 */
2576 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2577 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2578 				host->data_cmd = NULL;
2579 				data_cmd->error = -ETIMEDOUT;
2580 				sdhci_finish_mrq(host, data_cmd->mrq);
2581 				return;
2582 			}
2583 			if (intmask & SDHCI_INT_DATA_END) {
2584 				host->data_cmd = NULL;
2585 				/*
2586 				 * Some cards handle busy-end interrupt
2587 				 * before the command completed, so make
2588 				 * sure we do things in the proper order.
2589 				 */
2590 				if (host->cmd == data_cmd)
2591 					return;
2592 
2593 				sdhci_finish_mrq(host, data_cmd->mrq);
2594 				return;
2595 			}
2596 		}
2597 
2598 		/*
2599 		 * SDHCI recovers from errors by resetting the cmd and data
2600 		 * circuits. Until that is done, there very well might be more
2601 		 * interrupts, so ignore them in that case.
2602 		 */
2603 		if (host->pending_reset)
2604 			return;
2605 
2606 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2607 		       mmc_hostname(host->mmc), (unsigned)intmask);
2608 		sdhci_dumpregs(host);
2609 
2610 		return;
2611 	}
2612 
2613 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2614 		host->data->error = -ETIMEDOUT;
2615 	else if (intmask & SDHCI_INT_DATA_END_BIT)
2616 		host->data->error = -EILSEQ;
2617 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2618 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2619 			!= MMC_BUS_TEST_R)
2620 		host->data->error = -EILSEQ;
2621 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2622 		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2623 		sdhci_adma_show_error(host);
2624 		host->data->error = -EIO;
2625 		if (host->ops->adma_workaround)
2626 			host->ops->adma_workaround(host, intmask);
2627 	}
2628 
2629 	if (host->data->error)
2630 		sdhci_finish_data(host);
2631 	else {
2632 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2633 			sdhci_transfer_pio(host);
2634 
2635 		/*
2636 		 * We currently don't do anything fancy with DMA
2637 		 * boundaries, but as we can't disable the feature
2638 		 * we need to at least restart the transfer.
2639 		 *
2640 		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2641 		 * should return a valid address to continue from, but as
2642 		 * some controllers are faulty, don't trust them.
2643 		 */
2644 		if (intmask & SDHCI_INT_DMA_END) {
2645 			u32 dmastart, dmanow;
2646 			dmastart = sg_dma_address(host->data->sg);
2647 			dmanow = dmastart + host->data->bytes_xfered;
2648 			/*
2649 			 * Force update to the next DMA block boundary.
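			 * For example, with the default 512 KiB boundary a
			 * transfer interrupted at address 0x12345678 resumes
			 * at 0x12380000.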
2650 			 */
2651 			dmanow = (dmanow &
2652 				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2653 				SDHCI_DEFAULT_BOUNDARY_SIZE;
2654 			host->data->bytes_xfered = dmanow - dmastart;
2655 			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2656 			    dmastart, host->data->bytes_xfered, dmanow);
2657 			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2658 		}
2659 
2660 		if (intmask & SDHCI_INT_DATA_END) {
2661 			if (host->cmd == host->data_cmd) {
2662 				/*
2663 				 * Data managed to finish before the
2664 				 * command completed. Make sure we do
2665 				 * things in the proper order.
2666 				 */
2667 				host->data_early = 1;
2668 			} else {
2669 				sdhci_finish_data(host);
2670 			}
2671 		}
2672 	}
2673 }
2674 
2675 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2676 {
2677 	irqreturn_t result = IRQ_NONE;
2678 	struct sdhci_host *host = dev_id;
2679 	u32 intmask, mask, unexpected = 0;
2680 	int max_loops = 16;
2681 
2682 	spin_lock(&host->lock);
2683 
2684 	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2685 		spin_unlock(&host->lock);
2686 		return IRQ_NONE;
2687 	}
2688 
2689 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2690 	if (!intmask || intmask == 0xffffffff) {
2691 		result = IRQ_NONE;
2692 		goto out;
2693 	}
2694 
2695 	do {
2696 		DBG("IRQ status 0x%08x\n", intmask);
2697 
2698 		if (host->ops->irq) {
2699 			intmask = host->ops->irq(host, intmask);
2700 			if (!intmask)
2701 				goto cont;
2702 		}
2703 
2704 		/* Clear selected interrupts. */
2705 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2706 				  SDHCI_INT_BUS_POWER);
2707 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2708 
2709 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2710 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2711 				      SDHCI_CARD_PRESENT;
2712 
2713 			/*
			 * There is an observation on the i.MX eSDHC: the
			 * INSERT bit is immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent an interrupt storm which would
			 * freeze the system.  The REMOVE bit behaves the
			 * same way.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
2723 			 */
2724 			host->ier &= ~(SDHCI_INT_CARD_INSERT |
2725 				       SDHCI_INT_CARD_REMOVE);
2726 			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2727 					       SDHCI_INT_CARD_INSERT;
2728 			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2729 			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2730 
2731 			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2732 				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2733 
2734 			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2735 						       SDHCI_INT_CARD_REMOVE);
2736 			result = IRQ_WAKE_THREAD;
2737 		}
2738 
2739 		if (intmask & SDHCI_INT_CMD_MASK)
2740 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2741 
2742 		if (intmask & SDHCI_INT_DATA_MASK)
2743 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2744 
2745 		if (intmask & SDHCI_INT_BUS_POWER)
2746 			pr_err("%s: Card is consuming too much power!\n",
2747 				mmc_hostname(host->mmc));
2748 
2749 		if (intmask & SDHCI_INT_RETUNE)
2750 			mmc_retune_needed(host->mmc);
2751 
2752 		if ((intmask & SDHCI_INT_CARD_INT) &&
2753 		    (host->ier & SDHCI_INT_CARD_INT)) {
2754 			sdhci_enable_sdio_irq_nolock(host, false);
2755 			host->thread_isr |= SDHCI_INT_CARD_INT;
2756 			result = IRQ_WAKE_THREAD;
2757 		}
2758 
2759 		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2760 			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2761 			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2762 			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2763 
2764 		if (intmask) {
2765 			unexpected |= intmask;
2766 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2767 		}
2768 cont:
2769 		if (result == IRQ_NONE)
2770 			result = IRQ_HANDLED;
2771 
2772 		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2773 	} while (intmask && --max_loops);
2774 out:
2775 	spin_unlock(&host->lock);
2776 
2777 	if (unexpected) {
2778 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2779 			   mmc_hostname(host->mmc), unexpected);
2780 		sdhci_dumpregs(host);
2781 	}
2782 
2783 	return result;
2784 }
2785 
2786 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2787 {
2788 	struct sdhci_host *host = dev_id;
2789 	unsigned long flags;
2790 	u32 isr;
2791 
2792 	spin_lock_irqsave(&host->lock, flags);
2793 	isr = host->thread_isr;
2794 	host->thread_isr = 0;
2795 	spin_unlock_irqrestore(&host->lock, flags);
2796 
2797 	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2798 		struct mmc_host *mmc = host->mmc;
2799 
2800 		mmc->ops->card_event(mmc);
2801 		mmc_detect_change(mmc, msecs_to_jiffies(200));
2802 	}
2803 
2804 	if (isr & SDHCI_INT_CARD_INT) {
2805 		sdio_run_irqs(host->mmc);
2806 
2807 		spin_lock_irqsave(&host->lock, flags);
2808 		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2809 			sdhci_enable_sdio_irq_nolock(host, true);
2810 		spin_unlock_irqrestore(&host->lock, flags);
2811 	}
2812 
2813 	return isr ? IRQ_HANDLED : IRQ_NONE;
2814 }
2815 
2816 /*****************************************************************************\
2817  *                                                                           *
2818  * Suspend/resume                                                            *
2819  *                                                                           *
2820 \*****************************************************************************/
2821 
2822 #ifdef CONFIG_PM
2823 /*
2824  * To enable wakeup events, the corresponding events have to be enabled in
2825  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2826  * Table' in the SD Host Controller Standard Specification.
2827  * It is useless to restore SDHCI_INT_ENABLE state in
2828  * sdhci_disable_irq_wakeups() since it will be set by
2829  * sdhci_enable_card_detection() or sdhci_init().
2830  */
2831 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
2832 {
2833 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
2834 		  SDHCI_WAKE_ON_INT;
2835 	u32 irq_val = 0;
2836 	u8 wake_val = 0;
2837 	u8 val;
2838 
2839 	if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
2840 		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
2841 		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
2842 	}
2843 
2844 	wake_val |= SDHCI_WAKE_ON_INT;
2845 	irq_val |= SDHCI_INT_CARD_INT;
2846 
2847 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2848 	val &= ~mask;
2849 	val |= wake_val;
2850 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2851 
2852 	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
2853 
2854 	host->irq_wake_enabled = !enable_irq_wake(host->irq);
2855 
2856 	return host->irq_wake_enabled;
2857 }
2858 
2859 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2860 {
2861 	u8 val;
2862 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2863 			| SDHCI_WAKE_ON_INT;
2864 
2865 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2866 	val &= ~mask;
2867 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2868 
2869 	disable_irq_wake(host->irq);
2870 
2871 	host->irq_wake_enabled = false;
2872 }
2873 
2874 int sdhci_suspend_host(struct sdhci_host *host)
2875 {
2876 	sdhci_disable_card_detection(host);
2877 
2878 	mmc_retune_timer_stop(host->mmc);
2879 
2880 	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
2881 	    !sdhci_enable_irq_wakeups(host)) {
2882 		host->ier = 0;
2883 		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2884 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2885 		free_irq(host->irq, host);
2886 	}
2887 
2888 	return 0;
2889 }
2890 
2891 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2892 
2893 int sdhci_resume_host(struct sdhci_host *host)
2894 {
2895 	struct mmc_host *mmc = host->mmc;
2896 	int ret = 0;
2897 
2898 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2899 		if (host->ops->enable_dma)
2900 			host->ops->enable_dma(host);
2901 	}
2902 
2903 	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2904 	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2905 		/* Card keeps power but host controller does not */
2906 		sdhci_init(host, 0);
2907 		host->pwr = 0;
2908 		host->clock = 0;
2909 		mmc->ops->set_ios(mmc, &mmc->ios);
2910 	} else {
2911 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2912 		mmiowb();
2913 	}
2914 
2915 	if (host->irq_wake_enabled) {
2916 		sdhci_disable_irq_wakeups(host);
2917 	} else {
2918 		ret = request_threaded_irq(host->irq, sdhci_irq,
2919 					   sdhci_thread_irq, IRQF_SHARED,
2920 					   mmc_hostname(host->mmc), host);
2921 		if (ret)
2922 			return ret;
2923 	}
2924 
2925 	sdhci_enable_card_detection(host);
2926 
2927 	return ret;
2928 }
2929 
2930 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2931 
2932 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2933 {
2934 	unsigned long flags;
2935 
2936 	mmc_retune_timer_stop(host->mmc);
2937 
2938 	spin_lock_irqsave(&host->lock, flags);
2939 	host->ier &= SDHCI_INT_CARD_INT;
2940 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2941 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2942 	spin_unlock_irqrestore(&host->lock, flags);
2943 
2944 	synchronize_hardirq(host->irq);
2945 
2946 	spin_lock_irqsave(&host->lock, flags);
2947 	host->runtime_suspended = true;
2948 	spin_unlock_irqrestore(&host->lock, flags);
2949 
2950 	return 0;
2951 }
2952 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2953 
2954 int sdhci_runtime_resume_host(struct sdhci_host *host)
2955 {
2956 	struct mmc_host *mmc = host->mmc;
2957 	unsigned long flags;
2958 	int host_flags = host->flags;
2959 
2960 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2961 		if (host->ops->enable_dma)
2962 			host->ops->enable_dma(host);
2963 	}
2964 
2965 	sdhci_init(host, 0);
2966 
2967 	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
2968 	    mmc->ios.power_mode != MMC_POWER_OFF) {
2969 		/* Force clock and power re-program */
2970 		host->pwr = 0;
2971 		host->clock = 0;
2972 		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
2973 		mmc->ops->set_ios(mmc, &mmc->ios);
2974 
2975 		if ((host_flags & SDHCI_PV_ENABLED) &&
2976 		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2977 			spin_lock_irqsave(&host->lock, flags);
2978 			sdhci_enable_preset_value(host, true);
2979 			spin_unlock_irqrestore(&host->lock, flags);
2980 		}
2981 
2982 		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
2983 		    mmc->ops->hs400_enhanced_strobe)
2984 			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
2985 	}
2986 
2987 	spin_lock_irqsave(&host->lock, flags);
2988 
2989 	host->runtime_suspended = false;
2990 
2991 	/* Enable SDIO IRQ */
2992 	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2993 		sdhci_enable_sdio_irq_nolock(host, true);
2994 
2995 	/* Enable Card Detection */
2996 	sdhci_enable_card_detection(host);
2997 
2998 	spin_unlock_irqrestore(&host->lock, flags);
2999 
3000 	return 0;
3001 }
3002 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3003 
3004 #endif /* CONFIG_PM */
3005 
3006 /*****************************************************************************\
3007  *                                                                           *
3008  * Command Queue Engine (CQE) helpers                                        *
3009  *                                                                           *
3010 \*****************************************************************************/
3011 
3012 void sdhci_cqe_enable(struct mmc_host *mmc)
3013 {
3014 	struct sdhci_host *host = mmc_priv(mmc);
3015 	unsigned long flags;
3016 	u8 ctrl;
3017 
3018 	spin_lock_irqsave(&host->lock, flags);
3019 
3020 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3021 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3022 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3023 		ctrl |= SDHCI_CTRL_ADMA64;
3024 	else
3025 		ctrl |= SDHCI_CTRL_ADMA32;
3026 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3027 
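	/* CQE uses a fixed 512-byte block size; keep the configured SDMA boundary */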
3028 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3029 		     SDHCI_BLOCK_SIZE);
3030 
3031 	/* Set maximum timeout */
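	/* 0xE is the largest Data Timeout Counter value: 2^27 timeout clock cycles */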
3032 	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3033 
3034 	host->ier = host->cqe_ier;
3035 
3036 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3037 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3038 
3039 	host->cqe_on = true;
3040 
3041 	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3042 		 mmc_hostname(mmc), host->ier,
3043 		 sdhci_readl(host, SDHCI_INT_STATUS));
3044 
3045 	mmiowb();
3046 	spin_unlock_irqrestore(&host->lock, flags);
3047 }
3048 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3049 
3050 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3051 {
3052 	struct sdhci_host *host = mmc_priv(mmc);
3053 	unsigned long flags;
3054 
3055 	spin_lock_irqsave(&host->lock, flags);
3056 
3057 	sdhci_set_default_irqs(host);
3058 
3059 	host->cqe_on = false;
3060 
3061 	if (recovery) {
3062 		sdhci_do_reset(host, SDHCI_RESET_CMD);
3063 		sdhci_do_reset(host, SDHCI_RESET_DATA);
3064 	}
3065 
3066 	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3067 		 mmc_hostname(mmc), host->ier,
3068 		 sdhci_readl(host, SDHCI_INT_STATUS));
3069 
3070 	mmiowb();
3071 	spin_unlock_irqrestore(&host->lock, flags);
3072 }
3073 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3074 
3075 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3076 		   int *data_error)
3077 {
3078 	u32 mask;
3079 
3080 	if (!host->cqe_on)
3081 		return false;
3082 
3083 	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3084 		*cmd_error = -EILSEQ;
3085 	else if (intmask & SDHCI_INT_TIMEOUT)
3086 		*cmd_error = -ETIMEDOUT;
3087 	else
3088 		*cmd_error = 0;
3089 
3090 	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3091 		*data_error = -EILSEQ;
3092 	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3093 		*data_error = -ETIMEDOUT;
3094 	else if (intmask & SDHCI_INT_ADMA_ERROR)
3095 		*data_error = -EIO;
3096 	else
3097 		*data_error = 0;
3098 
3099 	/* Clear selected interrupts. */
3100 	mask = intmask & host->cqe_ier;
3101 	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3102 
3103 	if (intmask & SDHCI_INT_BUS_POWER)
3104 		pr_err("%s: Card is consuming too much power!\n",
3105 		       mmc_hostname(host->mmc));
3106 
3107 	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3108 	if (intmask) {
3109 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3110 		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3111 		       mmc_hostname(host->mmc), intmask);
3112 		sdhci_dumpregs(host);
3113 	}
3114 
3115 	return true;
3116 }
3117 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3118 
3119 /*****************************************************************************\
3120  *                                                                           *
3121  * Device allocation/registration                                            *
3122  *                                                                           *
3123 \*****************************************************************************/
3124 
3125 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3126 	size_t priv_size)
3127 {
3128 	struct mmc_host *mmc;
3129 	struct sdhci_host *host;
3130 
3131 	WARN_ON(dev == NULL);
3132 
3133 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3134 	if (!mmc)
3135 		return ERR_PTR(-ENOMEM);
3136 
3137 	host = mmc_priv(mmc);
3138 	host->mmc = mmc;
3139 	host->mmc_host_ops = sdhci_ops;
3140 	mmc->ops = &host->mmc_host_ops;
3141 
3142 	host->flags = SDHCI_SIGNALING_330;
3143 
3144 	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3145 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3146 
3147 	host->tuning_delay = -1;
3148 
3149 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3150 
3151 	return host;
3152 }
3153 
3154 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3155 
3156 static int sdhci_set_dma_mask(struct sdhci_host *host)
3157 {
3158 	struct mmc_host *mmc = host->mmc;
3159 	struct device *dev = mmc_dev(mmc);
3160 	int ret = -EINVAL;
3161 
3162 	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3163 		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3164 
	/* Try a 64-bit mask if the hardware is capable of it */
3166 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3167 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3168 		if (ret) {
3169 			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3170 				mmc_hostname(mmc));
3171 			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3172 		}
3173 	}
3174 
3175 	/* 32-bit mask as default & fallback */
3176 	if (ret) {
3177 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3178 		if (ret)
3179 			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3180 				mmc_hostname(mmc));
3181 	}
3182 
3183 	return ret;
3184 }
3185 
3186 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3187 {
3188 	u16 v;
3189 	u64 dt_caps_mask = 0;
3190 	u64 dt_caps = 0;
3191 
3192 	if (host->read_caps)
3193 		return;
3194 
3195 	host->read_caps = true;
3196 
3197 	if (debug_quirks)
3198 		host->quirks = debug_quirks;
3199 
3200 	if (debug_quirks2)
3201 		host->quirks2 = debug_quirks2;
3202 
3203 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3204 
3205 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3206 			     "sdhci-caps-mask", &dt_caps_mask);
3207 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3208 			     "sdhci-caps", &dt_caps);
3209 
3210 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3211 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3212 
3213 	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3214 		return;
3215 
3216 	if (caps) {
3217 		host->caps = *caps;
3218 	} else {
3219 		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3220 		host->caps &= ~lower_32_bits(dt_caps_mask);
3221 		host->caps |= lower_32_bits(dt_caps);
3222 	}
3223 
3224 	if (host->version < SDHCI_SPEC_300)
3225 		return;
3226 
3227 	if (caps1) {
3228 		host->caps1 = *caps1;
3229 	} else {
3230 		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3231 		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3232 		host->caps1 |= upper_32_bits(dt_caps);
3233 	}
3234 }
3235 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3236 
3237 int sdhci_setup_host(struct sdhci_host *host)
3238 {
3239 	struct mmc_host *mmc;
3240 	u32 max_current_caps;
3241 	unsigned int ocr_avail;
3242 	unsigned int override_timeout_clk;
3243 	u32 max_clk;
3244 	int ret;
3245 
3246 	WARN_ON(host == NULL);
3247 	if (host == NULL)
3248 		return -EINVAL;
3249 
3250 	mmc = host->mmc;
3251 
3252 	/*
3253 	 * If there are external regulators, get them. Note this must be done
3254 	 * early before resetting the host and reading the capabilities so that
3255 	 * the host can take the appropriate action if regulators are not
3256 	 * available.
3257 	 */
3258 	ret = mmc_regulator_get_supply(mmc);
3259 	if (ret)
3260 		return ret;
3261 
3262 	DBG("Version:   0x%08x | Present:  0x%08x\n",
3263 	    sdhci_readw(host, SDHCI_HOST_VERSION),
3264 	    sdhci_readl(host, SDHCI_PRESENT_STATE));
3265 	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3266 	    sdhci_readl(host, SDHCI_CAPABILITIES),
3267 	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
3268 
3269 	sdhci_read_caps(host);
3270 
3271 	override_timeout_clk = host->timeout_clk;
3272 
3273 	if (host->version > SDHCI_SPEC_300) {
3274 		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3275 		       mmc_hostname(mmc), host->version);
3276 	}
3277 
3278 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3279 		host->flags |= SDHCI_USE_SDMA;
3280 	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3281 		DBG("Controller doesn't have SDMA capability\n");
3282 	else
3283 		host->flags |= SDHCI_USE_SDMA;
3284 
3285 	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3286 		(host->flags & SDHCI_USE_SDMA)) {
3287 		DBG("Disabling DMA as it is marked broken\n");
3288 		host->flags &= ~SDHCI_USE_SDMA;
3289 	}
3290 
3291 	if ((host->version >= SDHCI_SPEC_200) &&
3292 		(host->caps & SDHCI_CAN_DO_ADMA2))
3293 		host->flags |= SDHCI_USE_ADMA;
3294 
3295 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3296 		(host->flags & SDHCI_USE_ADMA)) {
3297 		DBG("Disabling ADMA as it is marked broken\n");
3298 		host->flags &= ~SDHCI_USE_ADMA;
3299 	}
3300 
3301 	/*
3302 	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3303 	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
3304 	 * that during the first call to ->enable_dma().  Similarly
3305 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3306 	 * implement.
3307 	 */
3308 	if (host->caps & SDHCI_CAN_64BIT)
3309 		host->flags |= SDHCI_USE_64_BIT_DMA;
3310 
3311 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3312 		ret = sdhci_set_dma_mask(host);
3313 
3314 		if (!ret && host->ops->enable_dma)
3315 			ret = host->ops->enable_dma(host);
3316 
3317 		if (ret) {
3318 			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3319 				mmc_hostname(mmc));
3320 			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3321 
3322 			ret = 0;
3323 		}
3324 	}
3325 
3326 	/* SDMA does not support 64-bit DMA */
3327 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3328 		host->flags &= ~SDHCI_USE_SDMA;
3329 
3330 	if (host->flags & SDHCI_USE_ADMA) {
3331 		dma_addr_t dma;
3332 		void *buf;
3333 
3334 		/*
3335 		 * The DMA descriptor table size is calculated as the maximum
3336 		 * number of segments times 2, to allow for an alignment
3337 		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
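		 * For example, with SDHCI_MAX_SEGS == 128 and 12-byte 64-bit
		 * descriptors this works out to (128 * 2 + 1) * 12 = 3084
		 * bytes.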
3339 		 */
3340 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
3341 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3342 					      SDHCI_ADMA2_64_DESC_SZ;
3343 			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3344 		} else {
3345 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3346 					      SDHCI_ADMA2_32_DESC_SZ;
3347 			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3348 		}
3349 
3350 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3351 		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3352 					 host->adma_table_sz, &dma, GFP_KERNEL);
3353 		if (!buf) {
3354 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3355 				mmc_hostname(mmc));
3356 			host->flags &= ~SDHCI_USE_ADMA;
3357 		} else if ((dma + host->align_buffer_sz) &
3358 			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3359 			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3360 				mmc_hostname(mmc));
3361 			host->flags &= ~SDHCI_USE_ADMA;
3362 			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3363 					  host->adma_table_sz, buf, dma);
3364 		} else {
3365 			host->align_buffer = buf;
3366 			host->align_addr = dma;
3367 
3368 			host->adma_table = buf + host->align_buffer_sz;
3369 			host->adma_addr = dma + host->align_buffer_sz;
3370 		}
3371 	}
3372 
3373 	/*
3374 	 * If we use DMA, then it's up to the caller to set the DMA
3375 	 * mask, but PIO does not need the hw shim so we set a new
3376 	 * mask here in that case.
3377 	 */
3378 	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3379 		host->dma_mask = DMA_BIT_MASK(64);
3380 		mmc_dev(mmc)->dma_mask = &host->dma_mask;
3381 	}
3382 
3383 	if (host->version >= SDHCI_SPEC_300)
3384 		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3385 			>> SDHCI_CLOCK_BASE_SHIFT;
3386 	else
3387 		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3388 			>> SDHCI_CLOCK_BASE_SHIFT;
3389 
3390 	host->max_clk *= 1000000;
3391 	if (host->max_clk == 0 || host->quirks &
3392 			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3393 		if (!host->ops->get_max_clock) {
3394 			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3395 			       mmc_hostname(mmc));
3396 			ret = -ENODEV;
3397 			goto undma;
3398 		}
3399 		host->max_clk = host->ops->get_max_clock(host);
3400 	}
3401 
3402 	/*
3403 	 * In case of Host Controller v3.00, find out whether clock
3404 	 * multiplier is supported.
3405 	 */
3406 	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3407 			SDHCI_CLOCK_MUL_SHIFT;
3408 
3409 	/*
	 * If the value of Clock Multiplier is 0, programmable clock mode is
	 * not supported; otherwise the actual clock multiplier is one more
	 * than the value in the Capabilities Register.
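	 * For example, a Clock Multiplier value of 7 means an actual
	 * multiplier of 8.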
3414 	 */
3415 	if (host->clk_mul)
3416 		host->clk_mul += 1;
3417 
3418 	/*
3419 	 * Set host parameters.
3420 	 */
3421 	max_clk = host->max_clk;
3422 
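	/*
	 * The minimum clock is the base clock divided by the largest
	 * divider: SDHCI_MAX_DIV_SPEC_200 (256) or SDHCI_MAX_DIV_SPEC_300
	 * (2046), or 1024 when using the programmable clock multiplier.
	 */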
3423 	if (host->ops->get_min_clock)
3424 		mmc->f_min = host->ops->get_min_clock(host);
3425 	else if (host->version >= SDHCI_SPEC_300) {
3426 		if (host->clk_mul) {
3427 			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3428 			max_clk = host->max_clk * host->clk_mul;
3429 		} else
3430 			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3431 	} else
3432 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3433 
3434 	if (!mmc->f_max || mmc->f_max > max_clk)
3435 		mmc->f_max = max_clk;
3436 
3437 	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3438 		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3439 					SDHCI_TIMEOUT_CLK_SHIFT;
3440 
3441 		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3442 			host->timeout_clk *= 1000;
3443 
3444 		if (host->timeout_clk == 0) {
3445 			if (!host->ops->get_timeout_clock) {
3446 				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3447 					mmc_hostname(mmc));
3448 				ret = -ENODEV;
3449 				goto undma;
3450 			}
3451 
3452 			host->timeout_clk =
3453 				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3454 					     1000);
3455 		}
3456 
3457 		if (override_timeout_clk)
3458 			host->timeout_clk = override_timeout_clk;
3459 
3460 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3461 			host->ops->get_max_timeout_count(host) : 1 << 27;
3462 		mmc->max_busy_timeout /= host->timeout_clk;
3463 	}
3464 
3465 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3466 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3467 
3468 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3469 		host->flags |= SDHCI_AUTO_CMD12;
3470 
3471 	/* Auto-CMD23 stuff only works in ADMA or PIO. */
3472 	if ((host->version >= SDHCI_SPEC_300) &&
3473 	    ((host->flags & SDHCI_USE_ADMA) ||
3474 	     !(host->flags & SDHCI_USE_SDMA)) &&
3475 	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3476 		host->flags |= SDHCI_AUTO_CMD23;
3477 		DBG("Auto-CMD23 available\n");
3478 	} else {
3479 		DBG("Auto-CMD23 unavailable\n");
3480 	}
3481 
3482 	/*
3483 	 * A controller may support 8-bit width, but the board itself
3484 	 * might not have the pins brought out.  Boards that support
3485 	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3486 	 * their platform code before calling sdhci_add_host(), and we
3487 	 * won't assume 8-bit width for hosts without that CAP.
3488 	 */
3489 	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3490 		mmc->caps |= MMC_CAP_4_BIT_DATA;
3491 
3492 	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3493 		mmc->caps &= ~MMC_CAP_CMD23;
3494 
3495 	if (host->caps & SDHCI_CAN_DO_HISPD)
3496 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3497 
3498 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3499 	    mmc_card_is_removable(mmc) &&
3500 	    mmc_gpio_get_cd(host->mmc) < 0)
3501 		mmc->caps |= MMC_CAP_NEEDS_POLL;
3502 
3503 	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3504 	if (!IS_ERR(mmc->supply.vqmmc)) {
3505 		ret = regulator_enable(mmc->supply.vqmmc);
3506 		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3507 						    1950000))
3508 			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3509 					 SDHCI_SUPPORT_SDR50 |
3510 					 SDHCI_SUPPORT_DDR50);
3511 		if (ret) {
3512 			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3513 				mmc_hostname(mmc), ret);
3514 			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3515 		}
3516 	}
3517 
3518 	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3519 		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3520 				 SDHCI_SUPPORT_DDR50);
3521 	}
3522 
3523 	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3524 	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3525 			   SDHCI_SUPPORT_DDR50))
3526 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3527 
	/* SDR104 support also implies SDR50 support */
3529 	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3530 		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/*
		 * SD3.0: SDR104 is supported, so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
3534 		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3535 			mmc->caps2 |= MMC_CAP2_HS200;
3536 	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3537 		mmc->caps |= MMC_CAP_UHS_SDR50;
3538 	}
3539 
3540 	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3541 	    (host->caps1 & SDHCI_SUPPORT_HS400))
3542 		mmc->caps2 |= MMC_CAP2_HS400;
3543 
3544 	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3545 	    (IS_ERR(mmc->supply.vqmmc) ||
3546 	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3547 					     1300000)))
3548 		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3549 
3550 	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3551 	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3552 		mmc->caps |= MMC_CAP_UHS_DDR50;
3553 
3554 	/* Does the host need tuning for SDR50? */
3555 	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3556 		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3557 
3558 	/* Driver Type(s) (A, C, D) supported by the host */
3559 	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3560 		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3561 	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3562 		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3563 	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3564 		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3565 
3566 	/* Initial value for re-tuning timer count */
3567 	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3568 			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3569 
3570 	/*
	 * If the Re-tuning Timer is not disabled, the actual re-tuning
	 * period is 2 ^ (n - 1) seconds.
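	 * For example, a register value of 4 gives a 2 ^ 3 = 8 second period.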
3573 	 */
3574 	if (host->tuning_count)
3575 		host->tuning_count = 1 << (host->tuning_count - 1);
3576 
3577 	/* Re-tuning mode supported by the Host Controller */
3578 	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3579 			     SDHCI_RETUNING_MODE_SHIFT;
3580 
3581 	ocr_avail = 0;
3582 
3583 	/*
3584 	 * According to SD Host Controller spec v3.00, if the Host System
3585 	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3586 	 * the value is meaningful only if Voltage Support in the Capabilities
3587 	 * register is set. The actual current value is 4 times the register
3588 	 * value.
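	 * For example, a register field value of 25 means 25 * 4 = 100 mA.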
3589 	 */
3590 	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3591 	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3592 		int curr = regulator_get_current_limit(mmc->supply.vmmc);
3593 		if (curr > 0) {
3594 
3595 			/* convert to SDHCI_MAX_CURRENT format */
3596 			curr = curr/1000;  /* convert to mA */
3597 			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3598 
3599 			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3600 			max_current_caps =
3601 				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3602 				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3603 				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
3604 		}
3605 	}
3606 
3607 	if (host->caps & SDHCI_CAN_VDD_330) {
3608 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3609 
3610 		mmc->max_current_330 = ((max_current_caps &
3611 				   SDHCI_MAX_CURRENT_330_MASK) >>
3612 				   SDHCI_MAX_CURRENT_330_SHIFT) *
3613 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3614 	}
3615 	if (host->caps & SDHCI_CAN_VDD_300) {
3616 		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3617 
3618 		mmc->max_current_300 = ((max_current_caps &
3619 				   SDHCI_MAX_CURRENT_300_MASK) >>
3620 				   SDHCI_MAX_CURRENT_300_SHIFT) *
3621 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3622 	}
3623 	if (host->caps & SDHCI_CAN_VDD_180) {
3624 		ocr_avail |= MMC_VDD_165_195;
3625 
3626 		mmc->max_current_180 = ((max_current_caps &
3627 				   SDHCI_MAX_CURRENT_180_MASK) >>
3628 				   SDHCI_MAX_CURRENT_180_SHIFT) *
3629 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3630 	}
3631 
3632 	/* If OCR set by host, use it instead. */
3633 	if (host->ocr_mask)
3634 		ocr_avail = host->ocr_mask;
3635 
3636 	/* If OCR set by external regulators, give it highest prio. */
3637 	if (mmc->ocr_avail)
3638 		ocr_avail = mmc->ocr_avail;
3639 
3640 	mmc->ocr_avail = ocr_avail;
3641 	mmc->ocr_avail_sdio = ocr_avail;
3642 	if (host->ocr_avail_sdio)
3643 		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3644 	mmc->ocr_avail_sd = ocr_avail;
3645 	if (host->ocr_avail_sd)
3646 		mmc->ocr_avail_sd &= host->ocr_avail_sd;
3647 	else /* normal SD controllers don't support 1.8V */
3648 		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3649 	mmc->ocr_avail_mmc = ocr_avail;
3650 	if (host->ocr_avail_mmc)
3651 		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
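	/*
	 * Net precedence: capability-derived voltages are overridden by a
	 * platform-supplied host->ocr_mask, which in turn is overridden by
	 * an external regulator's mmc->ocr_avail; the per-bus-type masks
	 * above only ever narrow the final result.
	 */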
3652 
3653 	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
3655 		       mmc_hostname(mmc));
3656 		ret = -ENODEV;
3657 		goto unreg;
3658 	}
3659 
3660 	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3661 			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3662 			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3663 	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3664 		host->flags |= SDHCI_SIGNALING_180;
3665 
3666 	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3667 		host->flags |= SDHCI_SIGNALING_120;
3668 
3669 	spin_lock_init(&host->lock);
3670 
	/*
	 * Maximum request size in bytes. Limited by the SDMA boundary size
	 * (512 KiB). Some tuning modes impose a 4 MiB limit, but the 512 KiB
	 * SDMA limit is smaller anyway.
	 */
3676 	mmc->max_req_size = 524288;
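	/* 524288 bytes = 512 KiB, i.e. at most 1024 sectors of 512 bytes. */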
3677 
	/*
	 * Maximum number of segments. Depends on whether the hardware
	 * can do scatter/gather.
	 */
3682 	if (host->flags & SDHCI_USE_ADMA) {
3683 		mmc->max_segs = SDHCI_MAX_SEGS;
3684 	} else if (host->flags & SDHCI_USE_SDMA) {
3685 		mmc->max_segs = 1;
3686 		if (swiotlb_max_segment()) {
3687 			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
3688 						IO_TLB_SEGSIZE;
3689 			mmc->max_req_size = min(mmc->max_req_size,
3690 						max_req_size);
3691 		}
3692 	} else { /* PIO */
3693 		mmc->max_segs = SDHCI_MAX_SEGS;
3694 	}
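	/*
	 * With the usual IO_TLB_SHIFT of 11 (2 KiB slots) and IO_TLB_SEGSIZE
	 * of 128, the swiotlb clamp above works out to 256 KiB per request.
	 */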
3695 
	/*
	 * Maximum segment size. In the simplest case a single segment can
	 * cover the whole request; with hardware scatter/gather, however,
	 * each descriptor entry is limited to 64 KiB.
	 */
3701 	if (host->flags & SDHCI_USE_ADMA) {
3702 		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3703 			mmc->max_seg_size = 65535;
3704 		else
3705 			mmc->max_seg_size = 65536;
3706 	} else {
3707 		mmc->max_seg_size = mmc->max_req_size;
3708 	}
3709 
3710 	/*
3711 	 * Maximum block size. This varies from controller to controller and
3712 	 * is specified in the capabilities register.
3713 	 */
3714 	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3715 		mmc->max_blk_size = 2;
3716 	} else {
3717 		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3718 				SDHCI_MAX_BLOCK_SHIFT;
3719 		if (mmc->max_blk_size >= 3) {
3720 			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3721 				mmc_hostname(mmc));
3722 			mmc->max_blk_size = 0;
3723 		}
3724 	}
3725 
3726 	mmc->max_blk_size = 512 << mmc->max_blk_size;
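	/*
	 * The two-bit encoding thus yields 512 << 0 = 512, 512 << 1 = 1024
	 * or 512 << 2 = 2048 bytes; the raw value 3 is reserved, hence the
	 * fallback to 512 above.
	 */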
3727 
3728 	/*
3729 	 * Maximum block count.
3730 	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ?
				1 : 65535;
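	/* 65535 is the largest count the 16-bit Block Count register holds. */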
3732 
3733 	return 0;
3734 
3735 unreg:
3736 	if (!IS_ERR(mmc->supply.vqmmc))
3737 		regulator_disable(mmc->supply.vqmmc);
3738 undma:
3739 	if (host->align_buffer)
3740 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3741 				  host->adma_table_sz, host->align_buffer,
3742 				  host->align_addr);
3743 	host->adma_table = NULL;
3744 	host->align_buffer = NULL;
3745 
3746 	return ret;
3747 }
3748 EXPORT_SYMBOL_GPL(sdhci_setup_host);
3749 
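/*
 * Two-stage host registration, for drivers that need to do work between
 * probing the hardware and registering the MMC host (a minimal,
 * hypothetical sketch; the mydrv_* name is illustrative only):
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	mydrv_tweak_host(host);
 *
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *	return ret;
 *
 * Drivers with nothing to do in between can simply call sdhci_add_host(),
 * which wraps exactly this sequence. sdhci_cleanup_host() undoes
 * sdhci_setup_host() only.
 */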
3750 void sdhci_cleanup_host(struct sdhci_host *host)
3751 {
3752 	struct mmc_host *mmc = host->mmc;
3753 
3754 	if (!IS_ERR(mmc->supply.vqmmc))
3755 		regulator_disable(mmc->supply.vqmmc);
3756 
3757 	if (host->align_buffer)
3758 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3759 				  host->adma_table_sz, host->align_buffer,
3760 				  host->align_addr);
3761 	host->adma_table = NULL;
3762 	host->align_buffer = NULL;
3763 }
3764 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
3765 
3766 int __sdhci_add_host(struct sdhci_host *host)
3767 {
3768 	struct mmc_host *mmc = host->mmc;
3769 	int ret;
3770 
	/*
	 * Init the finish tasklet.
	 */
3774 	tasklet_init(&host->finish_tasklet,
3775 		sdhci_tasklet_finish, (unsigned long)host);
3776 
3777 	timer_setup(&host->timer, sdhci_timeout_timer, 0);
3778 	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
3779 
3780 	init_waitqueue_head(&host->buf_ready_int);
3781 
3782 	sdhci_init(host, 0);
3783 
3784 	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
3786 	if (ret) {
3787 		pr_err("%s: Failed to request IRQ %d: %d\n",
3788 		       mmc_hostname(mmc), host->irq, ret);
3789 		goto untasklet;
3790 	}
3791 
3792 	ret = sdhci_led_register(host);
3793 	if (ret) {
3794 		pr_err("%s: Failed to register LED device: %d\n",
3795 		       mmc_hostname(mmc), ret);
3796 		goto unirq;
3797 	}
3798 
3799 	mmiowb();
3800 
3801 	ret = mmc_add_host(mmc);
3802 	if (ret)
3803 		goto unled;
3804 
3805 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3806 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3807 		(host->flags & SDHCI_USE_ADMA) ?
3808 		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3809 		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3810 
3811 	sdhci_enable_card_detection(host);
3812 
3813 	return 0;
3814 
3815 unled:
3816 	sdhci_led_unregister(host);
3817 unirq:
3818 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3819 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3820 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3821 	free_irq(host->irq, host);
3822 untasklet:
3823 	tasklet_kill(&host->finish_tasklet);
3824 
3825 	return ret;
3826 }
3827 EXPORT_SYMBOL_GPL(__sdhci_add_host);
3828 
3829 int sdhci_add_host(struct sdhci_host *host)
3830 {
3831 	int ret;
3832 
3833 	ret = sdhci_setup_host(host);
3834 	if (ret)
3835 		return ret;
3836 
3837 	ret = __sdhci_add_host(host);
3838 	if (ret)
3839 		goto cleanup;
3840 
3841 	return 0;
3842 
3843 cleanup:
3844 	sdhci_cleanup_host(host);
3845 
3846 	return ret;
3847 }
3848 EXPORT_SYMBOL_GPL(sdhci_add_host);
3849 
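/*
 * Tear down a host added with sdhci_add_host(). If @dead is set, the
 * controller is assumed broken or already gone (e.g. surprise removal):
 * outstanding requests are errored out with -ENOMEDIUM and the final
 * SDHCI_RESET_ALL is skipped.
 */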
3850 void sdhci_remove_host(struct sdhci_host *host, int dead)
3851 {
3852 	struct mmc_host *mmc = host->mmc;
3853 	unsigned long flags;
3854 
3855 	if (dead) {
3856 		spin_lock_irqsave(&host->lock, flags);
3857 
3858 		host->flags |= SDHCI_DEVICE_DEAD;
3859 
3860 		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
3863 			sdhci_error_out_mrqs(host, -ENOMEDIUM);
3864 		}
3865 
3866 		spin_unlock_irqrestore(&host->lock, flags);
3867 	}
3868 
3869 	sdhci_disable_card_detection(host);
3870 
3871 	mmc_remove_host(mmc);
3872 
3873 	sdhci_led_unregister(host);
3874 
3875 	if (!dead)
3876 		sdhci_do_reset(host, SDHCI_RESET_ALL);
3877 
3878 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3879 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3880 	free_irq(host->irq, host);
3881 
3882 	del_timer_sync(&host->timer);
3883 	del_timer_sync(&host->data_timer);
3884 
3885 	tasklet_kill(&host->finish_tasklet);
3886 
3887 	if (!IS_ERR(mmc->supply.vqmmc))
3888 		regulator_disable(mmc->supply.vqmmc);
3889 
3890 	if (host->align_buffer)
3891 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3892 				  host->adma_table_sz, host->align_buffer,
3893 				  host->align_addr);
3894 
3895 	host->adma_table = NULL;
3896 	host->align_buffer = NULL;
3897 }
3899 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3900 
3901 void sdhci_free_host(struct sdhci_host *host)
3902 {
3903 	mmc_free_host(host->mmc);
3904 }
3906 EXPORT_SYMBOL_GPL(sdhci_free_host);
3907 
3908 /*****************************************************************************\
3909  *                                                                           *
3910  * Driver init/exit                                                          *
3911  *                                                                           *
3912 \*****************************************************************************/
3913 
3914 static int __init sdhci_drv_init(void)
3915 {
3916 	pr_info(DRIVER_NAME
3917 		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright (c) Pierre Ossman\n");
3919 
3920 	return 0;
3921 }
3922 
3923 static void __exit sdhci_drv_exit(void)
3924 {
3925 }
3926 
3927 module_init(sdhci_drv_init);
3928 module_exit(sdhci_drv_exit);
3929 
3930 module_param(debug_quirks, uint, 0444);
3931 module_param(debug_quirks2, uint, 0444);
3932 
3933 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3934 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3935 MODULE_LICENSE("GPL");
3936 
3937 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3938 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
3939