xref: /linux/drivers/mmc/host/sdhci-pci-core.c (revision 3e7819886281e077e82006fe4804b0d6b0f5643b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*  linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * Thanks to the following companies for their support:
7  *
8  *     - JMicron (hardware and technical support)
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/string.h>
13 #include <linux/delay.h>
14 #include <linux/highmem.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/scatterlist.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/gpio.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm_qos.h>
26 #include <linux/debugfs.h>
27 #include <linux/acpi.h>
28 #include <linux/dmi.h>
29 
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/slot-gpio.h>
33 
34 #ifdef CONFIG_X86
35 #include <asm/iosf_mbi.h>
36 #endif
37 
38 #include "cqhci.h"
39 
40 #include "sdhci.h"
41 #include "sdhci-cqhci.h"
42 #include "sdhci-pci.h"
43 
44 static void sdhci_pci_hw_reset(struct sdhci_host *host);
45 
46 #ifdef CONFIG_PM_SLEEP
47 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
48 {
49 	mmc_pm_flag_t pm_flags = 0;
50 	bool cap_cd_wake = false;
51 	int i;
52 
53 	for (i = 0; i < chip->num_slots; i++) {
54 		struct sdhci_pci_slot *slot = chip->slots[i];
55 
56 		if (slot) {
57 			pm_flags |= slot->host->mmc->pm_flags;
58 			if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
59 				cap_cd_wake = true;
60 		}
61 	}
62 
63 	if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
64 		return device_wakeup_enable(&chip->pdev->dev);
65 	else if (!cap_cd_wake)
66 		device_wakeup_disable(&chip->pdev->dev);
67 
68 	return 0;
69 }
70 
71 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
72 {
73 	int i, ret;
74 
75 	sdhci_pci_init_wakeup(chip);
76 
77 	for (i = 0; i < chip->num_slots; i++) {
78 		struct sdhci_pci_slot *slot = chip->slots[i];
79 		struct sdhci_host *host;
80 
81 		if (!slot)
82 			continue;
83 
84 		host = slot->host;
85 
86 		if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
87 			mmc_retune_needed(host->mmc);
88 
89 		ret = sdhci_suspend_host(host);
90 		if (ret)
91 			goto err_pci_suspend;
92 
93 		if (device_may_wakeup(&chip->pdev->dev))
94 			mmc_gpio_set_cd_wake(host->mmc, true);
95 	}
96 
97 	return 0;
98 
99 err_pci_suspend:
100 	while (--i >= 0)
101 		sdhci_resume_host(chip->slots[i]->host);
102 	return ret;
103 }
104 
105 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
106 {
107 	struct sdhci_pci_slot *slot;
108 	int i, ret;
109 
110 	for (i = 0; i < chip->num_slots; i++) {
111 		slot = chip->slots[i];
112 		if (!slot)
113 			continue;
114 
115 		ret = sdhci_resume_host(slot->host);
116 		if (ret)
117 			return ret;
118 
119 		mmc_gpio_set_cd_wake(slot->host->mmc, false);
120 	}
121 
122 	return 0;
123 }
124 
125 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
126 {
127 	int ret;
128 
129 	ret = cqhci_suspend(chip->slots[0]->host->mmc);
130 	if (ret)
131 		return ret;
132 
133 	return sdhci_pci_suspend_host(chip);
134 }
135 
136 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
137 {
138 	int ret;
139 
140 	ret = sdhci_pci_resume_host(chip);
141 	if (ret)
142 		return ret;
143 
144 	return cqhci_resume(chip->slots[0]->host->mmc);
145 }
146 #endif
147 
148 #ifdef CONFIG_PM
149 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
150 {
151 	struct sdhci_pci_slot *slot;
152 	struct sdhci_host *host;
153 	int i, ret;
154 
155 	for (i = 0; i < chip->num_slots; i++) {
156 		slot = chip->slots[i];
157 		if (!slot)
158 			continue;
159 
160 		host = slot->host;
161 
162 		ret = sdhci_runtime_suspend_host(host);
163 		if (ret)
164 			goto err_pci_runtime_suspend;
165 
166 		if (chip->rpm_retune &&
167 		    host->tuning_mode != SDHCI_TUNING_MODE_3)
168 			mmc_retune_needed(host->mmc);
169 	}
170 
171 	return 0;
172 
173 err_pci_runtime_suspend:
174 	while (--i >= 0)
175 		sdhci_runtime_resume_host(chip->slots[i]->host, 0);
176 	return ret;
177 }
178 
179 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
180 {
181 	struct sdhci_pci_slot *slot;
182 	int i, ret;
183 
184 	for (i = 0; i < chip->num_slots; i++) {
185 		slot = chip->slots[i];
186 		if (!slot)
187 			continue;
188 
189 		ret = sdhci_runtime_resume_host(slot->host, 0);
190 		if (ret)
191 			return ret;
192 	}
193 
194 	return 0;
195 }
196 
197 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
198 {
199 	int ret;
200 
201 	ret = cqhci_suspend(chip->slots[0]->host->mmc);
202 	if (ret)
203 		return ret;
204 
205 	return sdhci_pci_runtime_suspend_host(chip);
206 }
207 
208 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
209 {
210 	int ret;
211 
212 	ret = sdhci_pci_runtime_resume_host(chip);
213 	if (ret)
214 		return ret;
215 
216 	return cqhci_resume(chip->slots[0]->host->mmc);
217 }
218 #endif
219 
220 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
221 {
222 	int cmd_error = 0;
223 	int data_error = 0;
224 
225 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
226 		return intmask;
227 
228 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
229 
230 	return 0;
231 }
232 
233 static void sdhci_pci_dumpregs(struct mmc_host *mmc)
234 {
235 	sdhci_dumpregs(mmc_priv(mmc));
236 }
237 
238 /*****************************************************************************\
239  *                                                                           *
240  * Hardware specific quirk handling                                          *
241  *                                                                           *
242 \*****************************************************************************/
243 
244 static int ricoh_probe(struct sdhci_pci_chip *chip)
245 {
246 	if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
247 	    chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
248 		chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
249 	return 0;
250 }
251 
252 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
253 {
254 	u32 caps =
255 		FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
256 		FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
257 		SDHCI_TIMEOUT_CLK_UNIT |
258 		SDHCI_CAN_VDD_330 |
259 		SDHCI_CAN_DO_HISPD |
260 		SDHCI_CAN_DO_SDMA;
261 	u32 caps1 = 0;
262 
263 	__sdhci_read_caps(slot->host, NULL, &caps, &caps1);
264 	return 0;
265 }
266 
267 #ifdef CONFIG_PM_SLEEP
268 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
269 {
270 	/* Apply a delay to allow the controller to settle, otherwise it
271 	 * becomes confused if the card state changed during suspend.
272 	 */
273 	msleep(500);
274 	return sdhci_pci_resume_host(chip);
275 }
276 #endif
277 
278 static const struct sdhci_pci_fixes sdhci_ricoh = {
279 	.probe		= ricoh_probe,
280 	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
281 			  SDHCI_QUIRK_FORCE_DMA |
282 			  SDHCI_QUIRK_CLOCK_BEFORE_RESET,
283 };
284 
285 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
286 	.probe_slot	= ricoh_mmc_probe_slot,
287 #ifdef CONFIG_PM_SLEEP
288 	.resume		= ricoh_mmc_resume,
289 #endif
290 	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
291 			  SDHCI_QUIRK_CLOCK_BEFORE_RESET |
292 			  SDHCI_QUIRK_NO_CARD_NO_RESET,
293 };
294 
295 static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
296 {
297 	struct sdhci_host *host = mmc_priv(mmc);
298 
299 	sdhci_set_ios(mmc, ios);
300 
301 	/*
302 	 * Some (ENE) controllers misbehave on some ios operations,
303 	 * signalling timeout and CRC errors even on CMD0. Resetting the
304 	 * controller on each ios seems to solve the problem.
305 	 */
306 	if (!(host->flags & SDHCI_DEVICE_DEAD))
307 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
308 }
309 
310 static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
311 {
312 	slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
313 	return 0;
314 }
315 
316 static const struct sdhci_pci_fixes sdhci_ene_712 = {
317 	.quirks		= SDHCI_QUIRK_SINGLE_POWER_WRITE |
318 			  SDHCI_QUIRK_BROKEN_DMA,
319 };
320 
321 static const struct sdhci_pci_fixes sdhci_ene_714 = {
322 	.quirks		= SDHCI_QUIRK_SINGLE_POWER_WRITE |
323 			  SDHCI_QUIRK_BROKEN_DMA,
324 	.probe_slot	= ene_714_probe_slot,
325 };
326 
327 static const struct sdhci_pci_fixes sdhci_cafe = {
328 	.quirks		= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
329 			  SDHCI_QUIRK_NO_BUSY_IRQ |
330 			  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
331 			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
332 };
333 
334 static const struct sdhci_pci_fixes sdhci_intel_qrk = {
335 	.quirks		= SDHCI_QUIRK_NO_HISPD_BIT,
336 };
337 
338 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
339 {
340 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
341 	return 0;
342 }
343 
344 /*
345  * ADMA operation is disabled for Moorestown platform due to
346  * hardware bugs.
347  */
348 static int mrst_hc_probe(struct sdhci_pci_chip *chip)
349 {
350 	/*
351 	 * The number of slots is fixed to one here for MRST, as SDIO3/5 are
352 	 * never used and have hardware bugs.
353 	 */
354 	chip->num_slots = 1;
355 	return 0;
356 }
357 
358 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
359 {
360 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
361 	return 0;
362 }
363 
364 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
365 {
366 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
367 	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
368 	return 0;
369 }
370 
371 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
372 {
373 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
374 	return 0;
375 }
376 
377 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
378 	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
379 	.probe_slot	= mrst_hc_probe_slot,
380 };
381 
382 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
383 	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
384 	.probe		= mrst_hc_probe,
385 };
386 
387 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
388 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
389 	.allow_runtime_pm = true,
390 	.own_cd_for_runtime_pm = true,
391 };
392 
393 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
394 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
395 	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON,
396 	.allow_runtime_pm = true,
397 	.probe_slot	= mfd_sdio_probe_slot,
398 };
399 
400 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
401 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
402 	.allow_runtime_pm = true,
403 	.probe_slot	= mfd_emmc_probe_slot,
404 };
405 
406 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
407 	.quirks		= SDHCI_QUIRK_BROKEN_ADMA,
408 	.probe_slot	= pch_hc_probe_slot,
409 };
410 
411 #ifdef CONFIG_X86
412 
413 #define BYT_IOSF_SCCEP			0x63
414 #define BYT_IOSF_OCP_NETCTRL0		0x1078
415 #define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)
416 
417 static void byt_ocp_setting(struct pci_dev *pdev)
418 {
419 	u32 val = 0;
420 
421 	if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
422 	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
423 	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
424 	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
425 		return;
426 
427 	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
428 			  &val)) {
429 		dev_err(&pdev->dev, "%s read error\n", __func__);
430 		return;
431 	}
432 
433 	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
434 		return;
435 
436 	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
437 
438 	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
439 			   val)) {
440 		dev_err(&pdev->dev, "%s write error\n", __func__);
441 		return;
442 	}
443 
444 	dev_dbg(&pdev->dev, "%s completed\n", __func__);
445 }
446 
447 #else
448 
449 static inline void byt_ocp_setting(struct pci_dev *pdev)
450 {
451 }
452 
453 #endif
454 
455 enum {
456 	INTEL_DSM_FNS		=  0,
457 	INTEL_DSM_V18_SWITCH	=  3,
458 	INTEL_DSM_V33_SWITCH	=  4,
459 	INTEL_DSM_DRV_STRENGTH	=  9,
460 	INTEL_DSM_D3_RETUNE	= 10,
461 };
462 
463 struct intel_host {
464 	u32	dsm_fns;
465 	int	drv_strength;
466 	bool	d3_retune;
467 	bool	rpm_retune_ok;
468 	bool	needs_pwr_off;
469 	u32	glk_rx_ctrl1;
470 	u32	glk_tun_val;
471 	u32	active_ltr;
472 	u32	idle_ltr;
473 };
474 
475 static const guid_t intel_dsm_guid =
476 	GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
477 		  0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
478 
479 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
480 		       unsigned int fn, u32 *result)
481 {
482 	union acpi_object *obj;
483 	int err = 0;
484 	size_t len;
485 
486 	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
487 				      ACPI_TYPE_BUFFER);
488 	if (!obj)
489 		return -EOPNOTSUPP;
490 
491 	if (obj->buffer.length < 1) {
492 		err = -EINVAL;
493 		goto out;
494 	}
495 
496 	len = min_t(size_t, obj->buffer.length, 4);
497 
498 	*result = 0;
499 	memcpy(result, obj->buffer.pointer, len);
500 out:
501 	ACPI_FREE(obj);
502 
503 	return err;
504 }
505 
506 static int intel_dsm(struct intel_host *intel_host, struct device *dev,
507 		     unsigned int fn, u32 *result)
508 {
509 	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
510 		return -EOPNOTSUPP;
511 
512 	return __intel_dsm(intel_host, dev, fn, result);
513 }
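
/*
 * Worked example (the mask value below is hypothetical): _DSM function 0
 * returns a bitmask of the supported functions, and intel_dsm() gates every
 * later call on it. If dsm_fns were 0x61d (bits 0, 2, 3, 4, 9 and 10 set):
 *
 *	dsm_fns & (1 << INTEL_DSM_D3_RETUNE)	-> 0x61d & 0x400 -> non-zero
 *	dsm_fns & (1 << INTEL_DSM_V33_SWITCH)	-> 0x61d & 0x010 -> non-zero
 *
 * so those functions would be evaluated, while any fn > 31 or a clear bit
 * makes intel_dsm() return -EOPNOTSUPP without touching ACPI.
 */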
514 
515 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
516 			   struct mmc_host *mmc)
517 {
518 	int err;
519 	u32 val;
520 
521 	intel_host->d3_retune = true;
522 
523 	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
524 	if (err) {
525 		pr_debug("%s: DSM not supported, error %d\n",
526 			 mmc_hostname(mmc), err);
527 		return;
528 	}
529 
530 	pr_debug("%s: DSM function mask %#x\n",
531 		 mmc_hostname(mmc), intel_host->dsm_fns);
532 
533 	err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
534 	intel_host->drv_strength = err ? 0 : val;
535 
536 	err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
537 	intel_host->d3_retune = err ? true : !!val;
538 }
539 
540 static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
541 {
542 	u8 reg;
543 
544 	reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
545 	reg |= 0x10;
546 	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
547 	/* For eMMC, minimum is 1us but give it 9us for good measure */
548 	udelay(9);
549 	reg &= ~0x10;
550 	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
551 	/* For eMMC, minimum is 200us but give it 300us for good measure */
552 	usleep_range(300, 1000);
553 }
554 
555 static int intel_select_drive_strength(struct mmc_card *card,
556 				       unsigned int max_dtr, int host_drv,
557 				       int card_drv, int *drv_type)
558 {
559 	struct sdhci_host *host = mmc_priv(card->host);
560 	struct sdhci_pci_slot *slot = sdhci_priv(host);
561 	struct intel_host *intel_host = sdhci_pci_priv(slot);
562 
563 	if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
564 		return 0;
565 
566 	return intel_host->drv_strength;
567 }
568 
569 static int bxt_get_cd(struct mmc_host *mmc)
570 {
571 	int gpio_cd = mmc_gpio_get_cd(mmc);
572 
573 	if (!gpio_cd)
574 		return 0;
575 
576 	return sdhci_get_cd_nogpio(mmc);
577 }
578 
579 static int mrfld_get_cd(struct mmc_host *mmc)
580 {
581 	return sdhci_get_cd_nogpio(mmc);
582 }
583 
584 #define SDHCI_INTEL_PWR_TIMEOUT_CNT	20
585 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY	100
586 
587 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
588 				  unsigned short vdd)
589 {
590 	struct sdhci_pci_slot *slot = sdhci_priv(host);
591 	struct intel_host *intel_host = sdhci_pci_priv(slot);
592 	int cntr;
593 	u8 reg;
594 
595 	/*
596 	 * Bus power may control card power, but a full reset still may not
597 	 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
598 	 * That might be needed to initialize correctly, if the card was left
599 	 * powered on previously.
600 	 */
601 	if (intel_host->needs_pwr_off) {
602 		intel_host->needs_pwr_off = false;
603 		if (mode != MMC_POWER_OFF) {
604 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
605 			usleep_range(10000, 12500);
606 		}
607 	}
608 
609 	sdhci_set_power(host, mode, vdd);
610 
611 	if (mode == MMC_POWER_OFF)
612 		return;
613 
614 	/*
615 	 * Bus power might not enable after D3 -> D0 transition due to the
616 	 * present state not yet having propagated. Retry for up to 2ms.
617 	 */
618 	for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
619 		reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
620 		if (reg & SDHCI_POWER_ON)
621 			break;
622 		udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
623 		reg |= SDHCI_POWER_ON;
624 		sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
625 	}
626 }
627 
628 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
629 					  unsigned int timing)
630 {
631 	/* Set UHS timing to SDR25 for High Speed mode */
632 	if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
633 		timing = MMC_TIMING_UHS_SDR25;
634 	sdhci_set_uhs_signaling(host, timing);
635 }
636 
637 #define INTEL_HS400_ES_REG 0x78
638 #define INTEL_HS400_ES_BIT BIT(0)
639 
640 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
641 					struct mmc_ios *ios)
642 {
643 	struct sdhci_host *host = mmc_priv(mmc);
644 	u32 val;
645 
646 	val = sdhci_readl(host, INTEL_HS400_ES_REG);
647 	if (ios->enhanced_strobe)
648 		val |= INTEL_HS400_ES_BIT;
649 	else
650 		val &= ~INTEL_HS400_ES_BIT;
651 	sdhci_writel(host, val, INTEL_HS400_ES_REG);
652 }
653 
654 static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
655 					     struct mmc_ios *ios)
656 {
657 	struct device *dev = mmc_dev(mmc);
658 	struct sdhci_host *host = mmc_priv(mmc);
659 	struct sdhci_pci_slot *slot = sdhci_priv(host);
660 	struct intel_host *intel_host = sdhci_pci_priv(slot);
661 	unsigned int fn;
662 	u32 result = 0;
663 	int err;
664 
665 	err = sdhci_start_signal_voltage_switch(mmc, ios);
666 	if (err)
667 		return err;
668 
669 	switch (ios->signal_voltage) {
670 	case MMC_SIGNAL_VOLTAGE_330:
671 		fn = INTEL_DSM_V33_SWITCH;
672 		break;
673 	case MMC_SIGNAL_VOLTAGE_180:
674 		fn = INTEL_DSM_V18_SWITCH;
675 		break;
676 	default:
677 		return 0;
678 	}
679 
680 	err = intel_dsm(intel_host, dev, fn, &result);
681 	pr_debug("%s: %s DSM fn %u error %d result %u\n",
682 		 mmc_hostname(mmc), __func__, fn, err, result);
683 
684 	return 0;
685 }
686 
687 static const struct sdhci_ops sdhci_intel_byt_ops = {
688 	.set_clock		= sdhci_set_clock,
689 	.set_power		= sdhci_intel_set_power,
690 	.enable_dma		= sdhci_pci_enable_dma,
691 	.set_bus_width		= sdhci_set_bus_width,
692 	.reset			= sdhci_reset,
693 	.set_uhs_signaling	= sdhci_intel_set_uhs_signaling,
694 	.hw_reset		= sdhci_pci_hw_reset,
695 };
696 
697 static const struct sdhci_ops sdhci_intel_glk_ops = {
698 	.set_clock		= sdhci_set_clock,
699 	.set_power		= sdhci_intel_set_power,
700 	.enable_dma		= sdhci_pci_enable_dma,
701 	.set_bus_width		= sdhci_set_bus_width,
702 	.reset			= sdhci_and_cqhci_reset,
703 	.set_uhs_signaling	= sdhci_intel_set_uhs_signaling,
704 	.hw_reset		= sdhci_pci_hw_reset,
705 	.irq			= sdhci_cqhci_irq,
706 };
707 
708 static void byt_read_dsm(struct sdhci_pci_slot *slot)
709 {
710 	struct intel_host *intel_host = sdhci_pci_priv(slot);
711 	struct device *dev = &slot->chip->pdev->dev;
712 	struct mmc_host *mmc = slot->host->mmc;
713 
714 	intel_dsm_init(intel_host, dev, mmc);
715 	slot->chip->rpm_retune = intel_host->d3_retune;
716 }
717 
718 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
719 {
720 	int err = sdhci_execute_tuning(mmc, opcode);
721 	struct sdhci_host *host = mmc_priv(mmc);
722 
723 	if (err)
724 		return err;
725 
726 	/*
727 	 * Tuning can leave the IP in an active state (Buffer Read Enable bit
728 	 * set), which prevents entry to low power states (i.e. S0i3). A data
729 	 * reset will clear it.
730 	 */
731 	sdhci_reset(host, SDHCI_RESET_DATA);
732 
733 	return 0;
734 }
735 
736 #define INTEL_ACTIVELTR		0x804
737 #define INTEL_IDLELTR		0x808
738 
739 #define INTEL_LTR_REQ		BIT(15)
740 #define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
741 #define INTEL_LTR_SCALE_1US	(2 << 10)
742 #define INTEL_LTR_SCALE_32US	(3 << 10)
743 #define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)
744 
745 static void intel_cache_ltr(struct sdhci_pci_slot *slot)
746 {
747 	struct intel_host *intel_host = sdhci_pci_priv(slot);
748 	struct sdhci_host *host = slot->host;
749 
750 	intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
751 	intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
752 }
753 
754 static void intel_ltr_set(struct device *dev, s32 val)
755 {
756 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
757 	struct sdhci_pci_slot *slot = chip->slots[0];
758 	struct intel_host *intel_host = sdhci_pci_priv(slot);
759 	struct sdhci_host *host = slot->host;
760 	u32 ltr;
761 
762 	pm_runtime_get_sync(dev);
763 
764 	/*
765 	 * Program latency tolerance (LTR) according to what has been asked
766 	 * by the PM QoS layer, or disable it if we were passed a negative
767 	 * value or PM_QOS_LATENCY_ANY.
768 	 */
769 	ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
770 
771 	if (val == PM_QOS_LATENCY_ANY || val < 0) {
772 		ltr &= ~INTEL_LTR_REQ;
773 	} else {
774 		ltr |= INTEL_LTR_REQ;
775 		ltr &= ~INTEL_LTR_SCALE_MASK;
776 		ltr &= ~INTEL_LTR_VALUE_MASK;
777 
778 		if (val > INTEL_LTR_VALUE_MASK) {
779 			val >>= 5;
780 			if (val > INTEL_LTR_VALUE_MASK)
781 				val = INTEL_LTR_VALUE_MASK;
782 			ltr |= INTEL_LTR_SCALE_32US | val;
783 		} else {
784 			ltr |= INTEL_LTR_SCALE_1US | val;
785 		}
786 	}
787 
788 	if (ltr == intel_host->active_ltr)
789 		goto out;
790 
791 	writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
792 	writel(ltr, host->ioaddr + INTEL_IDLELTR);
793 
794 	/* Cache the values in the intel_host structure */
795 	intel_cache_ltr(slot);
796 out:
797 	pm_runtime_put_autosuspend(dev);
798 }
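
/*
 * Worked example of the LTR encoding above (values are illustrative): the PM
 * QoS latency tolerance is expressed in microseconds. A request of 500 us
 * fits INTEL_LTR_VALUE_MASK (<= 1023), so it is programmed with the 1 us
 * scale: INTEL_LTR_REQ | INTEL_LTR_SCALE_1US | 500. A request of 3000 us does
 * not fit, so it is shifted right by 5 (divided by 32) and programmed with
 * the 32 us scale: INTEL_LTR_REQ | INTEL_LTR_SCALE_32US | 93, i.e. roughly
 * 93 * 32 = 2976 us. Negative values and PM_QOS_LATENCY_ANY simply clear
 * INTEL_LTR_REQ.
 */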
799 
800 static bool intel_use_ltr(struct sdhci_pci_chip *chip)
801 {
802 	switch (chip->pdev->device) {
803 	case PCI_DEVICE_ID_INTEL_BYT_EMMC:
804 	case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
805 	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
806 	case PCI_DEVICE_ID_INTEL_BYT_SD:
807 	case PCI_DEVICE_ID_INTEL_BSW_EMMC:
808 	case PCI_DEVICE_ID_INTEL_BSW_SDIO:
809 	case PCI_DEVICE_ID_INTEL_BSW_SD:
810 		return false;
811 	default:
812 		return true;
813 	}
814 }
815 
816 static void intel_ltr_expose(struct sdhci_pci_chip *chip)
817 {
818 	struct device *dev = &chip->pdev->dev;
819 
820 	if (!intel_use_ltr(chip))
821 		return;
822 
823 	dev->power.set_latency_tolerance = intel_ltr_set;
824 	dev_pm_qos_expose_latency_tolerance(dev);
825 }
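
/*
 * Note: dev_pm_qos_expose_latency_tolerance() makes the tolerance writable
 * from user space, typically via the device's sysfs attribute
 * power/pm_qos_latency_tolerance_us (a value in microseconds, or "any"/"auto").
 * Writes reach intel_ltr_set() through the set_latency_tolerance hook
 * installed above; the exact attribute path may vary between kernel versions.
 */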
826 
827 static void intel_ltr_hide(struct sdhci_pci_chip *chip)
828 {
829 	struct device *dev = &chip->pdev->dev;
830 
831 	if (!intel_use_ltr(chip))
832 		return;
833 
834 	dev_pm_qos_hide_latency_tolerance(dev);
835 	dev->power.set_latency_tolerance = NULL;
836 }
837 
838 static void byt_probe_slot(struct sdhci_pci_slot *slot)
839 {
840 	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
841 	struct device *dev = &slot->chip->pdev->dev;
842 	struct mmc_host *mmc = slot->host->mmc;
843 
844 	byt_read_dsm(slot);
845 
846 	byt_ocp_setting(slot->chip->pdev);
847 
848 	ops->execute_tuning = intel_execute_tuning;
849 	ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
850 
851 	device_property_read_u32(dev, "max-frequency", &mmc->f_max);
852 
853 	if (!mmc->slotno) {
854 		slot->chip->slots[mmc->slotno] = slot;
855 		intel_ltr_expose(slot->chip);
856 	}
857 }
858 
859 static void byt_add_debugfs(struct sdhci_pci_slot *slot)
860 {
861 	struct intel_host *intel_host = sdhci_pci_priv(slot);
862 	struct mmc_host *mmc = slot->host->mmc;
863 	struct dentry *dir = mmc->debugfs_root;
864 
865 	if (!intel_use_ltr(slot->chip))
866 		return;
867 
868 	debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
869 	debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
870 
871 	intel_cache_ltr(slot);
872 }
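
/*
 * The cached LTR values are exposed read-only through the MMC host's debugfs
 * directory, usually /sys/kernel/debug/mmc<N>/active_ltr and idle_ltr (the
 * exact location depends on where debugfs is mounted).
 */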
873 
874 static int byt_add_host(struct sdhci_pci_slot *slot)
875 {
876 	int ret = sdhci_add_host(slot->host);
877 
878 	if (!ret)
879 		byt_add_debugfs(slot);
880 	return ret;
881 }
882 
883 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
884 {
885 	struct mmc_host *mmc = slot->host->mmc;
886 
887 	if (!mmc->slotno)
888 		intel_ltr_hide(slot->chip);
889 }
890 
891 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
892 {
893 	byt_probe_slot(slot);
894 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
895 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
896 				 MMC_CAP_CMD_DURING_TFR |
897 				 MMC_CAP_WAIT_WHILE_BUSY;
898 	slot->hw_reset = sdhci_pci_int_hw_reset;
899 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
900 		slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
901 	slot->host->mmc_host_ops.select_drive_strength =
902 						intel_select_drive_strength;
903 	return 0;
904 }
905 
906 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
907 {
908 	return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
909 	       (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
910 		dmi_match(DMI_SYS_VENDOR, "IRBIS"));
911 }
912 
913 static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
914 {
915 	return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
916 			dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
917 }
918 
919 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
920 {
921 	int ret = byt_emmc_probe_slot(slot);
922 
923 	if (!glk_broken_cqhci(slot))
924 		slot->host->mmc->caps2 |= MMC_CAP2_CQE;
925 
926 	if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
927 		if (!jsl_broken_hs400es(slot)) {
928 			slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
929 			slot->host->mmc_host_ops.hs400_enhanced_strobe =
930 							intel_hs400_enhanced_strobe;
931 		}
932 		slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
933 	}
934 
935 	return ret;
936 }
937 
938 static const struct cqhci_host_ops glk_cqhci_ops = {
939 	.enable		= sdhci_cqe_enable,
940 	.disable	= sdhci_cqe_disable,
941 	.dumpregs	= sdhci_pci_dumpregs,
942 };
943 
944 static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
945 {
946 	struct device *dev = &slot->chip->pdev->dev;
947 	struct sdhci_host *host = slot->host;
948 	struct cqhci_host *cq_host;
949 	bool dma64;
950 	int ret;
951 
952 	ret = sdhci_setup_host(host);
953 	if (ret)
954 		return ret;
955 
956 	cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
957 	if (!cq_host) {
958 		ret = -ENOMEM;
959 		goto cleanup;
960 	}
961 
962 	cq_host->mmio = host->ioaddr + 0x200;
963 	cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
964 	cq_host->ops = &glk_cqhci_ops;
965 
966 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
967 	if (dma64)
968 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
969 
970 	ret = cqhci_init(cq_host, host->mmc, dma64);
971 	if (ret)
972 		goto cleanup;
973 
974 	ret = __sdhci_add_host(host);
975 	if (ret)
976 		goto cleanup;
977 
978 	byt_add_debugfs(slot);
979 
980 	return 0;
981 
982 cleanup:
983 	sdhci_cleanup_host(host);
984 	return ret;
985 }
986 
987 #ifdef CONFIG_PM
988 #define GLK_RX_CTRL1	0x834
989 #define GLK_TUN_VAL	0x840
990 #define GLK_PATH_PLL	GENMASK(13, 8)
991 #define GLK_DLY		GENMASK(6, 0)
992 /* Work around firmware failing to restore the tuning value */
993 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
994 {
995 	struct sdhci_pci_slot *slot = chip->slots[0];
996 	struct intel_host *intel_host = sdhci_pci_priv(slot);
997 	struct sdhci_host *host = slot->host;
998 	u32 glk_rx_ctrl1;
999 	u32 glk_tun_val;
1000 	u32 dly;
1001 
1002 	if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
1003 		return;
1004 
1005 	glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
1006 	glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
1007 
1008 	if (susp) {
1009 		intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
1010 		intel_host->glk_tun_val = glk_tun_val;
1011 		return;
1012 	}
1013 
1014 	if (!intel_host->glk_tun_val)
1015 		return;
1016 
1017 	if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
1018 		intel_host->rpm_retune_ok = true;
1019 		return;
1020 	}
1021 
1022 	dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
1023 				  (intel_host->glk_tun_val << 1));
1024 	if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
1025 		return;
1026 
1027 	glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
1028 	sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
1029 
1030 	intel_host->rpm_retune_ok = true;
1031 	chip->rpm_retune = true;
1032 	mmc_retune_needed(host->mmc);
1033 	pr_info("%s: Requiring re-tune after rpm resume\n", mmc_hostname(host->mmc));
1034 }
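
/*
 * Illustrative example of the delay computation above (field values are made
 * up): with GLK_PATH_PLL = GENMASK(13, 8) and GLK_DLY = GENMASK(6, 0), a
 * saved tuning value of 5 and a PLL path field of 16 give a new delay of
 * 16 + (5 << 1) = 26, written into bits 6:0 of GLK_RX_CTRL1. If the computed
 * delay already matches the current one, or GLK_RX_CTRL1 no longer matches
 * the value saved at suspend, the register write is skipped.
 */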
1035 
1036 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
1037 {
1038 	if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
1039 	    !chip->rpm_retune)
1040 		glk_rpm_retune_wa(chip, susp);
1041 }
1042 
1043 static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
1044 {
1045 	glk_rpm_retune_chk(chip, true);
1046 
1047 	return sdhci_cqhci_runtime_suspend(chip);
1048 }
1049 
1050 static int glk_runtime_resume(struct sdhci_pci_chip *chip)
1051 {
1052 	glk_rpm_retune_chk(chip, false);
1053 
1054 	return sdhci_cqhci_runtime_resume(chip);
1055 }
1056 #endif
1057 
1058 #ifdef CONFIG_ACPI
1059 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
1060 {
1061 	acpi_status status;
1062 	unsigned long long max_freq;
1063 
1064 	status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
1065 				       "MXFQ", NULL, &max_freq);
1066 	if (ACPI_FAILURE(status)) {
1067 		dev_err(&slot->chip->pdev->dev,
1068 			"MXFQ not found in acpi table\n");
1069 		return -EINVAL;
1070 	}
1071 
1072 	slot->host->mmc->f_max = max_freq * 1000000;
1073 
1074 	return 0;
1075 }
1076 #else
1077 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
1078 {
1079 	return 0;
1080 }
1081 #endif
1082 
1083 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1084 {
1085 	int err;
1086 
1087 	byt_probe_slot(slot);
1088 
1089 	err = ni_set_max_freq(slot);
1090 	if (err)
1091 		return err;
1092 
1093 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1094 				 MMC_CAP_WAIT_WHILE_BUSY;
1095 	return 0;
1096 }
1097 
1098 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1099 {
1100 	byt_probe_slot(slot);
1101 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1102 				 MMC_CAP_WAIT_WHILE_BUSY;
1103 	return 0;
1104 }
1105 
1106 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
1107 {
1108 	struct intel_host *intel_host = sdhci_pci_priv(slot);
1109 	u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
1110 
1111 	intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
1112 }
1113 
1114 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1115 {
1116 	byt_probe_slot(slot);
1117 	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1118 				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1119 	slot->cd_idx = 0;
1120 	slot->cd_override_level = true;
1121 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
1122 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
1123 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
1124 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
1125 		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
1126 
1127 	if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
1128 	    slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
1129 		slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
1130 
1131 	byt_needs_pwr_off(slot);
1132 
1133 	return 0;
1134 }
1135 
1136 #ifdef CONFIG_PM_SLEEP
1137 
1138 static int byt_resume(struct sdhci_pci_chip *chip)
1139 {
1140 	byt_ocp_setting(chip->pdev);
1141 
1142 	return sdhci_pci_resume_host(chip);
1143 }
1144 
1145 #endif
1146 
1147 #ifdef CONFIG_PM
1148 
1149 static int byt_runtime_resume(struct sdhci_pci_chip *chip)
1150 {
1151 	byt_ocp_setting(chip->pdev);
1152 
1153 	return sdhci_pci_runtime_resume_host(chip);
1154 }
1155 
1156 #endif
1157 
1158 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
1159 #ifdef CONFIG_PM_SLEEP
1160 	.resume		= byt_resume,
1161 #endif
1162 #ifdef CONFIG_PM
1163 	.runtime_resume	= byt_runtime_resume,
1164 #endif
1165 	.allow_runtime_pm = true,
1166 	.probe_slot	= byt_emmc_probe_slot,
1167 	.add_host	= byt_add_host,
1168 	.remove_slot	= byt_remove_slot,
1169 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1170 			  SDHCI_QUIRK_NO_LED,
1171 	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1172 			  SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1173 			  SDHCI_QUIRK2_STOP_WITH_TC,
1174 	.ops		= &sdhci_intel_byt_ops,
1175 	.priv_size	= sizeof(struct intel_host),
1176 };
1177 
1178 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
1179 	.allow_runtime_pm	= true,
1180 	.probe_slot		= glk_emmc_probe_slot,
1181 	.add_host		= glk_emmc_add_host,
1182 	.remove_slot		= byt_remove_slot,
1183 #ifdef CONFIG_PM_SLEEP
1184 	.suspend		= sdhci_cqhci_suspend,
1185 	.resume			= sdhci_cqhci_resume,
1186 #endif
1187 #ifdef CONFIG_PM
1188 	.runtime_suspend	= glk_runtime_suspend,
1189 	.runtime_resume		= glk_runtime_resume,
1190 #endif
1191 	.quirks			= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1192 				  SDHCI_QUIRK_NO_LED,
1193 	.quirks2		= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1194 				  SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1195 				  SDHCI_QUIRK2_STOP_WITH_TC,
1196 	.ops			= &sdhci_intel_glk_ops,
1197 	.priv_size		= sizeof(struct intel_host),
1198 };
1199 
1200 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
1201 #ifdef CONFIG_PM_SLEEP
1202 	.resume		= byt_resume,
1203 #endif
1204 #ifdef CONFIG_PM
1205 	.runtime_resume	= byt_runtime_resume,
1206 #endif
1207 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1208 			  SDHCI_QUIRK_NO_LED,
1209 	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1210 			  SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1211 	.allow_runtime_pm = true,
1212 	.probe_slot	= ni_byt_sdio_probe_slot,
1213 	.add_host	= byt_add_host,
1214 	.remove_slot	= byt_remove_slot,
1215 	.ops		= &sdhci_intel_byt_ops,
1216 	.priv_size	= sizeof(struct intel_host),
1217 };
1218 
1219 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
1220 #ifdef CONFIG_PM_SLEEP
1221 	.resume		= byt_resume,
1222 #endif
1223 #ifdef CONFIG_PM
1224 	.runtime_resume	= byt_runtime_resume,
1225 #endif
1226 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1227 			  SDHCI_QUIRK_NO_LED,
1228 	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1229 			SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1230 	.allow_runtime_pm = true,
1231 	.probe_slot	= byt_sdio_probe_slot,
1232 	.add_host	= byt_add_host,
1233 	.remove_slot	= byt_remove_slot,
1234 	.ops		= &sdhci_intel_byt_ops,
1235 	.priv_size	= sizeof(struct intel_host),
1236 };
1237 
1238 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
1239 #ifdef CONFIG_PM_SLEEP
1240 	.resume		= byt_resume,
1241 #endif
1242 #ifdef CONFIG_PM
1243 	.runtime_resume	= byt_runtime_resume,
1244 #endif
1245 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1246 			  SDHCI_QUIRK_NO_LED,
1247 	.quirks2	= SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1248 			  SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1249 			  SDHCI_QUIRK2_STOP_WITH_TC,
1250 	.allow_runtime_pm = true,
1251 	.own_cd_for_runtime_pm = true,
1252 	.probe_slot	= byt_sd_probe_slot,
1253 	.add_host	= byt_add_host,
1254 	.remove_slot	= byt_remove_slot,
1255 	.ops		= &sdhci_intel_byt_ops,
1256 	.priv_size	= sizeof(struct intel_host),
1257 };
1258 
1259 /* Define Host controllers for Intel Merrifield platform */
1260 #define INTEL_MRFLD_EMMC_0	0
1261 #define INTEL_MRFLD_EMMC_1	1
1262 #define INTEL_MRFLD_SD		2
1263 #define INTEL_MRFLD_SDIO	3
1264 
1265 #ifdef CONFIG_ACPI
1266 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
1267 {
1268 	struct acpi_device *device;
1269 
1270 	device = ACPI_COMPANION(&slot->chip->pdev->dev);
1271 	if (device)
1272 		acpi_device_fix_up_power_extended(device);
1273 }
1274 #else
1275 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
1276 #endif
1277 
1278 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
1279 {
1280 	unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
1281 
1282 	switch (func) {
1283 	case INTEL_MRFLD_EMMC_0:
1284 	case INTEL_MRFLD_EMMC_1:
1285 		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1286 					 MMC_CAP_8_BIT_DATA |
1287 					 MMC_CAP_1_8V_DDR;
1288 		break;
1289 	case INTEL_MRFLD_SD:
1290 		slot->cd_idx = 0;
1291 		slot->cd_override_level = true;
1292 		/*
1293 		 * There are two PCB designs of SD card slot with the opposite
1294 		 * card detection sense. Quirk this out by ignoring GPIO state
1295 		 * completely in the custom ->get_cd() callback.
1296 		 */
1297 		slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
1298 		slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1299 		break;
1300 	case INTEL_MRFLD_SDIO:
1301 		/* Advertise 2.0v for compatibility with the SDIO card's OCR */
1302 		slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
1303 		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1304 					 MMC_CAP_POWER_OFF_CARD;
1305 		break;
1306 	default:
1307 		return -ENODEV;
1308 	}
1309 
1310 	intel_mrfld_mmc_fix_up_power_slot(slot);
1311 	return 0;
1312 }
1313 
1314 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
1315 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1316 	.quirks2	= SDHCI_QUIRK2_BROKEN_HS200 |
1317 			SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1318 	.allow_runtime_pm = true,
1319 	.probe_slot	= intel_mrfld_mmc_probe_slot,
1320 };
1321 
1322 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
1323 {
1324 	u8 scratch;
1325 	int ret;
1326 
1327 	ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
1328 	if (ret)
1329 		goto fail;
1330 
1331 	/*
1332 	 * Turn PMOS on [bit 0], set over current detection to 2.4 V
1333 	 * [bit 1:2] and enable over current debouncing [bit 6].
1334 	 */
1335 	if (on)
1336 		scratch |= 0x47;
1337 	else
1338 		scratch &= ~0x47;
1339 
1340 	ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
1341 
1342 fail:
1343 	return pcibios_err_to_errno(ret);
1344 }
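
/*
 * For reference, the 0x47 mask used above is BIT(0) | BIT(1) | BIT(2) |
 * BIT(6) = 0x01 + 0x02 + 0x04 + 0x40, matching the comment: bit 0 turns the
 * PMOS on, bits 2:1 select the over-current threshold and bit 6 enables
 * over-current debouncing.
 */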
1345 
1346 static int jmicron_probe(struct sdhci_pci_chip *chip)
1347 {
1348 	int ret;
1349 	u16 mmcdev = 0;
1350 
1351 	if (chip->pdev->revision == 0) {
1352 		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
1353 			  SDHCI_QUIRK_32BIT_DMA_SIZE |
1354 			  SDHCI_QUIRK_32BIT_ADMA_SIZE |
1355 			  SDHCI_QUIRK_RESET_AFTER_REQUEST |
1356 			  SDHCI_QUIRK_BROKEN_SMALL_PIO;
1357 	}
1358 
1359 	/*
1360 	 * JMicron chips can have two interfaces to the same hardware
1361 	 * in order to work around limitations in Microsoft's driver.
1362 	 * We need to make sure we only bind to one of them.
1363 	 *
1364 	 * This code assumes two things:
1365 	 *
1366 	 * 1. The PCI code adds subfunctions in order.
1367 	 *
1368 	 * 2. The MMC interface has a lower subfunction number
1369 	 *    than the SD interface.
1370 	 */
1371 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
1372 		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
1373 	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
1374 		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
1375 
1376 	if (mmcdev) {
1377 		struct pci_dev *sd_dev;
1378 
1379 		sd_dev = NULL;
1380 		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
1381 						mmcdev, sd_dev)) != NULL) {
1382 			if ((PCI_SLOT(chip->pdev->devfn) ==
1383 				PCI_SLOT(sd_dev->devfn)) &&
1384 				(chip->pdev->bus == sd_dev->bus))
1385 				break;
1386 		}
1387 
1388 		if (sd_dev) {
1389 			pci_dev_put(sd_dev);
1390 			dev_info(&chip->pdev->dev, "Refusing to bind to "
1391 				"secondary interface.\n");
1392 			return -ENODEV;
1393 		}
1394 	}
1395 
1396 	/*
1397 	 * JMicron chips need a bit of a nudge to enable the power
1398 	 * output pins.
1399 	 */
1400 	ret = jmicron_pmos(chip, 1);
1401 	if (ret) {
1402 		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1403 		return ret;
1404 	}
1405 
1406 	/* Quirk for unstable RO detection on JM388 chips */
1407 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
1408 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1409 		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
1410 
1411 	return 0;
1412 }
1413 
1414 static void jmicron_enable_mmc(struct sdhci_host *host, int on)
1415 {
1416 	u8 scratch;
1417 
1418 	scratch = readb(host->ioaddr + 0xC0);
1419 
1420 	if (on)
1421 		scratch |= 0x01;
1422 	else
1423 		scratch &= ~0x01;
1424 
1425 	writeb(scratch, host->ioaddr + 0xC0);
1426 }
1427 
1428 static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
1429 {
1430 	if (slot->chip->pdev->revision == 0) {
1431 		u16 version;
1432 
1433 		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
1434 		version = (version & SDHCI_VENDOR_VER_MASK) >>
1435 			SDHCI_VENDOR_VER_SHIFT;
1436 
1437 		/*
1438 		 * Older versions of the chip have lots of nasty glitches
1439 		 * in the ADMA engine. It's best just to avoid it
1440 		 * completely.
1441 		 */
1442 		if (version < 0xAC)
1443 			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1444 	}
1445 
1446 	/* JM388 MMC doesn't support 1.8V while SD supports it */
1447 	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1448 		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
1449 			MMC_VDD_29_30 | MMC_VDD_30_31 |
1450 			MMC_VDD_165_195; /* allow 1.8V */
1451 		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
1452 			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
1453 	}
1454 
1455 	/*
1456 	 * The secondary interface requires a bit set to get the
1457 	 * interrupts.
1458 	 */
1459 	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1460 	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1461 		jmicron_enable_mmc(slot->host, 1);
1462 
1463 	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
1464 
1465 	return 0;
1466 }
1467 
1468 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
1469 {
1470 	if (dead)
1471 		return;
1472 
1473 	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1474 	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1475 		jmicron_enable_mmc(slot->host, 0);
1476 }
1477 
1478 #ifdef CONFIG_PM_SLEEP
1479 static int jmicron_suspend(struct sdhci_pci_chip *chip)
1480 {
1481 	int i, ret;
1482 
1483 	ret = sdhci_pci_suspend_host(chip);
1484 	if (ret)
1485 		return ret;
1486 
1487 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1488 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1489 		for (i = 0; i < chip->num_slots; i++)
1490 			jmicron_enable_mmc(chip->slots[i]->host, 0);
1491 	}
1492 
1493 	return 0;
1494 }
1495 
1496 static int jmicron_resume(struct sdhci_pci_chip *chip)
1497 {
1498 	int ret, i;
1499 
1500 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1501 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1502 		for (i = 0; i < chip->num_slots; i++)
1503 			jmicron_enable_mmc(chip->slots[i]->host, 1);
1504 	}
1505 
1506 	ret = jmicron_pmos(chip, 1);
1507 	if (ret) {
1508 		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1509 		return ret;
1510 	}
1511 
1512 	return sdhci_pci_resume_host(chip);
1513 }
1514 #endif
1515 
1516 static const struct sdhci_pci_fixes sdhci_jmicron = {
1517 	.probe		= jmicron_probe,
1518 
1519 	.probe_slot	= jmicron_probe_slot,
1520 	.remove_slot	= jmicron_remove_slot,
1521 
1522 #ifdef CONFIG_PM_SLEEP
1523 	.suspend	= jmicron_suspend,
1524 	.resume		= jmicron_resume,
1525 #endif
1526 };
1527 
1528 /* SysKonnect CardBus2SDIO extra registers */
1529 #define SYSKT_CTRL		0x200
1530 #define SYSKT_RDFIFO_STAT	0x204
1531 #define SYSKT_WRFIFO_STAT	0x208
1532 #define SYSKT_POWER_DATA	0x20c
1533 #define   SYSKT_POWER_330	0xef
1534 #define   SYSKT_POWER_300	0xf8
1535 #define   SYSKT_POWER_184	0xcc
1536 #define SYSKT_POWER_CMD		0x20d
1537 #define   SYSKT_POWER_START	(1 << 7)
1538 #define SYSKT_POWER_STATUS	0x20e
1539 #define   SYSKT_POWER_STATUS_OK	(1 << 0)
1540 #define SYSKT_BOARD_REV		0x210
1541 #define SYSKT_CHIP_REV		0x211
1542 #define SYSKT_CONF_DATA		0x212
1543 #define   SYSKT_CONF_DATA_1V8	(1 << 2)
1544 #define   SYSKT_CONF_DATA_2V5	(1 << 1)
1545 #define   SYSKT_CONF_DATA_3V3	(1 << 0)
1546 
1547 static int syskt_probe(struct sdhci_pci_chip *chip)
1548 {
1549 	if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1550 		chip->pdev->class &= ~0x0000FF;
1551 		chip->pdev->class |= PCI_SDHCI_IFDMA;
1552 	}
1553 	return 0;
1554 }
1555 
1556 static int syskt_probe_slot(struct sdhci_pci_slot *slot)
1557 {
1558 	int tm, ps;
1559 
1560 	u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
1561 	u8  chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
1562 	dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
1563 					 "board rev %d.%d, chip rev %d.%d\n",
1564 					 board_rev >> 4, board_rev & 0xf,
1565 					 chip_rev >> 4,  chip_rev & 0xf);
1566 	if (chip_rev >= 0x20)
1567 		slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
1568 
1569 	writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
1570 	writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
1571 	udelay(50);
1572 	tm = 10;  /* Wait max 1 ms */
1573 	do {
1574 		ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
1575 		if (ps & SYSKT_POWER_STATUS_OK)
1576 			break;
1577 		udelay(100);
1578 	} while (--tm);
1579 	if (!tm) {
1580 		dev_err(&slot->chip->pdev->dev,
1581 			"power regulator never stabilized\n");
1582 		writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
1583 		return -ENODEV;
1584 	}
1585 
1586 	return 0;
1587 }
1588 
1589 static const struct sdhci_pci_fixes sdhci_syskt = {
1590 	.quirks		= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
1591 	.probe		= syskt_probe,
1592 	.probe_slot	= syskt_probe_slot,
1593 };
1594 
1595 static int via_probe(struct sdhci_pci_chip *chip)
1596 {
1597 	if (chip->pdev->revision == 0x10)
1598 		chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
1599 
1600 	return 0;
1601 }
1602 
1603 static const struct sdhci_pci_fixes sdhci_via = {
1604 	.probe		= via_probe,
1605 };
1606 
1607 static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
1608 {
1609 	slot->host->mmc->caps2 |= MMC_CAP2_HS200;
1610 	return 0;
1611 }
1612 
1613 static const struct sdhci_pci_fixes sdhci_rtsx = {
1614 	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1615 			SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
1616 			SDHCI_QUIRK2_BROKEN_DDR50,
1617 	.probe_slot	= rtsx_probe_slot,
1618 };
1619 
1620 /* AMD chipset generation */
1621 enum amd_chipset_gen {
1622 	AMD_CHIPSET_BEFORE_ML,
1623 	AMD_CHIPSET_CZ,
1624 	AMD_CHIPSET_NL,
1625 	AMD_CHIPSET_UNKNOWN,
1626 };
1627 
1628 /* AMD registers */
1629 #define AMD_SD_AUTO_PATTERN		0xB8
1630 #define AMD_MSLEEP_DURATION		4
1631 #define AMD_SD_MISC_CONTROL		0xD0
1632 #define AMD_MAX_TUNE_VALUE		0x0B
1633 #define AMD_AUTO_TUNE_SEL		0x10800
1634 #define AMD_FIFO_PTR			0x30
1635 #define AMD_BIT_MASK			0x1F
1636 
1637 static void amd_tuning_reset(struct sdhci_host *host)
1638 {
1639 	unsigned int val;
1640 
1641 	val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1642 	val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
1643 	sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1644 
1645 	val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1646 	val &= ~SDHCI_CTRL_EXEC_TUNING;
1647 	sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1648 }
1649 
1650 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
1651 {
1652 	unsigned int val;
1653 
1654 	pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
1655 	val &= ~AMD_BIT_MASK;
1656 	val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
1657 	pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
1658 }
1659 
1660 static void amd_enable_manual_tuning(struct pci_dev *pdev)
1661 {
1662 	unsigned int val;
1663 
1664 	pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
1665 	val |= AMD_FIFO_PTR;
1666 	pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1667 }
1668 
1669 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1670 {
1671 	struct sdhci_pci_slot *slot = sdhci_priv(host);
1672 	struct pci_dev *pdev = slot->chip->pdev;
1673 	u8 valid_win = 0;
1674 	u8 valid_win_max = 0;
1675 	u8 valid_win_end = 0;
1676 	u8 ctrl, tune_around;
1677 
1678 	amd_tuning_reset(host);
1679 
1680 	for (tune_around = 0; tune_around < 12; tune_around++) {
1681 		amd_config_tuning_phase(pdev, tune_around);
1682 
1683 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1684 			valid_win = 0;
1685 			msleep(AMD_MSLEEP_DURATION);
1686 			ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
1687 			sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
1688 		} else if (++valid_win > valid_win_max) {
1689 			valid_win_max = valid_win;
1690 			valid_win_end = tune_around;
1691 		}
1692 	}
1693 
1694 	if (!valid_win_max) {
1695 		dev_err(&pdev->dev, "no tuning point found\n");
1696 		return -EIO;
1697 	}
1698 
1699 	amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
1700 
1701 	amd_enable_manual_tuning(pdev);
1702 
1703 	host->mmc->retune_period = 0;
1704 
1705 	return 0;
1706 }
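
/*
 * Example of the window selection above (the pass/fail pattern is
 * hypothetical): if phases 4..9 of the 12 tried all pass and the rest fail,
 * then valid_win_max = 6 and valid_win_end = 9, so the phase finally
 * programmed is valid_win_end - valid_win_max / 2 = 9 - 3 = 6, i.e. roughly
 * the centre of the largest passing window. Clearing retune_period afterwards
 * relies on the manually selected phase staying valid.
 */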
1707 
1708 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1709 {
1710 	struct sdhci_host *host = mmc_priv(mmc);
1711 
1712 	/* AMD requires custom HS200 tuning */
1713 	if (host->timing == MMC_TIMING_MMC_HS200)
1714 		return amd_execute_tuning_hs200(host, opcode);
1715 
1716 	/* Otherwise perform standard SDHCI tuning */
1717 	return sdhci_execute_tuning(mmc, opcode);
1718 }
1719 
1720 static int amd_probe_slot(struct sdhci_pci_slot *slot)
1721 {
1722 	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1723 
1724 	ops->execute_tuning = amd_execute_tuning;
1725 
1726 	return 0;
1727 }
1728 
1729 static int amd_probe(struct sdhci_pci_chip *chip)
1730 {
1731 	struct pci_dev	*smbus_dev;
1732 	enum amd_chipset_gen gen;
1733 
1734 	smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1735 			PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
1736 	if (smbus_dev) {
1737 		gen = AMD_CHIPSET_BEFORE_ML;
1738 	} else {
1739 		smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1740 				PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
1741 		if (smbus_dev) {
1742 			if (smbus_dev->revision < 0x51)
1743 				gen = AMD_CHIPSET_CZ;
1744 			else
1745 				gen = AMD_CHIPSET_NL;
1746 		} else {
1747 			gen = AMD_CHIPSET_UNKNOWN;
1748 		}
1749 	}
1750 
1751 	pci_dev_put(smbus_dev);
1752 
1753 	if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
1754 		chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1755 
1756 	return 0;
1757 }
1758 
1759 static u32 sdhci_read_present_state(struct sdhci_host *host)
1760 {
1761 	return sdhci_readl(host, SDHCI_PRESENT_STATE);
1762 }
1763 
1764 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
1765 {
1766 	struct sdhci_pci_slot *slot = sdhci_priv(host);
1767 	struct pci_dev *pdev = slot->chip->pdev;
1768 	u32 present_state;
1769 
1770 	/*
1771 	 * SDHC 0x7906 requires a hard reset to clear all internal state.
1772 	 * Otherwise it can get into a bad state where the DATA lines are always
1773 	 * read as zeros.
1774 	 */
1775 	if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
1776 		pci_clear_master(pdev);
1777 
1778 		pci_save_state(pdev);
1779 
1780 		pci_set_power_state(pdev, PCI_D3cold);
1781 		pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
1782 			pdev->current_state);
1783 		pci_set_power_state(pdev, PCI_D0);
1784 
1785 		pci_restore_state(pdev);
1786 
1787 		/*
1788 		 * SDHCI_RESET_ALL says the card detect logic should not be
1789 		 * reset, but since we need to reset the entire controller
1790 		 * we should wait until the card detect logic has stabilized.
1791 		 *
1792 		 * This normally takes about 40ms.
1793 		 */
1794 		readx_poll_timeout(
1795 			sdhci_read_present_state,
1796 			host,
1797 			present_state,
1798 			present_state & SDHCI_CD_STABLE,
1799 			10000,
1800 			100000
1801 		);
1802 	}
1803 
1804 	return sdhci_reset(host, mask);
1805 }
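
/*
 * Note on the poll above: readx_poll_timeout() re-reads the present state
 * register every 10000 us (10 ms) and gives up after 100000 us (100 ms),
 * which comfortably covers the ~40 ms the card detect logic normally needs
 * to stabilize after the D3cold/D0 cycle.
 */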
1806 
1807 static const struct sdhci_ops amd_sdhci_pci_ops = {
1808 	.set_clock			= sdhci_set_clock,
1809 	.enable_dma			= sdhci_pci_enable_dma,
1810 	.set_bus_width			= sdhci_set_bus_width,
1811 	.reset				= amd_sdhci_reset,
1812 	.set_uhs_signaling		= sdhci_set_uhs_signaling,
1813 };
1814 
1815 static const struct sdhci_pci_fixes sdhci_amd = {
1816 	.probe		= amd_probe,
1817 	.ops		= &amd_sdhci_pci_ops,
1818 	.probe_slot	= amd_probe_slot,
1819 };
1820 
1821 static const struct pci_device_id pci_ids[] = {
1822 	SDHCI_PCI_DEVICE(RICOH, R5C822,  ricoh),
1823 	SDHCI_PCI_DEVICE(RICOH, R5C843,  ricoh_mmc),
1824 	SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
1825 	SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
1826 	SDHCI_PCI_DEVICE(ENE, CB712_SD,   ene_712),
1827 	SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
1828 	SDHCI_PCI_DEVICE(ENE, CB714_SD,   ene_714),
1829 	SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
1830 	SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
1831 	SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD,  jmicron),
1832 	SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
1833 	SDHCI_PCI_DEVICE(JMICRON, JMB388_SD,  jmicron),
1834 	SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
1835 	SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
1836 	SDHCI_PCI_DEVICE(VIA, 95D0, via),
1837 	SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
1838 	SDHCI_PCI_DEVICE(INTEL, QRK_SD,    intel_qrk),
1839 	SDHCI_PCI_DEVICE(INTEL, MRST_SD0,  intel_mrst_hc0),
1840 	SDHCI_PCI_DEVICE(INTEL, MRST_SD1,  intel_mrst_hc1_hc2),
1841 	SDHCI_PCI_DEVICE(INTEL, MRST_SD2,  intel_mrst_hc1_hc2),
1842 	SDHCI_PCI_DEVICE(INTEL, MFD_SD,    intel_mfd_sd),
1843 	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
1844 	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
1845 	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
1846 	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
1847 	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
1848 	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
1849 	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC,  intel_byt_emmc),
1850 	SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
1851 	SDHCI_PCI_DEVICE(INTEL, BYT_SDIO,  intel_byt_sdio),
1852 	SDHCI_PCI_DEVICE(INTEL, BYT_SD,    intel_byt_sd),
1853 	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
1854 	SDHCI_PCI_DEVICE(INTEL, BSW_EMMC,  intel_byt_emmc),
1855 	SDHCI_PCI_DEVICE(INTEL, BSW_SDIO,  intel_byt_sdio),
1856 	SDHCI_PCI_DEVICE(INTEL, BSW_SD,    intel_byt_sd),
1857 	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
1858 	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
1859 	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
1860 	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
1861 	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
1862 	SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
1863 	SDHCI_PCI_DEVICE(INTEL, SPT_EMMC,  intel_byt_emmc),
1864 	SDHCI_PCI_DEVICE(INTEL, SPT_SDIO,  intel_byt_sdio),
1865 	SDHCI_PCI_DEVICE(INTEL, SPT_SD,    intel_byt_sd),
1866 	SDHCI_PCI_DEVICE(INTEL, DNV_EMMC,  intel_byt_emmc),
1867 	SDHCI_PCI_DEVICE(INTEL, CDF_EMMC,  intel_glk_emmc),
1868 	SDHCI_PCI_DEVICE(INTEL, BXT_EMMC,  intel_byt_emmc),
1869 	SDHCI_PCI_DEVICE(INTEL, BXT_SDIO,  intel_byt_sdio),
1870 	SDHCI_PCI_DEVICE(INTEL, BXT_SD,    intel_byt_sd),
1871 	SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
1872 	SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
1873 	SDHCI_PCI_DEVICE(INTEL, BXTM_SD,   intel_byt_sd),
1874 	SDHCI_PCI_DEVICE(INTEL, APL_EMMC,  intel_byt_emmc),
1875 	SDHCI_PCI_DEVICE(INTEL, APL_SDIO,  intel_byt_sdio),
1876 	SDHCI_PCI_DEVICE(INTEL, APL_SD,    intel_byt_sd),
1877 	SDHCI_PCI_DEVICE(INTEL, GLK_EMMC,  intel_glk_emmc),
1878 	SDHCI_PCI_DEVICE(INTEL, GLK_SDIO,  intel_byt_sdio),
1879 	SDHCI_PCI_DEVICE(INTEL, GLK_SD,    intel_byt_sd),
1880 	SDHCI_PCI_DEVICE(INTEL, CNP_EMMC,  intel_glk_emmc),
1881 	SDHCI_PCI_DEVICE(INTEL, CNP_SD,    intel_byt_sd),
1882 	SDHCI_PCI_DEVICE(INTEL, CNPH_SD,   intel_byt_sd),
1883 	SDHCI_PCI_DEVICE(INTEL, ICP_EMMC,  intel_glk_emmc),
1884 	SDHCI_PCI_DEVICE(INTEL, ICP_SD,    intel_byt_sd),
1885 	SDHCI_PCI_DEVICE(INTEL, EHL_EMMC,  intel_glk_emmc),
1886 	SDHCI_PCI_DEVICE(INTEL, EHL_SD,    intel_byt_sd),
1887 	SDHCI_PCI_DEVICE(INTEL, CML_EMMC,  intel_glk_emmc),
1888 	SDHCI_PCI_DEVICE(INTEL, CML_SD,    intel_byt_sd),
1889 	SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
1890 	SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
1891 	SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
1892 	SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
1893 	SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
1894 	SDHCI_PCI_DEVICE(INTEL, ADL_EMMC,  intel_glk_emmc),
1895 	SDHCI_PCI_DEVICE(O2, 8120,     o2),
1896 	SDHCI_PCI_DEVICE(O2, 8220,     o2),
1897 	SDHCI_PCI_DEVICE(O2, 8221,     o2),
1898 	SDHCI_PCI_DEVICE(O2, 8320,     o2),
1899 	SDHCI_PCI_DEVICE(O2, 8321,     o2),
1900 	SDHCI_PCI_DEVICE(O2, FUJIN2,   o2),
1901 	SDHCI_PCI_DEVICE(O2, SDS0,     o2),
1902 	SDHCI_PCI_DEVICE(O2, SDS1,     o2),
1903 	SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
1904 	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
1905 	SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
1906 	SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
1907 	SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
1908 	SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
1909 	SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
1910 	SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
1911 	SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
1912 	SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
1913 	SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
1914 	SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
1915 	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
1916 	/* Generic SD host controller */
1917 	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
1918 	{ /* end: all zeroes */ },
1919 };
1920 
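/*
 * Export the ID table so userspace module autoloading can match this
 * driver against the controllers listed above (and against the generic
 * SDHCI class entry).
 */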
1921 MODULE_DEVICE_TABLE(pci, pci_ids);
1922 
1923 /*****************************************************************************\
1924  *                                                                           *
1925  * SDHCI core callbacks                                                      *
1926  *                                                                           *
1927 \*****************************************************************************/
1928 
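/*
 * Enable bus mastering for the slot's PCI function.  If the class code
 * says this is a standard SDHCI controller whose programming interface
 * does not advertise DMA, but the core has selected SDMA anyway, warn
 * about it first.
 */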
1929 int sdhci_pci_enable_dma(struct sdhci_host *host)
1930 {
1931 	struct sdhci_pci_slot *slot;
1932 	struct pci_dev *pdev;
1933 
1934 	slot = sdhci_priv(host);
1935 	pdev = slot->chip->pdev;
1936 
1937 	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
1938 		((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
1939 		(host->flags & SDHCI_USE_SDMA)) {
1940 		dev_warn(&pdev->dev, "Will use DMA mode even though HW doesn't fully claim to support it.\n");
1942 	}
1943 
1944 	pci_set_master(pdev);
1945 
1946 	return 0;
1947 }
1948 
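/* Hand hardware reset off to the chip-specific hook, if one was installed. */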
1949 static void sdhci_pci_hw_reset(struct sdhci_host *host)
1950 {
1951 	struct sdhci_pci_slot *slot = sdhci_priv(host);
1952 
1953 	if (slot->hw_reset)
1954 		slot->hw_reset(host);
1955 }
1956 
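/*
 * Default host operations, used whenever a chip's fixes entry does not
 * supply its own ops table.
 */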
1957 static const struct sdhci_ops sdhci_pci_ops = {
1958 	.set_clock	= sdhci_set_clock,
1959 	.enable_dma	= sdhci_pci_enable_dma,
1960 	.set_bus_width	= sdhci_set_bus_width,
1961 	.reset		= sdhci_reset,
1962 	.set_uhs_signaling = sdhci_set_uhs_signaling,
1963 	.hw_reset		= sdhci_pci_hw_reset,
1964 };
1965 
1966 /*****************************************************************************\
1967  *                                                                           *
1968  * Suspend/resume                                                            *
1969  *                                                                           *
1970 \*****************************************************************************/
1971 
1972 #ifdef CONFIG_PM_SLEEP
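/*
 * System sleep: chip-specific fixes may provide their own suspend/resume
 * handlers; otherwise fall back to the generic per-slot host paths.
 */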
1973 static int sdhci_pci_suspend(struct device *dev)
1974 {
1975 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
1976 
1977 	if (!chip)
1978 		return 0;
1979 
1980 	if (chip->fixes && chip->fixes->suspend)
1981 		return chip->fixes->suspend(chip);
1982 
1983 	return sdhci_pci_suspend_host(chip);
1984 }
1985 
1986 static int sdhci_pci_resume(struct device *dev)
1987 {
1988 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
1989 
1990 	if (!chip)
1991 		return 0;
1992 
1993 	if (chip->fixes && chip->fixes->resume)
1994 		return chip->fixes->resume(chip);
1995 
1996 	return sdhci_pci_resume_host(chip);
1997 }
1998 #endif
1999 
2000 #ifdef CONFIG_PM
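/*
 * Runtime PM follows the same pattern: prefer the chip-specific handlers
 * and fall back to the generic per-slot host paths.
 */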
2001 static int sdhci_pci_runtime_suspend(struct device *dev)
2002 {
2003 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2004 
2005 	if (!chip)
2006 		return 0;
2007 
2008 	if (chip->fixes && chip->fixes->runtime_suspend)
2009 		return chip->fixes->runtime_suspend(chip);
2010 
2011 	return sdhci_pci_runtime_suspend_host(chip);
2012 }
2013 
2014 static int sdhci_pci_runtime_resume(struct device *dev)
2015 {
2016 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2017 
2018 	if (!chip)
2019 		return 0;
2020 
2021 	if (chip->fixes && chip->fixes->runtime_resume)
2022 		return chip->fixes->runtime_resume(chip);
2023 
2024 	return sdhci_pci_runtime_resume_host(chip);
2025 }
2026 #endif
2027 
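/* Tie system sleep and runtime PM into a single dev_pm_ops table. */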
2028 static const struct dev_pm_ops sdhci_pci_pm_ops = {
2029 	SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
2030 	SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
2031 			sdhci_pci_runtime_resume, NULL)
2032 };
2033 
2034 /*****************************************************************************\
2035  *                                                                           *
2036  * Device probing/removal                                                    *
2037  *                                                                           *
2038 \*****************************************************************************/
2039 
2040 static struct sdhci_pci_slot *sdhci_pci_probe_slot(
2041 	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
2042 	int slotno)
2043 {
2044 	struct sdhci_pci_slot *slot;
2045 	struct sdhci_host *host;
2046 	int ret, bar = first_bar + slotno;
2047 	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
2048 
2049 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
2050 		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
2051 		return ERR_PTR(-ENODEV);
2052 	}
2053 
2054 	if (pci_resource_len(pdev, bar) < 0x100) {
2055 		dev_err(&pdev->dev, "Invalid iomem size. You may experience problems.\n");
2057 	}
2058 
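	/*
	 * The low byte of the class code is the programming interface.  The
	 * "vendor specific" value and anything above it does not promise the
	 * standard SDHCI register layout, so refuse to bind.
	 */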
2059 	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
2060 		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
2061 		return ERR_PTR(-ENODEV);
2062 	}
2063 
2064 	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
2065 		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
2066 		return ERR_PTR(-ENODEV);
2067 	}
2068 
2069 	host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
2070 	if (IS_ERR(host)) {
2071 		dev_err(&pdev->dev, "cannot allocate host\n");
2072 		return ERR_CAST(host);
2073 	}
2074 
2075 	slot = sdhci_priv(host);
2076 
2077 	slot->chip = chip;
2078 	slot->host = host;
2079 	slot->cd_idx = -1;
2080 
2081 	host->hw_name = "PCI";
2082 	host->ops = chip->fixes && chip->fixes->ops ?
2083 		    chip->fixes->ops :
2084 		    &sdhci_pci_ops;
2085 	host->quirks = chip->quirks;
2086 	host->quirks2 = chip->quirks2;
2087 
2088 	host->irq = pdev->irq;
2089 
2090 	ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
2091 	if (ret) {
2092 		dev_err(&pdev->dev, "cannot request region\n");
2093 		goto cleanup;
2094 	}
2095 
2096 	host->ioaddr = pcim_iomap_table(pdev)[bar];
2097 
2098 	if (chip->fixes && chip->fixes->probe_slot) {
2099 		ret = chip->fixes->probe_slot(slot);
2100 		if (ret)
2101 			goto cleanup;
2102 	}
2103 
2104 	host->mmc->pm_caps = MMC_PM_KEEP_POWER;
2105 	host->mmc->slotno = slotno;
2106 	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
2107 
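	/*
	 * Advertise SDIO IRQ wakeup only when the PCI device itself is
	 * wakeup-capable, and arm device wakeup if the slot supports waking
	 * on card detect.
	 */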
2108 	if (device_can_wakeup(&pdev->dev))
2109 		host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2110 
2111 	if (host->mmc->caps & MMC_CAP_CD_WAKE)
2112 		device_init_wakeup(&pdev->dev, true);
2113 
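	/*
	 * If a chip-specific probe picked a GPIO index for card detect, try
	 * the named "cd" GPIO first and fall back to an unnamed descriptor
	 * before giving up on GPIO card detect.
	 */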
2114 	if (slot->cd_idx >= 0) {
2115 		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
2116 					   slot->cd_override_level, 0);
2117 		if (ret && ret != -EPROBE_DEFER)
2118 			ret = mmc_gpiod_request_cd(host->mmc, NULL,
2119 						   slot->cd_idx,
2120 						   slot->cd_override_level,
2121 						   0);
2122 		if (ret == -EPROBE_DEFER)
2123 			goto remove;
2124 
2125 		if (ret) {
2126 			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
2127 			slot->cd_idx = -1;
2128 		}
2129 	}
2130 
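	/*
	 * Chip-specific code may want to register the host itself (for
	 * example to attach CQHCI); otherwise use the plain sdhci path.
	 */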
2131 	if (chip->fixes && chip->fixes->add_host)
2132 		ret = chip->fixes->add_host(slot);
2133 	else
2134 		ret = sdhci_add_host(host);
2135 	if (ret)
2136 		goto remove;
2137 
2138 	/*
2139 	 * Chips that need their own card-detect GPIO to wake from runtime
2140 	 * suspend cannot use runtime PM without it, so disallow runtime PM
2141 	 */
2142 	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
2143 		chip->allow_runtime_pm = false;
2144 
2145 	return slot;
2146 
2147 remove:
2148 	if (chip->fixes && chip->fixes->remove_slot)
2149 		chip->fixes->remove_slot(slot, 0);
2150 
2151 cleanup:
2152 	sdhci_free_host(host);
2153 
2154 	return ERR_PTR(ret);
2155 }
2156 
2157 static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
2158 {
2159 	int dead;
2160 	u32 scratch;
2161 
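	/*
	 * A read of all ones from the interrupt status register means the
	 * controller is no longer responding (e.g. it has been removed), so
	 * tell the core the host is dead and avoid touching it on the way
	 * out.
	 */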
2162 	dead = 0;
2163 	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
2164 	if (scratch == (u32)-1)
2165 		dead = 1;
2166 
2167 	sdhci_remove_host(slot->host, dead);
2168 
2169 	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
2170 		slot->chip->fixes->remove_slot(slot, dead);
2171 
2172 	sdhci_free_host(slot->host);
2173 }
2174 
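/*
 * Balanced against sdhci_pci_runtime_pm_forbid() below, which is called at
 * removal time: "allow" drops a usage count without idling (so the device
 * stays active until the mmc core has scanned for a card) and enables
 * autosuspend, while "forbid" takes the reference back.
 */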
2175 static void sdhci_pci_runtime_pm_allow(struct device *dev)
2176 {
2177 	pm_suspend_ignore_children(dev, 1);
2178 	pm_runtime_set_autosuspend_delay(dev, 50);
2179 	pm_runtime_use_autosuspend(dev);
2180 	pm_runtime_allow(dev);
2181 	/* Stay active until mmc core scans for a card */
2182 	pm_runtime_put_noidle(dev);
2183 }
2184 
2185 static void sdhci_pci_runtime_pm_forbid(struct device *dev)
2186 {
2187 	pm_runtime_forbid(dev);
2188 	pm_runtime_get_noresume(dev);
2189 }
2190 
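/*
 * Read the slot count and first BAR from the standard SDHCI slot
 * information register, apply any chip-specific fixes, then bring up an
 * sdhci host for every slot.
 */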
2191 static int sdhci_pci_probe(struct pci_dev *pdev,
2192 			   const struct pci_device_id *ent)
2193 {
2194 	struct sdhci_pci_chip *chip;
2195 	struct sdhci_pci_slot *slot;
2196 
2197 	u8 slots, first_bar;
2198 	int ret, i;
2199 
2200 	BUG_ON(pdev == NULL);
2201 	BUG_ON(ent == NULL);
2202 
2203 	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
2204 		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
2205 
2206 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
2207 	if (ret)
2208 		return pcibios_err_to_errno(ret);
2209 
2210 	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
2211 	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
2212 
2213 	BUG_ON(slots > MAX_SLOTS);
2214 
2215 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
2216 	if (ret)
2217 		return pcibios_err_to_errno(ret);
2218 
2219 	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
2220 
2221 	if (first_bar > 5) {
2222 		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
2223 		return -ENODEV;
2224 	}
2225 
2226 	ret = pcim_enable_device(pdev);
2227 	if (ret)
2228 		return ret;
2229 
2230 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
2231 	if (!chip)
2232 		return -ENOMEM;
2233 
2234 	chip->pdev = pdev;
2235 	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
2236 	if (chip->fixes) {
2237 		chip->quirks = chip->fixes->quirks;
2238 		chip->quirks2 = chip->fixes->quirks2;
2239 		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
2240 	}
2241 	chip->num_slots = slots;
2242 	chip->pm_retune = true;
2243 	chip->rpm_retune = true;
2244 
2245 	pci_set_drvdata(pdev, chip);
2246 
2247 	if (chip->fixes && chip->fixes->probe) {
2248 		ret = chip->fixes->probe(chip);
2249 		if (ret)
2250 			return ret;
2251 	}
2252 
2253 	slots = chip->num_slots;	/* Quirk may have changed this */
2254 
2255 	for (i = 0; i < slots; i++) {
2256 		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
2257 		if (IS_ERR(slot)) {
2258 			for (i--; i >= 0; i--)
2259 				sdhci_pci_remove_slot(chip->slots[i]);
2260 			return PTR_ERR(slot);
2261 		}
2262 
2263 		chip->slots[i] = slot;
2264 	}
2265 
2266 	if (chip->allow_runtime_pm)
2267 		sdhci_pci_runtime_pm_allow(&pdev->dev);
2268 
2269 	return 0;
2270 }
2271 
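/*
 * Undo probe: if runtime PM was allowed, forbid it again (and take back the
 * usage count) before removing each slot.
 */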
2272 static void sdhci_pci_remove(struct pci_dev *pdev)
2273 {
2274 	int i;
2275 	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
2276 
2277 	if (chip->allow_runtime_pm)
2278 		sdhci_pci_runtime_pm_forbid(&pdev->dev);
2279 
2280 	for (i = 0; i < chip->num_slots; i++)
2281 		sdhci_pci_remove_slot(chip->slots[i]);
2282 }
2283 
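/*
 * PROBE_PREFER_ASYNCHRONOUS lets the driver core probe this driver
 * asynchronously, so potentially slow host bring-up does not hold up the
 * rest of boot.
 */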
2284 static struct pci_driver sdhci_driver = {
2285 	.name =		"sdhci-pci",
2286 	.id_table =	pci_ids,
2287 	.probe =	sdhci_pci_probe,
2288 	.remove =	sdhci_pci_remove,
2289 	.driver =	{
2290 		.pm =   &sdhci_pci_pm_ops,
2291 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
2292 	},
2293 };
2294 
2295 module_pci_driver(sdhci_driver);
2296 
2297 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2298 MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
2299 MODULE_LICENSE("GPL");
2300