1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * Thanks to the following companies for their support:
7 *
8 * - JMicron (hardware and technical support)
9 */
10
11 #include <linux/bitfield.h>
12 #include <linux/string.h>
13 #include <linux/delay.h>
14 #include <linux/highmem.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/scatterlist.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/gpio.h>
24 #include <linux/gpio/machine.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_qos.h>
27 #include <linux/debugfs.h>
28 #include <linux/acpi.h>
29 #include <linux/dmi.h>
30
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/slot-gpio.h>
34
35 #ifdef CONFIG_X86
36 #include <asm/iosf_mbi.h>
37 #endif
38
39 #include "cqhci.h"
40
41 #include "sdhci.h"
42 #include "sdhci-cqhci.h"
43 #include "sdhci-pci.h"
44 #include "sdhci-uhs2.h"
45
46 static void sdhci_pci_hw_reset(struct sdhci_host *host);
47
48 #ifdef CONFIG_PM_SLEEP
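/*
* Enable PCI device wakeup only if at least one slot keeps power and uses
* SDIO IRQ wakeup during suspend; otherwise disable it, unless a slot relies
* on card-detect GPIO wake.
*/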
49 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
50 {
51 mmc_pm_flag_t pm_flags = 0;
52 bool cap_cd_wake = false;
53 int i;
54
55 for (i = 0; i < chip->num_slots; i++) {
56 struct sdhci_pci_slot *slot = chip->slots[i];
57
58 if (slot) {
59 pm_flags |= slot->host->mmc->pm_flags;
60 if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
61 cap_cd_wake = true;
62 }
63 }
64
65 if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
66 return device_wakeup_enable(&chip->pdev->dev);
67 else if (!cap_cd_wake)
68 device_wakeup_disable(&chip->pdev->dev);
69
70 return 0;
71 }
72
73 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
74 {
75 int i, ret;
76
77 sdhci_pci_init_wakeup(chip);
78
79 for (i = 0; i < chip->num_slots; i++) {
80 struct sdhci_pci_slot *slot = chip->slots[i];
81 struct sdhci_host *host;
82
83 if (!slot)
84 continue;
85
86 host = slot->host;
87
88 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
89 mmc_retune_needed(host->mmc);
90
91 ret = sdhci_suspend_host(host);
92 if (ret)
93 goto err_pci_suspend;
94
95 if (device_may_wakeup(&chip->pdev->dev))
96 mmc_gpio_set_cd_wake(host->mmc, true);
97 }
98
99 return 0;
100
101 err_pci_suspend:
102 while (--i >= 0)
103 sdhci_resume_host(chip->slots[i]->host);
104 return ret;
105 }
106
107 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
108 {
109 struct sdhci_pci_slot *slot;
110 int i, ret;
111
112 for (i = 0; i < chip->num_slots; i++) {
113 slot = chip->slots[i];
114 if (!slot)
115 continue;
116
117 ret = sdhci_resume_host(slot->host);
118 if (ret)
119 return ret;
120
121 mmc_gpio_set_cd_wake(slot->host->mmc, false);
122 }
123
124 return 0;
125 }
126
127 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
128 {
129 int ret;
130
131 ret = cqhci_suspend(chip->slots[0]->host->mmc);
132 if (ret)
133 return ret;
134
135 return sdhci_pci_suspend_host(chip);
136 }
137
138 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
139 {
140 int ret;
141
142 ret = sdhci_pci_resume_host(chip);
143 if (ret)
144 return ret;
145
146 return cqhci_resume(chip->slots[0]->host->mmc);
147 }
148 #endif
149
150 #ifdef CONFIG_PM
151 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
152 {
153 struct sdhci_pci_slot *slot;
154 struct sdhci_host *host;
155
156 for (int i = 0; i < chip->num_slots; i++) {
157 slot = chip->slots[i];
158 if (!slot)
159 continue;
160
161 host = slot->host;
162
163 sdhci_runtime_suspend_host(host);
164
165 if (chip->rpm_retune &&
166 host->tuning_mode != SDHCI_TUNING_MODE_3)
167 mmc_retune_needed(host->mmc);
168 }
169
170 return 0;
171 }
172
173 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
174 {
175 struct sdhci_pci_slot *slot;
176
177 for (int i = 0; i < chip->num_slots; i++) {
178 slot = chip->slots[i];
179 if (!slot)
180 continue;
181
182 sdhci_runtime_resume_host(slot->host, 0);
183 }
184
185 return 0;
186 }
187
188 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
189 {
190 int ret;
191
192 ret = cqhci_suspend(chip->slots[0]->host->mmc);
193 if (ret)
194 return ret;
195
196 return sdhci_pci_runtime_suspend_host(chip);
197 }
198
199 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
200 {
201 int ret;
202
203 ret = sdhci_pci_runtime_resume_host(chip);
204 if (ret)
205 return ret;
206
207 return cqhci_resume(chip->slots[0]->host->mmc);
208 }
209 #endif
210
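/*
* If the interrupt is not a CQE interrupt, hand it back for normal SDHCI
* handling; otherwise forward it to CQHCI.
*/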
211 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
212 {
213 int cmd_error = 0;
214 int data_error = 0;
215
216 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
217 return intmask;
218
219 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
220
221 return 0;
222 }
223
224 static void sdhci_pci_dumpregs(struct mmc_host *mmc)
225 {
226 sdhci_dumpregs(mmc_priv(mmc));
227 }
228
229 /*****************************************************************************\
230 * *
231 * Hardware specific quirk handling *
232 * *
233 \*****************************************************************************/
234
235 static int ricoh_probe(struct sdhci_pci_chip *chip)
236 {
237 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
238 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
239 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
240 return 0;
241 }
242
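/* The MMC-only interface does not report usable capabilities, so supply them by hand */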
243 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
244 {
245 u32 caps =
246 FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
247 FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
248 SDHCI_TIMEOUT_CLK_UNIT |
249 SDHCI_CAN_VDD_330 |
250 SDHCI_CAN_DO_HISPD |
251 SDHCI_CAN_DO_SDMA;
252 u32 caps1 = 0;
253
254 __sdhci_read_caps(slot->host, NULL, &caps, &caps1);
255 return 0;
256 }
257
258 #ifdef CONFIG_PM_SLEEP
259 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
260 {
261 /* Apply a delay to allow the controller to settle; otherwise it becomes
262 * confused if the card state changed during suspend.
263 */
264 msleep(500);
265 return sdhci_pci_resume_host(chip);
266 }
267 #endif
268
269 static const struct sdhci_pci_fixes sdhci_ricoh = {
270 .probe = ricoh_probe,
271 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
272 SDHCI_QUIRK_FORCE_DMA |
273 SDHCI_QUIRK_CLOCK_BEFORE_RESET,
274 };
275
276 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
277 .probe_slot = ricoh_mmc_probe_slot,
278 #ifdef CONFIG_PM_SLEEP
279 .resume = ricoh_mmc_resume,
280 #endif
281 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
282 SDHCI_QUIRK_CLOCK_BEFORE_RESET |
283 SDHCI_QUIRK_NO_CARD_NO_RESET,
284 };
285
286 static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
287 {
288 struct sdhci_host *host = mmc_priv(mmc);
289
290 sdhci_set_ios(mmc, ios);
291
292 /*
293 * Some (ENE) controllers misbehave on some ios operations,
294 * signalling timeout and CRC errors even on CMD0. Resetting
295 * it on each ios seems to solve the problem.
296 */
297 if (!(host->flags & SDHCI_DEVICE_DEAD))
298 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
299 }
300
301 static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
302 {
303 slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
304 return 0;
305 }
306
307 static const struct sdhci_pci_fixes sdhci_ene_712 = {
308 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
309 SDHCI_QUIRK_BROKEN_DMA,
310 };
311
312 static const struct sdhci_pci_fixes sdhci_ene_714 = {
313 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
314 SDHCI_QUIRK_BROKEN_DMA,
315 .probe_slot = ene_714_probe_slot,
316 };
317
318 static const struct sdhci_pci_fixes sdhci_cafe = {
319 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
320 SDHCI_QUIRK_NO_BUSY_IRQ |
321 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
322 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
323 };
324
325 static const struct sdhci_pci_fixes sdhci_intel_qrk = {
326 .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
327 };
328
329 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
330 {
331 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
332 return 0;
333 }
334
335 /*
336 * ADMA operation is disabled for the Moorestown platform due to
337 * hardware bugs.
338 */
339 static int mrst_hc_probe(struct sdhci_pci_chip *chip)
340 {
341 /*
342 * The number of slots is fixed here for MRST as SDIO3/5 are never used
343 * and have hardware bugs.
344 */
345 chip->num_slots = 1;
346 return 0;
347 }
348
349 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
350 {
351 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
352 return 0;
353 }
354
355 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
356 {
357 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
358 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
359 return 0;
360 }
361
362 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
363 {
364 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
365 return 0;
366 }
367
368 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
369 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
370 .probe_slot = mrst_hc_probe_slot,
371 };
372
373 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
374 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
375 .probe = mrst_hc_probe,
376 };
377
378 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
379 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
380 .allow_runtime_pm = true,
381 .own_cd_for_runtime_pm = true,
382 };
383
384 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
385 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
386 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
387 .allow_runtime_pm = true,
388 .probe_slot = mfd_sdio_probe_slot,
389 };
390
391 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
392 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
393 .allow_runtime_pm = true,
394 .probe_slot = mfd_emmc_probe_slot,
395 };
396
397 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
398 .quirks = SDHCI_QUIRK_BROKEN_ADMA,
399 .probe_slot = pch_hc_probe_slot,
400 };
401
402 #ifdef CONFIG_X86
403
404 #define BYT_IOSF_SCCEP 0x63
405 #define BYT_IOSF_OCP_NETCTRL0 0x1078
406 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
407
408 static void byt_ocp_setting(struct pci_dev *pdev)
409 {
410 u32 val = 0;
411
412 if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
413 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
414 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
415 pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
416 return;
417
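/* Read the OCP fabric control register over the IOSF mailbox and clear the timeout-base field if it is set */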
418 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
419 &val)) {
420 dev_err(&pdev->dev, "%s read error\n", __func__);
421 return;
422 }
423
424 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
425 return;
426
427 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
428
429 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
430 val)) {
431 dev_err(&pdev->dev, "%s write error\n", __func__);
432 return;
433 }
434
435 dev_dbg(&pdev->dev, "%s completed\n", __func__);
436 }
437
438 #else
439
440 static inline void byt_ocp_setting(struct pci_dev *pdev)
441 {
442 }
443
444 #endif
445
446 enum {
447 INTEL_DSM_FNS = 0,
448 INTEL_DSM_V18_SWITCH = 3,
449 INTEL_DSM_V33_SWITCH = 4,
450 INTEL_DSM_DRV_STRENGTH = 9,
451 INTEL_DSM_D3_RETUNE = 10,
452 };
453
454 struct intel_host {
455 u32 dsm_fns;
456 int drv_strength;
457 bool d3_retune;
458 bool rpm_retune_ok;
459 bool needs_pwr_off;
460 u32 glk_rx_ctrl1;
461 u32 glk_tun_val;
462 u32 active_ltr;
463 u32 idle_ltr;
464 };
465
466 static const guid_t intel_dsm_guid =
467 GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
468 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
469
470 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
471 unsigned int fn, u32 *result)
472 {
473 union acpi_object *obj;
474 int err = 0;
475 size_t len;
476
477 obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
478 ACPI_TYPE_BUFFER);
479 if (!obj)
480 return -EOPNOTSUPP;
481
482 if (obj->buffer.length < 1) {
483 err = -EINVAL;
484 goto out;
485 }
486
487 len = min_t(size_t, obj->buffer.length, 4);
488
489 *result = 0;
490 memcpy(result, obj->buffer.pointer, len);
491 out:
492 ACPI_FREE(obj);
493
494 return err;
495 }
496
497 static int intel_dsm(struct intel_host *intel_host, struct device *dev,
498 unsigned int fn, u32 *result)
499 {
500 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
501 return -EOPNOTSUPP;
502
503 return __intel_dsm(intel_host, dev, fn, result);
504 }
505
506 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
507 struct mmc_host *mmc)
508 {
509 int err;
510 u32 val;
511
512 intel_host->d3_retune = true;
513
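/* Per ACPI _DSM convention, function 0 returns a bitmask of the supported functions */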
514 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
515 if (err) {
516 pr_debug("%s: DSM not supported, error %d\n",
517 mmc_hostname(mmc), err);
518 return;
519 }
520
521 pr_debug("%s: DSM function mask %#x\n",
522 mmc_hostname(mmc), intel_host->dsm_fns);
523
524 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
525 intel_host->drv_strength = err ? 0 : val;
526
527 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
528 intel_host->d3_retune = err ? true : !!val;
529 }
530
531 static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
532 {
533 u8 reg;
534
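/* Pulse the eMMC hardware reset line, driven here by bit 4 of SDHCI_POWER_CONTROL */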
535 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
536 reg |= 0x10;
537 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
538 /* For eMMC, minimum is 1us but give it 9us for good measure */
539 udelay(9);
540 reg &= ~0x10;
541 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
542 /* For eMMC, minimum is 200us but give it 300us for good measure */
543 usleep_range(300, 1000);
544 }
545
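/* Use the DSM-provided drive strength only if the card actually supports it */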
546 static int intel_select_drive_strength(struct mmc_card *card,
547 unsigned int max_dtr, int host_drv,
548 int card_drv, int *drv_type)
549 {
550 struct sdhci_host *host = mmc_priv(card->host);
551 struct sdhci_pci_slot *slot = sdhci_priv(host);
552 struct intel_host *intel_host = sdhci_pci_priv(slot);
553
554 if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
555 return 0;
556
557 return intel_host->drv_strength;
558 }
559
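/*
* Trust the CD GPIO when it says no card is present; otherwise cross-check
* the controller's present-state register.
*/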
560 static int bxt_get_cd(struct mmc_host *mmc)
561 {
562 int gpio_cd = mmc_gpio_get_cd(mmc);
563
564 if (!gpio_cd)
565 return 0;
566
567 return sdhci_get_cd_nogpio(mmc);
568 }
569
570 static int mrfld_get_cd(struct mmc_host *mmc)
571 {
572 return sdhci_get_cd_nogpio(mmc);
573 }
574
575 #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
576 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
577
578 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
579 unsigned short vdd)
580 {
581 struct sdhci_pci_slot *slot = sdhci_priv(host);
582 struct intel_host *intel_host = sdhci_pci_priv(slot);
583 int cntr;
584 u8 reg;
585
586 /*
587 * Bus power may control card power, but a full reset still may not
588 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
589 * That might be needed to initialize correctly, if the card was left
590 * powered on previously.
591 */
592 if (intel_host->needs_pwr_off) {
593 intel_host->needs_pwr_off = false;
594 if (mode != MMC_POWER_OFF) {
595 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
596 usleep_range(10000, 12500);
597 }
598 }
599
600 sdhci_set_power(host, mode, vdd);
601
602 if (mode == MMC_POWER_OFF) {
603 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
604 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
605 usleep_range(15000, 17500);
606 return;
607 }
608
609 /*
610 * Bus power might not enable after D3 -> D0 transition due to the
611 * present state not yet having propagated. Retry for up to 2ms.
612 */
613 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
614 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
615 if (reg & SDHCI_POWER_ON)
616 break;
617 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
618 reg |= SDHCI_POWER_ON;
619 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
620 }
621 }
622
623 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
624 unsigned int timing)
625 {
626 /* Set UHS timing to SDR25 for High Speed mode */
627 if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
628 timing = MMC_TIMING_UHS_SDR25;
629 sdhci_set_uhs_signaling(host, timing);
630 }
631
632 #define INTEL_HS400_ES_REG 0x78
633 #define INTEL_HS400_ES_BIT BIT(0)
634
635 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
636 struct mmc_ios *ios)
637 {
638 struct sdhci_host *host = mmc_priv(mmc);
639 u32 val;
640
641 val = sdhci_readl(host, INTEL_HS400_ES_REG);
642 if (ios->enhanced_strobe)
643 val |= INTEL_HS400_ES_BIT;
644 else
645 val &= ~INTEL_HS400_ES_BIT;
646 sdhci_writel(host, val, INTEL_HS400_ES_REG);
647 }
648
649 static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
650 struct mmc_ios *ios)
651 {
652 struct device *dev = mmc_dev(mmc);
653 struct sdhci_host *host = mmc_priv(mmc);
654 struct sdhci_pci_slot *slot = sdhci_priv(host);
655 struct intel_host *intel_host = sdhci_pci_priv(slot);
656 unsigned int fn;
657 u32 result = 0;
658 int err;
659
660 err = sdhci_start_signal_voltage_switch(mmc, ios);
661 if (err)
662 return err;
663
664 switch (ios->signal_voltage) {
665 case MMC_SIGNAL_VOLTAGE_330:
666 fn = INTEL_DSM_V33_SWITCH;
667 break;
668 case MMC_SIGNAL_VOLTAGE_180:
669 fn = INTEL_DSM_V18_SWITCH;
670 break;
671 default:
672 return 0;
673 }
674
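/* Let platform firmware know (via DSM) which signal voltage is now in use */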
675 err = intel_dsm(intel_host, dev, fn, &result);
676 pr_debug("%s: %s DSM fn %u error %d result %u\n",
677 mmc_hostname(mmc), __func__, fn, err, result);
678
679 return 0;
680 }
681
682 static const struct sdhci_ops sdhci_intel_byt_ops = {
683 .set_clock = sdhci_set_clock,
684 .set_power = sdhci_intel_set_power,
685 .enable_dma = sdhci_pci_enable_dma,
686 .set_bus_width = sdhci_set_bus_width,
687 .reset = sdhci_reset,
688 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
689 .hw_reset = sdhci_pci_hw_reset,
690 };
691
692 static const struct sdhci_ops sdhci_intel_glk_ops = {
693 .set_clock = sdhci_set_clock,
694 .set_power = sdhci_intel_set_power,
695 .enable_dma = sdhci_pci_enable_dma,
696 .set_bus_width = sdhci_set_bus_width,
697 .reset = sdhci_and_cqhci_reset,
698 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
699 .hw_reset = sdhci_pci_hw_reset,
700 .irq = sdhci_cqhci_irq,
701 };
702
703 static void byt_read_dsm(struct sdhci_pci_slot *slot)
704 {
705 struct intel_host *intel_host = sdhci_pci_priv(slot);
706 struct device *dev = &slot->chip->pdev->dev;
707 struct mmc_host *mmc = slot->host->mmc;
708
709 intel_dsm_init(intel_host, dev, mmc);
710 slot->chip->rpm_retune = intel_host->d3_retune;
711 }
712
713 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
714 {
715 int err = sdhci_execute_tuning(mmc, opcode);
716 struct sdhci_host *host = mmc_priv(mmc);
717
718 if (err)
719 return err;
720
721 /*
722 * Tuning can leave the IP in an active state (Buffer Read Enable bit
723 * set) which prevents the entry to low power states (i.e. S0i3). Data
724 * reset will clear it.
725 */
726 sdhci_reset(host, SDHCI_RESET_DATA);
727
728 return 0;
729 }
730
731 #define INTEL_ACTIVELTR 0x804
732 #define INTEL_IDLELTR 0x808
733
734 #define INTEL_LTR_REQ BIT(15)
735 #define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
736 #define INTEL_LTR_SCALE_1US (2 << 10)
737 #define INTEL_LTR_SCALE_32US (3 << 10)
738 #define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
739
740 static void intel_cache_ltr(struct sdhci_pci_slot *slot)
741 {
742 struct intel_host *intel_host = sdhci_pci_priv(slot);
743 struct sdhci_host *host = slot->host;
744
745 intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
746 intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
747 }
748
749 static void intel_ltr_set(struct device *dev, s32 val)
750 {
751 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
752 struct sdhci_pci_slot *slot = chip->slots[0];
753 struct intel_host *intel_host = sdhci_pci_priv(slot);
754 struct sdhci_host *host = slot->host;
755 u32 ltr;
756
757 pm_runtime_get_sync(dev);
758
759 /*
760 * Program the latency tolerance (LTR) according to what has been
761 * requested by the PM QoS layer, or disable it if we were passed a
762 * negative value or PM_QOS_LATENCY_ANY.
763 */
764 ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
765
766 if (val == PM_QOS_LATENCY_ANY || val < 0) {
767 ltr &= ~INTEL_LTR_REQ;
768 } else {
769 ltr |= INTEL_LTR_REQ;
770 ltr &= ~INTEL_LTR_SCALE_MASK;
771 ltr &= ~INTEL_LTR_VALUE_MASK;
772
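/*
* The LTR value field is only 10 bits wide; values that do not fit use
* the 32 us scale instead of 1 us, hence the divide by 32 (right shift
* by 5).
*/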
773 if (val > INTEL_LTR_VALUE_MASK) {
774 val >>= 5;
775 if (val > INTEL_LTR_VALUE_MASK)
776 val = INTEL_LTR_VALUE_MASK;
777 ltr |= INTEL_LTR_SCALE_32US | val;
778 } else {
779 ltr |= INTEL_LTR_SCALE_1US | val;
780 }
781 }
782
783 if (ltr == intel_host->active_ltr)
784 goto out;
785
786 writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
787 writel(ltr, host->ioaddr + INTEL_IDLELTR);
788
789 /* Cache the values into lpss structure */
790 intel_cache_ltr(slot);
791 out:
792 pm_runtime_put_autosuspend(dev);
793 }
794
795 static bool intel_use_ltr(struct sdhci_pci_chip *chip)
796 {
797 switch (chip->pdev->device) {
798 case PCI_DEVICE_ID_INTEL_BYT_EMMC:
799 case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
800 case PCI_DEVICE_ID_INTEL_BYT_SDIO:
801 case PCI_DEVICE_ID_INTEL_BYT_SD:
802 case PCI_DEVICE_ID_INTEL_BSW_EMMC:
803 case PCI_DEVICE_ID_INTEL_BSW_SDIO:
804 case PCI_DEVICE_ID_INTEL_BSW_SD:
805 return false;
806 default:
807 return true;
808 }
809 }
810
811 static void intel_ltr_expose(struct sdhci_pci_chip *chip)
812 {
813 struct device *dev = &chip->pdev->dev;
814
815 if (!intel_use_ltr(chip))
816 return;
817
818 dev->power.set_latency_tolerance = intel_ltr_set;
819 dev_pm_qos_expose_latency_tolerance(dev);
820 }
821
822 static void intel_ltr_hide(struct sdhci_pci_chip *chip)
823 {
824 struct device *dev = &chip->pdev->dev;
825
826 if (!intel_use_ltr(chip))
827 return;
828
829 dev_pm_qos_hide_latency_tolerance(dev);
830 dev->power.set_latency_tolerance = NULL;
831 }
832
833 static void byt_probe_slot(struct sdhci_pci_slot *slot)
834 {
835 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
836 struct device *dev = &slot->chip->pdev->dev;
837 struct mmc_host *mmc = slot->host->mmc;
838
839 byt_read_dsm(slot);
840
841 byt_ocp_setting(slot->chip->pdev);
842
843 ops->execute_tuning = intel_execute_tuning;
844 ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
845
846 device_property_read_u32(dev, "max-frequency", &mmc->f_max);
847
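/* Expose the LTR interface once, via the first slot */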
848 if (!mmc->slotno) {
849 slot->chip->slots[mmc->slotno] = slot;
850 intel_ltr_expose(slot->chip);
851 }
852 }
853
854 static void byt_add_debugfs(struct sdhci_pci_slot *slot)
855 {
856 struct intel_host *intel_host = sdhci_pci_priv(slot);
857 struct mmc_host *mmc = slot->host->mmc;
858 struct dentry *dir = mmc->debugfs_root;
859
860 if (!intel_use_ltr(slot->chip))
861 return;
862
863 debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
864 debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
865
866 intel_cache_ltr(slot);
867 }
868
869 static int byt_add_host(struct sdhci_pci_slot *slot)
870 {
871 int ret = sdhci_add_host(slot->host);
872
873 if (!ret)
874 byt_add_debugfs(slot);
875 return ret;
876 }
877
878 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
879 {
880 struct mmc_host *mmc = slot->host->mmc;
881
882 if (!mmc->slotno)
883 intel_ltr_hide(slot->chip);
884 }
885
886 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
887 {
888 byt_probe_slot(slot);
889 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
890 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
891 MMC_CAP_CMD_DURING_TFR |
892 MMC_CAP_WAIT_WHILE_BUSY;
893 slot->hw_reset = sdhci_pci_int_hw_reset;
894 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
895 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
896 slot->host->mmc_host_ops.select_drive_strength =
897 intel_select_drive_strength;
898 return 0;
899 }
900
901 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
902 {
903 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
904 (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
905 dmi_match(DMI_SYS_VENDOR, "IRBIS") ||
906 dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA"));
907 }
908
909 static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
910 {
911 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
912 dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
913 }
914
915 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
916 {
917 int ret = byt_emmc_probe_slot(slot);
918
919 if (!glk_broken_cqhci(slot))
920 slot->host->mmc->caps2 |= MMC_CAP2_CQE;
921
922 if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
923 if (!jsl_broken_hs400es(slot)) {
924 slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
925 slot->host->mmc_host_ops.hs400_enhanced_strobe =
926 intel_hs400_enhanced_strobe;
927 }
928 slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
929 }
930
931 return ret;
932 }
933
934 static const struct cqhci_host_ops glk_cqhci_ops = {
935 .enable = sdhci_cqe_enable,
936 .disable = sdhci_cqe_disable,
937 .dumpregs = sdhci_pci_dumpregs,
938 };
939
940 static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
941 {
942 struct device *dev = &slot->chip->pdev->dev;
943 struct sdhci_host *host = slot->host;
944 struct cqhci_host *cq_host;
945 bool dma64;
946 int ret;
947
948 ret = sdhci_setup_host(host);
949 if (ret)
950 return ret;
951
952 cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
953 if (!cq_host) {
954 ret = -ENOMEM;
955 goto cleanup;
956 }
957
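/* The CQHCI register block lives at a 0x200 offset from the SDHCI registers */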
958 cq_host->mmio = host->ioaddr + 0x200;
959 cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
960 cq_host->ops = &glk_cqhci_ops;
961
962 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
963 if (dma64)
964 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
965
966 ret = cqhci_init(cq_host, host->mmc, dma64);
967 if (ret)
968 goto cleanup;
969
970 ret = __sdhci_add_host(host);
971 if (ret)
972 goto cleanup;
973
974 byt_add_debugfs(slot);
975
976 return 0;
977
978 cleanup:
979 sdhci_cleanup_host(host);
980 return ret;
981 }
982
983 #ifdef CONFIG_PM
984 #define GLK_RX_CTRL1 0x834
985 #define GLK_TUN_VAL 0x840
986 #define GLK_PATH_PLL GENMASK(13, 8)
987 #define GLK_DLY GENMASK(6, 0)
988 /* Workaround firmware failing to restore the tuning value */
989 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
990 {
991 struct sdhci_pci_slot *slot = chip->slots[0];
992 struct intel_host *intel_host = sdhci_pci_priv(slot);
993 struct sdhci_host *host = slot->host;
994 u32 glk_rx_ctrl1;
995 u32 glk_tun_val;
996 u32 dly;
997
998 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
999 return;
1000
1001 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
1002 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
1003
1004 if (susp) {
1005 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
1006 intel_host->glk_tun_val = glk_tun_val;
1007 return;
1008 }
1009
1010 if (!intel_host->glk_tun_val)
1011 return;
1012
1013 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
1014 intel_host->rpm_retune_ok = true;
1015 return;
1016 }
1017
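/*
* Derive the RX delay expected for the saved tuning value; if the
* register already holds it, firmware restored tuning correctly and
* nothing more is needed. Otherwise restore it and force a re-tune.
*/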
1018 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
1019 (intel_host->glk_tun_val << 1));
1020 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
1021 return;
1022
1023 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
1024 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
1025
1026 intel_host->rpm_retune_ok = true;
1027 chip->rpm_retune = true;
1028 mmc_retune_needed(host->mmc);
1029 pr_info("%s: Requiring re-tune after rpm resume\n", mmc_hostname(host->mmc));
1030 }
1031
1032 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
1033 {
1034 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
1035 !chip->rpm_retune)
1036 glk_rpm_retune_wa(chip, susp);
1037 }
1038
1039 static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
1040 {
1041 glk_rpm_retune_chk(chip, true);
1042
1043 return sdhci_cqhci_runtime_suspend(chip);
1044 }
1045
1046 static int glk_runtime_resume(struct sdhci_pci_chip *chip)
1047 {
1048 glk_rpm_retune_chk(chip, false);
1049
1050 return sdhci_cqhci_runtime_resume(chip);
1051 }
1052 #endif
1053
1054 #ifdef CONFIG_ACPI
1055 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
1056 {
1057 acpi_status status;
1058 unsigned long long max_freq;
1059
1060 status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
1061 "MXFQ", NULL, &max_freq);
1062 if (ACPI_FAILURE(status)) {
1063 dev_err(&slot->chip->pdev->dev,
1064 "MXFQ not found in acpi table\n");
1065 return -EINVAL;
1066 }
1067
1068 slot->host->mmc->f_max = max_freq * 1000000;
1069
1070 return 0;
1071 }
1072 #else
1073 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
1074 {
1075 return 0;
1076 }
1077 #endif
1078
1079 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1080 {
1081 int err;
1082
1083 byt_probe_slot(slot);
1084
1085 err = ni_set_max_freq(slot);
1086 if (err)
1087 return err;
1088
1089 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1090 MMC_CAP_WAIT_WHILE_BUSY;
1091 return 0;
1092 }
1093
1094 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1095 {
1096 byt_probe_slot(slot);
1097 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1098 MMC_CAP_WAIT_WHILE_BUSY;
1099 return 0;
1100 }
1101
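/* Record whether firmware left bus power on, so the first power-up cycles it */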
1102 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
1103 {
1104 struct intel_host *intel_host = sdhci_pci_priv(slot);
1105 u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
1106
1107 intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
1108 }
1109
1110 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1111 {
1112 byt_probe_slot(slot);
1113 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1114 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1115 slot->cd_idx = 0;
1116 slot->cd_override_level = true;
1117 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
1118 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
1119 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
1120 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
1121 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
1122
1123 if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
1124 slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
1125 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
1126
1127 byt_needs_pwr_off(slot);
1128
1129 return 0;
1130 }
1131
1132 #ifdef CONFIG_PM_SLEEP
1133
1134 static int byt_resume(struct sdhci_pci_chip *chip)
1135 {
1136 byt_ocp_setting(chip->pdev);
1137
1138 return sdhci_pci_resume_host(chip);
1139 }
1140
1141 #endif
1142
1143 #ifdef CONFIG_PM
1144
1145 static int byt_runtime_resume(struct sdhci_pci_chip *chip)
1146 {
1147 byt_ocp_setting(chip->pdev);
1148
1149 return sdhci_pci_runtime_resume_host(chip);
1150 }
1151
1152 #endif
1153
1154 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
1155 #ifdef CONFIG_PM_SLEEP
1156 .resume = byt_resume,
1157 #endif
1158 #ifdef CONFIG_PM
1159 .runtime_resume = byt_runtime_resume,
1160 #endif
1161 .allow_runtime_pm = true,
1162 .probe_slot = byt_emmc_probe_slot,
1163 .add_host = byt_add_host,
1164 .remove_slot = byt_remove_slot,
1165 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1166 SDHCI_QUIRK_NO_LED,
1167 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1168 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1169 SDHCI_QUIRK2_STOP_WITH_TC,
1170 .ops = &sdhci_intel_byt_ops,
1171 .priv_size = sizeof(struct intel_host),
1172 };
1173
1174 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
1175 .allow_runtime_pm = true,
1176 .probe_slot = glk_emmc_probe_slot,
1177 .add_host = glk_emmc_add_host,
1178 .remove_slot = byt_remove_slot,
1179 #ifdef CONFIG_PM_SLEEP
1180 .suspend = sdhci_cqhci_suspend,
1181 .resume = sdhci_cqhci_resume,
1182 #endif
1183 #ifdef CONFIG_PM
1184 .runtime_suspend = glk_runtime_suspend,
1185 .runtime_resume = glk_runtime_resume,
1186 #endif
1187 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1188 SDHCI_QUIRK_NO_LED,
1189 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1190 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1191 SDHCI_QUIRK2_STOP_WITH_TC,
1192 .ops = &sdhci_intel_glk_ops,
1193 .priv_size = sizeof(struct intel_host),
1194 };
1195
1196 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
1197 #ifdef CONFIG_PM_SLEEP
1198 .resume = byt_resume,
1199 #endif
1200 #ifdef CONFIG_PM
1201 .runtime_resume = byt_runtime_resume,
1202 #endif
1203 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1204 SDHCI_QUIRK_NO_LED,
1205 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1206 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1207 .allow_runtime_pm = true,
1208 .probe_slot = ni_byt_sdio_probe_slot,
1209 .add_host = byt_add_host,
1210 .remove_slot = byt_remove_slot,
1211 .ops = &sdhci_intel_byt_ops,
1212 .priv_size = sizeof(struct intel_host),
1213 };
1214
1215 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
1216 #ifdef CONFIG_PM_SLEEP
1217 .resume = byt_resume,
1218 #endif
1219 #ifdef CONFIG_PM
1220 .runtime_resume = byt_runtime_resume,
1221 #endif
1222 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1223 SDHCI_QUIRK_NO_LED,
1224 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1225 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1226 .allow_runtime_pm = true,
1227 .probe_slot = byt_sdio_probe_slot,
1228 .add_host = byt_add_host,
1229 .remove_slot = byt_remove_slot,
1230 .ops = &sdhci_intel_byt_ops,
1231 .priv_size = sizeof(struct intel_host),
1232 };
1233
1234 /* DMI quirks for devices with missing or broken CD GPIO info */
1235 static const struct gpiod_lookup_table vexia_edu_atla10_cd_gpios = {
1236 .dev_id = "0000:00:12.0",
1237 .table = {
1238 GPIO_LOOKUP("INT33FC:00", 38, "cd", GPIO_ACTIVE_HIGH),
1239 { }
1240 },
1241 };
1242
1243 static const struct dmi_system_id sdhci_intel_byt_cd_gpio_override[] = {
1244 {
1245 /* Vexia Edu Atla 10 tablet 9V version */
1246 .matches = {
1247 DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
1248 DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
1249 /* Above strings are too generic, also match on BIOS date */
1250 DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
1251 },
1252 .driver_data = (void *)&vexia_edu_atla10_cd_gpios,
1253 },
1254 { }
1255 };
1256
1257 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
1258 #ifdef CONFIG_PM_SLEEP
1259 .resume = byt_resume,
1260 #endif
1261 #ifdef CONFIG_PM
1262 .runtime_resume = byt_runtime_resume,
1263 #endif
1264 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1265 SDHCI_QUIRK_NO_LED,
1266 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1267 SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1268 SDHCI_QUIRK2_STOP_WITH_TC,
1269 .allow_runtime_pm = true,
1270 .own_cd_for_runtime_pm = true,
1271 .probe_slot = byt_sd_probe_slot,
1272 .add_host = byt_add_host,
1273 .remove_slot = byt_remove_slot,
1274 .ops = &sdhci_intel_byt_ops,
1275 .cd_gpio_override = sdhci_intel_byt_cd_gpio_override,
1276 .priv_size = sizeof(struct intel_host),
1277 };
1278
1279 /* Define Host controllers for Intel Merrifield platform */
1280 #define INTEL_MRFLD_EMMC_0 0
1281 #define INTEL_MRFLD_EMMC_1 1
1282 #define INTEL_MRFLD_SD 2
1283 #define INTEL_MRFLD_SDIO 3
1284
1285 #ifdef CONFIG_ACPI
1286 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
1287 {
1288 struct acpi_device *device;
1289
1290 device = ACPI_COMPANION(&slot->chip->pdev->dev);
1291 if (device)
1292 acpi_device_fix_up_power_extended(device);
1293 }
1294 #else
1295 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
1296 #endif
1297
1298 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
1299 {
1300 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
1301
1302 switch (func) {
1303 case INTEL_MRFLD_EMMC_0:
1304 case INTEL_MRFLD_EMMC_1:
1305 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1306 MMC_CAP_8_BIT_DATA |
1307 MMC_CAP_1_8V_DDR;
1308 break;
1309 case INTEL_MRFLD_SD:
1310 slot->cd_idx = 0;
1311 slot->cd_override_level = true;
1312 /*
1313 * There are two PCB designs of SD card slot with the opposite
1314 * card detection sense. Quirk this out by ignoring GPIO state
1315 * completely in the custom ->get_cd() callback.
1316 */
1317 slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
1318 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1319 break;
1320 case INTEL_MRFLD_SDIO:
1321 /* Advertise 2.0v for compatibility with the SDIO card's OCR */
1322 slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
1323 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1324 MMC_CAP_POWER_OFF_CARD;
1325 break;
1326 default:
1327 return -ENODEV;
1328 }
1329
1330 intel_mrfld_mmc_fix_up_power_slot(slot);
1331 return 0;
1332 }
1333
1334 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
1335 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1336 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
1337 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1338 .allow_runtime_pm = true,
1339 .probe_slot = intel_mrfld_mmc_probe_slot,
1340 };
1341
1342 #define JMB388_SAMPLE_COUNT 5
1343
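/*
* The write-protect signal can bounce on JMB388; sample it several times and
* report read-only only when more than half of the samples agree.
*/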
1344 static int jmicron_jmb388_get_ro(struct mmc_host *mmc)
1345 {
1346 int i, ro_count;
1347
1348 ro_count = 0;
1349 for (i = 0; i < JMB388_SAMPLE_COUNT; i++) {
1350 if (sdhci_get_ro(mmc) > 0) {
1351 if (++ro_count > JMB388_SAMPLE_COUNT / 2)
1352 return 1;
1353 }
1354 msleep(30);
1355 }
1356 return 0;
1357 }
1358
1359 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
1360 {
1361 u8 scratch;
1362 int ret;
1363
1364 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
1365 if (ret)
1366 goto fail;
1367
1368 /*
1369 * Turn PMOS on [bit 0], set over current detection to 2.4 V
1370 * [bit 1:2] and enable over current debouncing [bit 6].
1371 */
1372 if (on)
1373 scratch |= 0x47;
1374 else
1375 scratch &= ~0x47;
1376
1377 ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
1378
1379 fail:
1380 return pcibios_err_to_errno(ret);
1381 }
1382
1383 static int jmicron_probe(struct sdhci_pci_chip *chip)
1384 {
1385 int ret;
1386 u16 mmcdev = 0;
1387
1388 if (chip->pdev->revision == 0) {
1389 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
1390 SDHCI_QUIRK_32BIT_DMA_SIZE |
1391 SDHCI_QUIRK_32BIT_ADMA_SIZE |
1392 SDHCI_QUIRK_RESET_AFTER_REQUEST |
1393 SDHCI_QUIRK_BROKEN_SMALL_PIO;
1394 }
1395
1396 /*
1397 * JMicron chips can have two interfaces to the same hardware
1398 * in order to work around limitations in Microsoft's driver.
1399 * We need to make sure we only bind to one of them.
1400 *
1401 * This code assumes two things:
1402 *
1403 * 1. The PCI code adds subfunctions in order.
1404 *
1405 * 2. The MMC interface has a lower subfunction number
1406 * than the SD interface.
1407 */
1408 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
1409 mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
1410 else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
1411 mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
1412
1413 if (mmcdev) {
1414 struct pci_dev *sd_dev;
1415
1416 sd_dev = NULL;
1417 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
1418 mmcdev, sd_dev)) != NULL) {
1419 if ((PCI_SLOT(chip->pdev->devfn) ==
1420 PCI_SLOT(sd_dev->devfn)) &&
1421 (chip->pdev->bus == sd_dev->bus))
1422 break;
1423 }
1424
1425 if (sd_dev) {
1426 pci_dev_put(sd_dev);
1427 dev_info(&chip->pdev->dev, "Refusing to bind to "
1428 "secondary interface.\n");
1429 return -ENODEV;
1430 }
1431 }
1432
1433 /*
1434 * JMicron chips need a bit of a nudge to enable the power
1435 * output pins.
1436 */
1437 ret = jmicron_pmos(chip, 1);
1438 if (ret) {
1439 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1440 return ret;
1441 }
1442
1443 return 0;
1444 }
1445
1446 static void jmicron_enable_mmc(struct sdhci_host *host, int on)
1447 {
1448 u8 scratch;
1449
1450 scratch = readb(host->ioaddr + 0xC0);
1451
1452 if (on)
1453 scratch |= 0x01;
1454 else
1455 scratch &= ~0x01;
1456
1457 writeb(scratch, host->ioaddr + 0xC0);
1458 }
1459
1460 static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
1461 {
1462 if (slot->chip->pdev->revision == 0) {
1463 u16 version;
1464
1465 version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
1466 version = (version & SDHCI_VENDOR_VER_MASK) >>
1467 SDHCI_VENDOR_VER_SHIFT;
1468
1469 /*
1470 * Older versions of the chip have lots of nasty glitches
1471 * in the ADMA engine. It's best just to avoid it
1472 * completely.
1473 */
1474 if (version < 0xAC)
1475 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1476 }
1477
1478 /* JM388 MMC doesn't support 1.8V while SD supports it */
1479 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1480 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
1481 MMC_VDD_29_30 | MMC_VDD_30_31 |
1482 MMC_VDD_165_195; /* allow 1.8V */
1483 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
1484 MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
1485 }
1486
1487 /*
1488 * The secondary interface requires a bit set to get the
1489 * interrupts.
1490 */
1491 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1492 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1493 jmicron_enable_mmc(slot->host, 1);
1494
1495 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
1496
1497 /* Handle unstable RO-detection on JM388 chips */
1498 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
1499 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1500 slot->host->mmc_host_ops.get_ro = jmicron_jmb388_get_ro;
1501
1502 return 0;
1503 }
1504
1505 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
1506 {
1507 if (dead)
1508 return;
1509
1510 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1511 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1512 jmicron_enable_mmc(slot->host, 0);
1513 }
1514
1515 #ifdef CONFIG_PM_SLEEP
1516 static int jmicron_suspend(struct sdhci_pci_chip *chip)
1517 {
1518 int i, ret;
1519
1520 ret = sdhci_pci_suspend_host(chip);
1521 if (ret)
1522 return ret;
1523
1524 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1525 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1526 for (i = 0; i < chip->num_slots; i++)
1527 jmicron_enable_mmc(chip->slots[i]->host, 0);
1528 }
1529
1530 return 0;
1531 }
1532
1533 static int jmicron_resume(struct sdhci_pci_chip *chip)
1534 {
1535 int ret, i;
1536
1537 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1538 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1539 for (i = 0; i < chip->num_slots; i++)
1540 jmicron_enable_mmc(chip->slots[i]->host, 1);
1541 }
1542
1543 ret = jmicron_pmos(chip, 1);
1544 if (ret) {
1545 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1546 return ret;
1547 }
1548
1549 return sdhci_pci_resume_host(chip);
1550 }
1551 #endif
1552
1553 static const struct sdhci_pci_fixes sdhci_jmicron = {
1554 .probe = jmicron_probe,
1555
1556 .probe_slot = jmicron_probe_slot,
1557 .remove_slot = jmicron_remove_slot,
1558
1559 #ifdef CONFIG_PM_SLEEP
1560 .suspend = jmicron_suspend,
1561 .resume = jmicron_resume,
1562 #endif
1563 };
1564
1565 /* SysKonnect CardBus2SDIO extra registers */
1566 #define SYSKT_CTRL 0x200
1567 #define SYSKT_RDFIFO_STAT 0x204
1568 #define SYSKT_WRFIFO_STAT 0x208
1569 #define SYSKT_POWER_DATA 0x20c
1570 #define SYSKT_POWER_330 0xef
1571 #define SYSKT_POWER_300 0xf8
1572 #define SYSKT_POWER_184 0xcc
1573 #define SYSKT_POWER_CMD 0x20d
1574 #define SYSKT_POWER_START (1 << 7)
1575 #define SYSKT_POWER_STATUS 0x20e
1576 #define SYSKT_POWER_STATUS_OK (1 << 0)
1577 #define SYSKT_BOARD_REV 0x210
1578 #define SYSKT_CHIP_REV 0x211
1579 #define SYSKT_CONF_DATA 0x212
1580 #define SYSKT_CONF_DATA_1V8 (1 << 2)
1581 #define SYSKT_CONF_DATA_2V5 (1 << 1)
1582 #define SYSKT_CONF_DATA_3V3 (1 << 0)
1583
1584 static int syskt_probe(struct sdhci_pci_chip *chip)
1585 {
1586 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1587 chip->pdev->class &= ~0x0000FF;
1588 chip->pdev->class |= PCI_SDHCI_IFDMA;
1589 }
1590 return 0;
1591 }
1592
1593 static int syskt_probe_slot(struct sdhci_pci_slot *slot)
1594 {
1595 int tm, ps;
1596
1597 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
1598 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
1599 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
1600 "board rev %d.%d, chip rev %d.%d\n",
1601 board_rev >> 4, board_rev & 0xf,
1602 chip_rev >> 4, chip_rev & 0xf);
1603 if (chip_rev >= 0x20)
1604 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
1605
1606 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
1607 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
1608 udelay(50);
1609 tm = 10; /* Wait max 1 ms */
1610 do {
1611 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
1612 if (ps & SYSKT_POWER_STATUS_OK)
1613 break;
1614 udelay(100);
1615 } while (--tm);
1616 if (!tm) {
1617 dev_err(&slot->chip->pdev->dev,
1618 "power regulator never stabilized");
1619 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
1620 return -ENODEV;
1621 }
1622
1623 return 0;
1624 }
1625
1626 static const struct sdhci_pci_fixes sdhci_syskt = {
1627 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
1628 .probe = syskt_probe,
1629 .probe_slot = syskt_probe_slot,
1630 };
1631
1632 static int via_probe(struct sdhci_pci_chip *chip)
1633 {
1634 if (chip->pdev->revision == 0x10)
1635 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
1636
1637 return 0;
1638 }
1639
1640 static const struct sdhci_pci_fixes sdhci_via = {
1641 .probe = via_probe,
1642 };
1643
1644 static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
1645 {
1646 slot->host->mmc->caps2 |= MMC_CAP2_HS200;
1647 return 0;
1648 }
1649
1650 static const struct sdhci_pci_fixes sdhci_rtsx = {
1651 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1652 SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
1653 SDHCI_QUIRK2_BROKEN_DDR50,
1654 .probe_slot = rtsx_probe_slot,
1655 };
1656
1657 /* AMD chipset generation */
1658 enum amd_chipset_gen {
1659 AMD_CHIPSET_BEFORE_ML,
1660 AMD_CHIPSET_CZ,
1661 AMD_CHIPSET_NL,
1662 AMD_CHIPSET_UNKNOWN,
1663 };
1664
1665 /* AMD registers */
1666 #define AMD_SD_AUTO_PATTERN 0xB8
1667 #define AMD_MSLEEP_DURATION 4
1668 #define AMD_SD_MISC_CONTROL 0xD0
1669 #define AMD_MAX_TUNE_VALUE 0x0B
1670 #define AMD_AUTO_TUNE_SEL 0x10800
1671 #define AMD_FIFO_PTR 0x30
1672 #define AMD_BIT_MASK 0x1F
1673
1674 static void amd_tuning_reset(struct sdhci_host *host)
1675 {
1676 unsigned int val;
1677
1678 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1679 val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
1680 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1681
1682 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1683 val &= ~SDHCI_CTRL_EXEC_TUNING;
1684 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1685 }
1686
1687 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
1688 {
1689 unsigned int val;
1690
1691 pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
1692 val &= ~AMD_BIT_MASK;
1693 val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
1694 pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
1695 }
1696
1697 static void amd_enable_manual_tuning(struct pci_dev *pdev)
1698 {
1699 unsigned int val;
1700
1701 pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
1702 val |= AMD_FIFO_PTR;
1703 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1704 }
1705
1706 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1707 {
1708 struct sdhci_pci_slot *slot = sdhci_priv(host);
1709 struct pci_dev *pdev = slot->chip->pdev;
1710 u8 valid_win = 0;
1711 u8 valid_win_max = 0;
1712 u8 valid_win_end = 0;
1713 u8 ctrl, tune_around;
1714
1715 amd_tuning_reset(host);
1716
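/*
* Sweep all 12 tuning phases, tracking the longest run of consecutive
* phases that pass mmc_send_tuning(); then select the midpoint of that
* run as the final phase.
*/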
1717 for (tune_around = 0; tune_around < 12; tune_around++) {
1718 amd_config_tuning_phase(pdev, tune_around);
1719
1720 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1721 valid_win = 0;
1722 msleep(AMD_MSLEEP_DURATION);
1723 ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
1724 sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
1725 } else if (++valid_win > valid_win_max) {
1726 valid_win_max = valid_win;
1727 valid_win_end = tune_around;
1728 }
1729 }
1730
1731 if (!valid_win_max) {
1732 dev_err(&pdev->dev, "no tuning point found\n");
1733 return -EIO;
1734 }
1735
1736 amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
1737
1738 amd_enable_manual_tuning(pdev);
1739
1740 host->mmc->retune_period = 0;
1741
1742 return 0;
1743 }
1744
1745 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1746 {
1747 struct sdhci_host *host = mmc_priv(mmc);
1748
1749 /* AMD requires custom HS200 tuning */
1750 if (host->timing == MMC_TIMING_MMC_HS200)
1751 return amd_execute_tuning_hs200(host, opcode);
1752
1753 /* Otherwise perform standard SDHCI tuning */
1754 return sdhci_execute_tuning(mmc, opcode);
1755 }
1756
1757 static int amd_probe_slot(struct sdhci_pci_slot *slot)
1758 {
1759 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1760
1761 ops->execute_tuning = amd_execute_tuning;
1762
1763 return 0;
1764 }
1765
1766 static int amd_probe(struct sdhci_pci_chip *chip)
1767 {
1768 struct pci_dev *smbus_dev;
1769 enum amd_chipset_gen gen;
1770
1771 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1772 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
1773 if (smbus_dev) {
1774 gen = AMD_CHIPSET_BEFORE_ML;
1775 } else {
1776 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1777 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
1778 if (smbus_dev) {
1779 if (smbus_dev->revision < 0x51)
1780 gen = AMD_CHIPSET_CZ;
1781 else
1782 gen = AMD_CHIPSET_NL;
1783 } else {
1784 gen = AMD_CHIPSET_UNKNOWN;
1785 }
1786 }
1787
1788 pci_dev_put(smbus_dev);
1789
1790 if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
1791 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1792
1793 return 0;
1794 }
1795
1796 static u32 sdhci_read_present_state(struct sdhci_host *host)
1797 {
1798 return sdhci_readl(host, SDHCI_PRESENT_STATE);
1799 }
1800
1801 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
1802 {
1803 struct sdhci_pci_slot *slot = sdhci_priv(host);
1804 struct pci_dev *pdev = slot->chip->pdev;
1805 u32 present_state;
1806
1807 /*
1808 * SDHC 0x7906 requires a hard reset to clear all internal state.
1809 * Otherwise it can get into a bad state where the DATA lines are always
1810 * read as zeros.
1811 */
1812 if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
1813 pci_clear_master(pdev);
1814
1815 pci_save_state(pdev);
1816
1817 pci_set_power_state(pdev, PCI_D3cold);
1818 pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
1819 pdev->current_state);
1820 pci_set_power_state(pdev, PCI_D0);
1821
1822 pci_restore_state(pdev);
1823
1824 /*
1825 * SDHCI_RESET_ALL says the card detect logic should not be
1826 * reset, but since we need to reset the entire controller
1827 * we should wait until the card detect logic has stabilized.
1828 *
1829 * This normally takes about 40ms.
1830 */
1831 readx_poll_timeout(
1832 sdhci_read_present_state,
1833 host,
1834 present_state,
1835 present_state & SDHCI_CD_STABLE,
1836 10000,
1837 100000
1838 );
1839 }
1840
1841 return sdhci_reset(host, mask);
1842 }
1843
1844 static const struct sdhci_ops amd_sdhci_pci_ops = {
1845 .set_clock = sdhci_set_clock,
1846 .enable_dma = sdhci_pci_enable_dma,
1847 .set_bus_width = sdhci_set_bus_width,
1848 .reset = amd_sdhci_reset,
1849 .set_uhs_signaling = sdhci_set_uhs_signaling,
1850 };
1851
1852 static const struct sdhci_pci_fixes sdhci_amd = {
1853 .probe = amd_probe,
1854 .ops = &amd_sdhci_pci_ops,
1855 .probe_slot = amd_probe_slot,
1856 };
1857
1858 static const struct pci_device_id pci_ids[] = {
1859 SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh),
1860 SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc),
1861 SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
1862 SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
1863 SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712),
1864 SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
1865 SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714),
1866 SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
1867 SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
1868 SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron),
1869 SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
1870 SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron),
1871 SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
1872 SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
1873 SDHCI_PCI_DEVICE(VIA, 95D0, via),
1874 SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
1875 SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk),
1876 SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0),
1877 SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2),
1878 SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2),
1879 SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd),
1880 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
1881 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
1882 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
1883 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
1884 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
1885 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
1886 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc),
1887 SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
1888 SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio),
1889 SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd),
1890 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
1891 SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc),
1892 SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio),
1893 SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd),
1894 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
1895 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
1896 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
1897 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
1898 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
1899 SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
1900 SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc),
1901 SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
1902 SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
1903 SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
1904 SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
1905 SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
1906 SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
1907 SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
1908 SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
1909 SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
1910 SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd),
1911 SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc),
1912 SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio),
1913 SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd),
1914 SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc),
1915 SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio),
1916 SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd),
1917 SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
1918 SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
1919 SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
1920 SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
1921 SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
1922 SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc),
1923 SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
1924 SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
1925 SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
1926 SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
1927 SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
1928 SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
1929 SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
1930 SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
1931 SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
1932 SDHCI_PCI_DEVICE(O2, 8120, o2),
1933 SDHCI_PCI_DEVICE(O2, 8220, o2),
1934 SDHCI_PCI_DEVICE(O2, 8221, o2),
1935 SDHCI_PCI_DEVICE(O2, 8320, o2),
1936 SDHCI_PCI_DEVICE(O2, 8321, o2),
1937 SDHCI_PCI_DEVICE(O2, FUJIN2, o2),
1938 SDHCI_PCI_DEVICE(O2, SDS0, o2),
1939 SDHCI_PCI_DEVICE(O2, SDS1, o2),
1940 SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
1941 SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
1942 SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
1943 SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
1944 SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
1945 SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
1946 SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
1947 SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
1948 SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
1949 SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
1950 SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
1951 SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
1952 SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
1953 /* Generic SD host controller */
1954 {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
1955 { /* end: all zeroes */ },
1956 };
1957
1958 MODULE_DEVICE_TABLE(pci, pci_ids);
1959
1960 /*****************************************************************************\
1961 * *
1962 * SDHCI core callbacks *
1963 * *
1964 \*****************************************************************************/
1965
1966 int sdhci_pci_enable_dma(struct sdhci_host *host)
1967 {
1968 struct sdhci_pci_slot *slot;
1969 struct pci_dev *pdev;
1970
1971 slot = sdhci_priv(host);
1972 pdev = slot->chip->pdev;
1973
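	/*
	 * The low byte of the PCI class code is the programming interface;
	 * controllers that do not advertise the DMA interface may still work
	 * with SDMA, so only warn rather than disabling it.
	 */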
1974 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
1975 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
1976 (host->flags & SDHCI_USE_SDMA)) {
1977 dev_warn(&pdev->dev, "Will use DMA mode even though HW "
1978 "doesn't fully claim to support it.\n");
1979 }
1980
1981 pci_set_master(pdev);
1982
1983 return 0;
1984 }
1985
1986 static void sdhci_pci_hw_reset(struct sdhci_host *host)
1987 {
1988 struct sdhci_pci_slot *slot = sdhci_priv(host);
1989
1990 if (slot->hw_reset)
1991 slot->hw_reset(host);
1992 }
1993
1994 static const struct sdhci_ops sdhci_pci_ops = {
1995 .set_clock = sdhci_set_clock,
1996 .enable_dma = sdhci_pci_enable_dma,
1997 .set_bus_width = sdhci_set_bus_width,
1998 .reset = sdhci_reset,
1999 .set_uhs_signaling = sdhci_set_uhs_signaling,
2000 .hw_reset = sdhci_pci_hw_reset,
2001 };
2002
2003 /*****************************************************************************\
2004 * *
2005 * Suspend/resume *
2006 * *
2007 \*****************************************************************************/
2008
2009 #ifdef CONFIG_PM_SLEEP
2010 static int sdhci_pci_suspend(struct device *dev)
2011 {
2012 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2013
2014 if (!chip)
2015 return 0;
2016
2017 if (chip->fixes && chip->fixes->suspend)
2018 return chip->fixes->suspend(chip);
2019
2020 return sdhci_pci_suspend_host(chip);
2021 }
2022
2023 static int sdhci_pci_resume(struct device *dev)
2024 {
2025 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2026
2027 if (!chip)
2028 return 0;
2029
2030 if (chip->fixes && chip->fixes->resume)
2031 return chip->fixes->resume(chip);
2032
2033 return sdhci_pci_resume_host(chip);
2034 }
2035 #endif
2036
2037 #ifdef CONFIG_PM
2038 static int sdhci_pci_runtime_suspend(struct device *dev)
2039 {
2040 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2041
2042 if (!chip)
2043 return 0;
2044
2045 if (chip->fixes && chip->fixes->runtime_suspend)
2046 return chip->fixes->runtime_suspend(chip);
2047
2048 return sdhci_pci_runtime_suspend_host(chip);
2049 }
2050
2051 static int sdhci_pci_runtime_resume(struct device *dev)
2052 {
2053 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2054
2055 if (!chip)
2056 return 0;
2057
2058 if (chip->fixes && chip->fixes->runtime_resume)
2059 return chip->fixes->runtime_resume(chip);
2060
2061 return sdhci_pci_runtime_resume_host(chip);
2062 }
2063 #endif
2064
2065 static const struct dev_pm_ops sdhci_pci_pm_ops = {
2066 SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
2067 SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
2068 sdhci_pci_runtime_resume, NULL)
2069 };
2070
2071 /*****************************************************************************\
2072 * *
2073 * Device probing/removal *
2074 * *
2075 \*****************************************************************************/
2076
2077 static struct gpiod_lookup_table *sdhci_pci_add_gpio_lookup_table(
2078 struct sdhci_pci_chip *chip)
2079 {
2080 struct gpiod_lookup_table *cd_gpio_lookup_table;
2081 const struct dmi_system_id *dmi_id = NULL;
2082 size_t count;
2083
2084 if (chip->fixes && chip->fixes->cd_gpio_override)
2085 dmi_id = dmi_first_match(chip->fixes->cd_gpio_override);
2086
2087 if (!dmi_id)
2088 return NULL;
2089
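	/*
	 * Count entries up to the zeroed terminator so the table, including
	 * its terminator, can be duplicated below.
	 */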
2090 cd_gpio_lookup_table = dmi_id->driver_data;
2091 for (count = 0; cd_gpio_lookup_table->table[count].key; count++)
2092 ;
2093
2094 cd_gpio_lookup_table = kmemdup(dmi_id->driver_data,
2095 /* count + 1 terminating entry */
2096 struct_size(cd_gpio_lookup_table, table, count + 1),
2097 GFP_KERNEL);
2098 if (!cd_gpio_lookup_table)
2099 return ERR_PTR(-ENOMEM);
2100
2101 gpiod_add_lookup_table(cd_gpio_lookup_table);
2102 return cd_gpio_lookup_table;
2103 }
2104
2105 static void sdhci_pci_remove_gpio_lookup_table(struct gpiod_lookup_table *lookup_table)
2106 {
2107 if (lookup_table) {
2108 gpiod_remove_lookup_table(lookup_table);
2109 kfree(lookup_table);
2110 }
2111 }
2112
2113 static struct sdhci_pci_slot *sdhci_pci_probe_slot(
2114 struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
2115 int slotno)
2116 {
2117 struct sdhci_pci_slot *slot;
2118 struct sdhci_host *host;
2119 int ret, bar = first_bar + slotno;
2120 size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
2121
2122 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
2123 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
2124 return ERR_PTR(-ENODEV);
2125 }
2126
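	/*
	 * The standard SDHCI register map occupies 256 bytes per slot; a
	 * smaller BAR is suspicious but not fatal.
	 */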
2127 if (pci_resource_len(pdev, bar) < 0x100) {
2128 dev_err(&pdev->dev, "Invalid iomem size. You may "
2129 "experience problems.\n");
2130 }
2131
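	/*
	 * The programming interface byte distinguishes the generic SDHCI
	 * register layout from vendor-specific or unknown ones, which this
	 * driver cannot handle.
	 */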
2132 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
2133 dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
2134 return ERR_PTR(-ENODEV);
2135 }
2136
2137 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
2138 dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
2139 return ERR_PTR(-ENODEV);
2140 }
2141
2142 host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
2143 if (IS_ERR(host)) {
2144 dev_err(&pdev->dev, "cannot allocate host\n");
2145 return ERR_CAST(host);
2146 }
2147
2148 slot = sdhci_priv(host);
2149
2150 slot->chip = chip;
2151 slot->host = host;
2152 slot->cd_idx = -1;
2153
2154 host->hw_name = "PCI";
2155 host->ops = chip->fixes && chip->fixes->ops ?
2156 chip->fixes->ops :
2157 &sdhci_pci_ops;
2158 host->quirks = chip->quirks;
2159 host->quirks2 = chip->quirks2;
2160
2161 host->irq = pdev->irq;
2162
2163 ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
2164 if (ret) {
2165 dev_err(&pdev->dev, "cannot request region\n");
2166 return ERR_PTR(ret);
2167 }
2168
2169 host->ioaddr = pcim_iomap_table(pdev)[bar];
2170
2171 if (chip->fixes && chip->fixes->probe_slot) {
2172 ret = chip->fixes->probe_slot(slot);
2173 if (ret)
2174 return ERR_PTR(ret);
2175 }
2176
2177 host->mmc->pm_caps = MMC_PM_KEEP_POWER;
2178 host->mmc->slotno = slotno;
2179 host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
2180
2181 if (device_can_wakeup(&pdev->dev))
2182 host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2183
2184 if (host->mmc->caps & MMC_CAP_CD_WAKE)
2185 device_init_wakeup(&pdev->dev, true);
2186
2187 if (slot->cd_idx >= 0) {
2188 struct gpiod_lookup_table *cd_gpio_lookup_table;
2189
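		/*
		 * A DMI-matched platform may override the firmware-described
		 * card-detect GPIO; the temporary lookup table only needs to
		 * exist while the GPIO descriptor is being requested.
		 */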
2190 cd_gpio_lookup_table = sdhci_pci_add_gpio_lookup_table(chip);
2191 if (IS_ERR(cd_gpio_lookup_table)) {
2192 ret = PTR_ERR(cd_gpio_lookup_table);
2193 goto remove;
2194 }
2195
2196 ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
2197 slot->cd_override_level, 0);
2198
2199 sdhci_pci_remove_gpio_lookup_table(cd_gpio_lookup_table);
2200
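		/*
		 * If no GPIO is described under the "cd" connection ID, retry
		 * with a NULL ID; a deferral means the GPIO provider is not
		 * ready yet, so defer the whole probe.
		 */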
2201 if (ret && ret != -EPROBE_DEFER)
2202 ret = mmc_gpiod_request_cd(host->mmc, NULL,
2203 slot->cd_idx,
2204 slot->cd_override_level,
2205 0);
2206 if (ret == -EPROBE_DEFER)
2207 goto remove;
2208
2209 if (ret) {
2210 dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
2211 slot->cd_idx = -1;
2212 }
2213 }
2214
2215 if (chip->fixes && chip->fixes->add_host)
2216 ret = chip->fixes->add_host(slot);
2217 else
2218 ret = sdhci_add_host(host);
2219 if (ret)
2220 goto remove;
2221
2222 /*
2223 * Check if the chip needs a separate GPIO for card detect to wake up
2224 * from runtime suspend. If it is not there, don't allow runtime PM.
2225 */
2226 if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
2227 chip->allow_runtime_pm = false;
2228
2229 return slot;
2230
2231 remove:
2232 if (chip->fixes && chip->fixes->remove_slot)
2233 chip->fixes->remove_slot(slot, 0);
2234
2235 return ERR_PTR(ret);
2236 }
2237
2238 static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
2239 {
2240 int dead;
2241 u32 scratch;
2242
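	/*
	 * A read of all ones means the controller is gone (e.g. surprise
	 * removal), so tear down without touching the hardware further.
	 */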
2243 dead = 0;
2244 scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
2245 if (scratch == (u32)-1)
2246 dead = 1;
2247
2248 if (slot->chip->fixes && slot->chip->fixes->remove_host)
2249 slot->chip->fixes->remove_host(slot, dead);
2250 else
2251 sdhci_remove_host(slot->host, dead);
2252
2253 if (slot->chip->fixes && slot->chip->fixes->remove_slot)
2254 slot->chip->fixes->remove_slot(slot, dead);
2255 }
2256
2257 int sdhci_pci_uhs2_add_host(struct sdhci_pci_slot *slot)
2258 {
2259 return sdhci_uhs2_add_host(slot->host);
2260 }
2261
2262 void sdhci_pci_uhs2_remove_host(struct sdhci_pci_slot *slot, int dead)
2263 {
2264 sdhci_uhs2_remove_host(slot->host, dead);
2265 }
2266
2267 static void sdhci_pci_runtime_pm_allow(struct device *dev)
2268 {
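	/*
	 * Allow autosuspend 50 ms after the last access and do not count
	 * active child devices against suspending the controller.
	 */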
2269 pm_suspend_ignore_children(dev, 1);
2270 pm_runtime_set_autosuspend_delay(dev, 50);
2271 pm_runtime_use_autosuspend(dev);
2272 pm_runtime_allow(dev);
2273 /* Stay active until mmc core scans for a card */
2274 pm_runtime_put_noidle(dev);
2275 }
2276
2277 static void sdhci_pci_runtime_pm_forbid(struct device *dev)
2278 {
2279 pm_runtime_forbid(dev);
2280 pm_runtime_get_noresume(dev);
2281 }
2282
2283 static int sdhci_pci_probe(struct pci_dev *pdev,
2284 const struct pci_device_id *ent)
2285 {
2286 struct sdhci_pci_chip *chip;
2287 struct sdhci_pci_slot *slot;
2288
2289 u8 slots, first_bar;
2290 int ret, i;
2291
2292 BUG_ON(pdev == NULL);
2293 BUG_ON(ent == NULL);
2294
2295 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
2296 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
2297
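	/*
	 * The Slot Information register encodes both the number of slots and
	 * the first BAR used by slot 0.
	 */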
2298 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
2299 if (ret)
2300 return pcibios_err_to_errno(ret);
2301
2302 slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
2303 dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
2304
2305 BUG_ON(slots > MAX_SLOTS);
2306
2307 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
2308 if (ret)
2309 return pcibios_err_to_errno(ret);
2310
2311 first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
2312
2313 if (first_bar > 5) {
2314 dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
2315 return -ENODEV;
2316 }
2317
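	/*
	 * Managed (devres) PCI enable: resources are released automatically
	 * if probing fails or the driver is unbound.
	 */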
2318 ret = pcim_enable_device(pdev);
2319 if (ret)
2320 return ret;
2321
2322 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
2323 if (!chip)
2324 return -ENOMEM;
2325
2326 chip->pdev = pdev;
2327 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
2328 if (chip->fixes) {
2329 chip->quirks = chip->fixes->quirks;
2330 chip->quirks2 = chip->fixes->quirks2;
2331 chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
2332 }
2333 chip->num_slots = slots;
2334 chip->pm_retune = true;
2335 chip->rpm_retune = true;
2336
2337 pci_set_drvdata(pdev, chip);
2338
2339 if (chip->fixes && chip->fixes->probe) {
2340 ret = chip->fixes->probe(chip);
2341 if (ret)
2342 return ret;
2343 }
2344
2345 slots = chip->num_slots; /* Quirk may have changed this */
2346
2347 for (i = 0; i < slots; i++) {
2348 slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
2349 if (IS_ERR(slot)) {
2350 for (i--; i >= 0; i--)
2351 sdhci_pci_remove_slot(chip->slots[i]);
2352 return PTR_ERR(slot);
2353 }
2354
2355 chip->slots[i] = slot;
2356 }
2357
2358 if (chip->allow_runtime_pm)
2359 sdhci_pci_runtime_pm_allow(&pdev->dev);
2360
2361 return 0;
2362 }
2363
2364 static void sdhci_pci_remove(struct pci_dev *pdev)
2365 {
2366 int i;
2367 struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
2368
2369 if (chip->allow_runtime_pm)
2370 sdhci_pci_runtime_pm_forbid(&pdev->dev);
2371
2372 for (i = 0; i < chip->num_slots; i++)
2373 sdhci_pci_remove_slot(chip->slots[i]);
2374 }
2375
2376 static struct pci_driver sdhci_driver = {
2377 .name = "sdhci-pci",
2378 .id_table = pci_ids,
2379 .probe = sdhci_pci_probe,
2380 .remove = sdhci_pci_remove,
2381 .driver = {
2382 .pm = &sdhci_pci_pm_ops,
2383 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
2384 },
2385 };
2386
2387 module_pci_driver(sdhci_driver);
2388
2389 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2390 MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
2391 MODULE_LICENSE("GPL");
2392