// SPDX-License-Identifier: GPL-2.0-or-later
/*  linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/dmi.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>

#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
#endif

#include "cqhci.h"

#include "sdhci.h"
#include "sdhci-cqhci.h"
#include "sdhci-pci.h"
#include "sdhci-uhs2.h"

static void sdhci_pci_hw_reset(struct sdhci_host *host);

#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
{
	mmc_pm_flag_t pm_flags = 0;
	bool cap_cd_wake = false;
	int i;

	for (i = 0; i < chip->num_slots; i++) {
		struct sdhci_pci_slot *slot = chip->slots[i];

		if (slot) {
			pm_flags |= slot->host->mmc->pm_flags;
			if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
				cap_cd_wake = true;
		}
	}

	if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
		return device_wakeup_enable(&chip->pdev->dev);
	else if (!cap_cd_wake)
		device_wakeup_disable(&chip->pdev->dev);

	return 0;
}

static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
{
	int i, ret;

	sdhci_pci_init_wakeup(chip);

	for (i = 0; i < chip->num_slots; i++) {
		struct sdhci_pci_slot *slot = chip->slots[i];
		struct sdhci_host *host;

		if (!slot)
			continue;

		host = slot->host;

		if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
			mmc_retune_needed(host->mmc);

		ret = sdhci_suspend_host(host);
		if (ret)
			goto err_pci_suspend;

		if (device_may_wakeup(&chip->pdev->dev))
			mmc_gpio_set_cd_wake(host->mmc, true);
	}

	return 0;

err_pci_suspend:
	while (--i >= 0)
		sdhci_resume_host(chip->slots[i]->host);
	return ret;
}

int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
{
	struct sdhci_pci_slot *slot;
	int i, ret;

	for (i = 0; i < chip->num_slots; i++) {
		slot = chip->slots[i];
		if (!slot)
			continue;

		ret = sdhci_resume_host(slot->host);
		if (ret)
			return ret;

		mmc_gpio_set_cd_wake(slot->host->mmc, false);
	}

	return 0;
}

static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
{
	int ret;

	ret = cqhci_suspend(chip->slots[0]->host->mmc);
	if (ret)
		return ret;

	return sdhci_pci_suspend_host(chip);
}

static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
{
	int ret;

	ret = sdhci_pci_resume_host(chip);
	if (ret)
		return ret;

	return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif

#ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
{
	struct sdhci_pci_slot *slot;
	struct sdhci_host *host;
	int i, ret;

	for (i = 0; i < chip->num_slots; i++) {
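		/*
		 * Runtime suspend each initialised slot in turn; the error
		 * path below resumes any slots that were already suspended.
		 */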
		slot = chip->slots[i];
		if (!slot)
			continue;

		host = slot->host;

		ret = sdhci_runtime_suspend_host(host);
		if (ret)
			goto err_pci_runtime_suspend;

		if (chip->rpm_retune &&
		    host->tuning_mode != SDHCI_TUNING_MODE_3)
			mmc_retune_needed(host->mmc);
	}

	return 0;

err_pci_runtime_suspend:
	while (--i >= 0)
		sdhci_runtime_resume_host(chip->slots[i]->host, 0);
	return ret;
}

static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
{
	struct sdhci_pci_slot *slot;
	int i, ret;

	for (i = 0; i < chip->num_slots; i++) {
		slot = chip->slots[i];
		if (!slot)
			continue;

		ret = sdhci_runtime_resume_host(slot->host, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
{
	int ret;

	ret = cqhci_suspend(chip->slots[0]->host->mmc);
	if (ret)
		return ret;

	return sdhci_pci_runtime_suspend_host(chip);
}

static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
{
	int ret;

	ret = sdhci_pci_runtime_resume_host(chip);
	if (ret)
		return ret;

	return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif

static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}

static void sdhci_pci_dumpregs(struct mmc_host *mmc)
{
	sdhci_dumpregs(mmc_priv(mmc));
}

/*****************************************************************************\
 *                                                                           *
 * Hardware specific quirk handling                                          *
 *                                                                           *
\*****************************************************************************/

static int ricoh_probe(struct sdhci_pci_chip *chip)
{
	if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
	    chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
		chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
	return 0;
}

static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
	u32 caps =
		FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
		FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
		SDHCI_TIMEOUT_CLK_UNIT |
		SDHCI_CAN_VDD_330 |
		SDHCI_CAN_DO_HISPD |
		SDHCI_CAN_DO_SDMA;
	u32 caps1 = 0;

	__sdhci_read_caps(slot->host, NULL, &caps, &caps1);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
{
	/*
	 * Apply a delay to allow the controller to settle, otherwise it
	 * becomes confused if the card state changed during suspend.
	 */
	msleep(500);
	return sdhci_pci_resume_host(chip);
}
#endif

static const struct sdhci_pci_fixes sdhci_ricoh = {
	.probe = ricoh_probe,
	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
		  SDHCI_QUIRK_FORCE_DMA |
		  SDHCI_QUIRK_CLOCK_BEFORE_RESET,
};

static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
	.probe_slot = ricoh_mmc_probe_slot,
#ifdef CONFIG_PM_SLEEP
	.resume = ricoh_mmc_resume,
#endif
	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
		  SDHCI_QUIRK_CLOCK_BEFORE_RESET |
		  SDHCI_QUIRK_NO_CARD_NO_RESET,
};

static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_set_ios(mmc, ios);

	/*
	 * Some (ENE) controllers misbehave on some ios operations,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD))
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}

static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
{
	slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
	return 0;
}

static const struct sdhci_pci_fixes sdhci_ene_712 = {
	.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_BROKEN_DMA,
};

static const struct sdhci_pci_fixes sdhci_ene_714 = {
	.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_BROKEN_DMA,
	.probe_slot = ene_714_probe_slot,
};

static const struct sdhci_pci_fixes sdhci_cafe = {
	.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
		  SDHCI_QUIRK_NO_BUSY_IRQ |
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};

static const struct sdhci_pci_fixes sdhci_intel_qrk = {
	.quirks = SDHCI_QUIRK_NO_HISPD_BIT,
};

static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
{
	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
	return 0;
}

/*
 * ADMA operation is disabled for Moorestown platform due to
 * hardware bugs.
 */
static int mrst_hc_probe(struct sdhci_pci_chip *chip)
{
	/*
	 * The number of slots is fixed here for MRST as SDIO3/5 are never
	 * used and have hardware bugs.
	 */
	chip->num_slots = 1;
	return 0;
}

static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
{
	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
	return 0;
}

static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
	return 0;
}

static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
	return 0;
}

static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
	.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
	.probe_slot = mrst_hc_probe_slot,
};

static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
	.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
	.probe = mrst_hc_probe,
};

static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.allow_runtime_pm = true,
	.own_cd_for_runtime_pm = true,
};

static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
	.allow_runtime_pm = true,
	.probe_slot = mfd_sdio_probe_slot,
};

static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.allow_runtime_pm = true,
	.probe_slot = mfd_emmc_probe_slot,
};

static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
	.quirks = SDHCI_QUIRK_BROKEN_ADMA,
	.probe_slot = pch_hc_probe_slot,
};

#ifdef CONFIG_X86

#define BYT_IOSF_SCCEP			0x63
#define BYT_IOSF_OCP_NETCTRL0		0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)

static void byt_ocp_setting(struct pci_dev *pdev)
{
	u32 val = 0;

	if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
		return;

	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
			  &val)) {
		dev_err(&pdev->dev, "%s read error\n", __func__);
		return;
	}

	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
		return;

	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;

	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
			   val)) {
		dev_err(&pdev->dev, "%s write error\n", __func__);
		return;
	}

	dev_dbg(&pdev->dev, "%s completed\n", __func__);
}

#else

static inline void byt_ocp_setting(struct pci_dev *pdev)
{
}

#endif

enum {
	INTEL_DSM_FNS		=  0,
	INTEL_DSM_V18_SWITCH	=  3,
	INTEL_DSM_V33_SWITCH	=  4,
	INTEL_DSM_DRV_STRENGTH	=  9,
	INTEL_DSM_D3_RETUNE	= 10,
};

struct intel_host {
	u32	dsm_fns;
	int	drv_strength;
	bool	d3_retune;
	bool	rpm_retune_ok;
	bool	needs_pwr_off;
	u32	glk_rx_ctrl1;
	u32	glk_tun_val;
	u32	active_ltr;
	u32	idle_ltr;
};

static const guid_t intel_dsm_guid =
	GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
		  0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);

static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;
	size_t len;

	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
				      ACPI_TYPE_BUFFER);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->buffer.length < 1) {
		err = -EINVAL;
		goto out;
	}

	len = min_t(size_t, obj->buffer.length, 4);

	*result = 0;
	memcpy(result, obj->buffer.pointer, len);
out:
	ACPI_FREE(obj);

	return err;
}

static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}

static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
			   struct mmc_host *mmc)
{
	int err;
	u32 val;

	intel_host->d3_retune = true;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	if (err) {
		pr_debug("%s: DSM not supported, error %d\n",
			 mmc_hostname(mmc), err);
		return;
	}

	pr_debug("%s: DSM function mask %#x\n",
		 mmc_hostname(mmc), intel_host->dsm_fns);

	err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
	intel_host->drv_strength = err ? 0 : val;

	err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
	intel_host->d3_retune = err ? true : !!val;
}

static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
{
	u8 reg;

	reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
	reg |= 0x10;
	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
	/* For eMMC, minimum is 1us but give it 9us for good measure */
	udelay(9);
	reg &= ~0x10;
	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
	/* For eMMC, minimum is 200us but give it 300us for good measure */
	usleep_range(300, 1000);
}

static int intel_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);
	struct sdhci_pci_slot *slot = sdhci_priv(host);
	struct intel_host *intel_host = sdhci_pci_priv(slot);

	if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
		return 0;

	return intel_host->drv_strength;
}

static int bxt_get_cd(struct mmc_host *mmc)
{
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (!gpio_cd)
		return 0;

	return sdhci_get_cd_nogpio(mmc);
}

static int mrfld_get_cd(struct mmc_host *mmc)
{
	return sdhci_get_cd_nogpio(mmc);
}

#define SDHCI_INTEL_PWR_TIMEOUT_CNT	20
#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY	100

static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
				  unsigned short vdd)
{
	struct sdhci_pci_slot *slot = sdhci_priv(host);
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	int cntr;
	u8 reg;

	/*
	 * Bus power may control card power, but a full reset still may not
	 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
	 * That might be needed to initialize correctly, if the card was left
	 * powered on previously.
	 */
	if (intel_host->needs_pwr_off) {
		intel_host->needs_pwr_off = false;
		if (mode != MMC_POWER_OFF) {
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
			usleep_range(10000, 12500);
		}
	}

	sdhci_set_power(host, mode, vdd);

	if (mode == MMC_POWER_OFF)
		return;

	/*
	 * Bus power might not enable after D3 -> D0 transition due to the
	 * present state not yet having propagated. Retry for up to 2ms.
	 */
	for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
		reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
		if (reg & SDHCI_POWER_ON)
			break;
		udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
		reg |= SDHCI_POWER_ON;
		sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
	}
}

static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
					  unsigned int timing)
{
	/* Set UHS timing to SDR25 for High Speed mode */
	if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
		timing = MMC_TIMING_UHS_SDR25;
	sdhci_set_uhs_signaling(host, timing);
}

#define INTEL_HS400_ES_REG	0x78
#define INTEL_HS400_ES_BIT	BIT(0)

static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
					struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, INTEL_HS400_ES_REG);
	if (ios->enhanced_strobe)
		val |= INTEL_HS400_ES_BIT;
	else
		val &= ~INTEL_HS400_ES_BIT;
	sdhci_writel(host, val, INTEL_HS400_ES_REG);
}

static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pci_slot *slot = sdhci_priv(host);
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	unsigned int fn;
	u32 result = 0;
	int err;

	err = sdhci_start_signal_voltage_switch(mmc, ios);
	if (err)
		return err;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		fn = INTEL_DSM_V33_SWITCH;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		fn = INTEL_DSM_V18_SWITCH;
		break;
	default:
		return 0;
	}

	err = intel_dsm(intel_host, dev, fn, &result);
	pr_debug("%s: %s DSM fn %u error %d result %u\n",
		 mmc_hostname(mmc), __func__, fn, err, result);

	return 0;
}

static const struct sdhci_ops sdhci_intel_byt_ops = {
	.set_clock = sdhci_set_clock,
	.set_power = sdhci_intel_set_power,
	.enable_dma = sdhci_pci_enable_dma,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_intel_set_uhs_signaling,
	.hw_reset = sdhci_pci_hw_reset,
};

static const struct sdhci_ops sdhci_intel_glk_ops = {
	.set_clock = sdhci_set_clock,
	.set_power = sdhci_intel_set_power,
	.enable_dma = sdhci_pci_enable_dma,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_and_cqhci_reset,
	.set_uhs_signaling = sdhci_intel_set_uhs_signaling,
	.hw_reset = sdhci_pci_hw_reset,
	.irq = sdhci_cqhci_irq,
};

static void byt_read_dsm(struct sdhci_pci_slot *slot)
{
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct device *dev = &slot->chip->pdev->dev;
	struct mmc_host *mmc = slot->host->mmc;

	intel_dsm_init(intel_host, dev, mmc);
	slot->chip->rpm_retune = intel_host->d3_retune;
}

static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	int err = sdhci_execute_tuning(mmc, opcode);
	struct sdhci_host *host = mmc_priv(mmc);

	if (err)
		return err;

	/*
	 * Tuning can leave the IP in an active state (Buffer Read Enable bit
	 * set) which prevents the entry to low power states (i.e. S0i3). Data
	 * reset will clear it.
	 */
	sdhci_reset(host, SDHCI_RESET_DATA);

	return 0;
}

#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

static void intel_cache_ltr(struct sdhci_pci_slot *slot)
{
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct sdhci_host *host = slot->host;

	intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
	intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
	struct sdhci_pci_slot *slot = chip->slots[0];
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct sdhci_host *host = slot->host;
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program the latency tolerance (LTR) according to what has been
	 * requested by the PM QoS layer, or disable it if we were passed a
	 * negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(host->ioaddr + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == intel_host->active_ltr)
		goto out;

	writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
	writel(ltr, host->ioaddr + INTEL_IDLELTR);

	/* Cache the values into lpss structure */
	intel_cache_ltr(slot);
out:
	pm_runtime_put_autosuspend(dev);
}

static bool intel_use_ltr(struct sdhci_pci_chip *chip)
{
	switch (chip->pdev->device) {
	case PCI_DEVICE_ID_INTEL_BYT_EMMC:
	case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
	case PCI_DEVICE_ID_INTEL_BYT_SD:
	case PCI_DEVICE_ID_INTEL_BSW_EMMC:
	case PCI_DEVICE_ID_INTEL_BSW_SDIO:
	case PCI_DEVICE_ID_INTEL_BSW_SD:
		return false;
	default:
		return true;
	}
}

static void intel_ltr_expose(struct sdhci_pci_chip *chip)
{
	struct device *dev = &chip->pdev->dev;

	if (!intel_use_ltr(chip))
		return;

	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct sdhci_pci_chip *chip)
{
	struct device *dev = &chip->pdev->dev;

	if (!intel_use_ltr(chip))
		return;

	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

static void byt_probe_slot(struct sdhci_pci_slot *slot)
{
	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
	struct device *dev = &slot->chip->pdev->dev;
	struct mmc_host *mmc = slot->host->mmc;

	byt_read_dsm(slot);

	byt_ocp_setting(slot->chip->pdev);

	ops->execute_tuning = intel_execute_tuning;
	ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;

	device_property_read_u32(dev, "max-frequency", &mmc->f_max);

	if (!mmc->slotno) {
		slot->chip->slots[mmc->slotno] = slot;
		intel_ltr_expose(slot->chip);
	}
}

static void byt_add_debugfs(struct sdhci_pci_slot *slot)
{
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct mmc_host *mmc = slot->host->mmc;
	struct dentry *dir = mmc->debugfs_root;

	if (!intel_use_ltr(slot->chip))
		return;

	debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);

	intel_cache_ltr(slot);
}

static int byt_add_host(struct sdhci_pci_slot *slot)
{
	int ret = sdhci_add_host(slot->host);

	if (!ret)
		byt_add_debugfs(slot);
	return ret;
}

static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
{
	struct mmc_host *mmc = slot->host->mmc;

	if (!mmc->slotno)
		intel_ltr_hide(slot->chip);
}

static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
	byt_probe_slot(slot);
	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
				 MMC_CAP_CMD_DURING_TFR |
				 MMC_CAP_WAIT_WHILE_BUSY;
	slot->hw_reset = sdhci_pci_int_hw_reset;
	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
		slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
	slot->host->mmc_host_ops.select_drive_strength =
						intel_select_drive_strength;
	return 0;
}

static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
	return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
	       (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
		dmi_match(DMI_SYS_VENDOR, "IRBIS"));
}

static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
{
	return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
	       dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
}

static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
	int ret = byt_emmc_probe_slot(slot);

	if (!glk_broken_cqhci(slot))
		slot->host->mmc->caps2 |= MMC_CAP2_CQE;

	if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
		if (!jsl_broken_hs400es(slot)) {
			slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
			slot->host->mmc_host_ops.hs400_enhanced_strobe =
						intel_hs400_enhanced_strobe;
		}
		slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
	}

	return ret;
}

static const struct cqhci_host_ops glk_cqhci_ops = {
	.enable = sdhci_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_pci_dumpregs,
};

static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
{
	struct device *dev = &slot->chip->pdev->dev;
	struct sdhci_host *host = slot->host;
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + 0x200;
	cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
	cq_host->ops = &glk_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	byt_add_debugfs(slot);

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

#ifdef CONFIG_PM
#define GLK_RX_CTRL1	0x834
#define GLK_TUN_VAL	0x840
#define GLK_PATH_PLL	GENMASK(13, 8)
#define GLK_DLY		GENMASK(6, 0)
/* Workaround firmware failing to restore the tuning value */
static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
{
	struct sdhci_pci_slot *slot = chip->slots[0];
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct sdhci_host *host = slot->host;
	u32 glk_rx_ctrl1;
	u32 glk_tun_val;
	u32 dly;

	if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
		return;

	glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
	glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);

	if (susp) {
		intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
		intel_host->glk_tun_val = glk_tun_val;
		return;
	}

	if (!intel_host->glk_tun_val)
		return;

	if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
		intel_host->rpm_retune_ok = true;
		return;
	}

	dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
				  (intel_host->glk_tun_val << 1));
	if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
		return;

	glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
	sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);

	intel_host->rpm_retune_ok = true;
	chip->rpm_retune = true;
	mmc_retune_needed(host->mmc);
	pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
}

static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
{
	if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
	    !chip->rpm_retune)
		glk_rpm_retune_wa(chip, susp);
}

static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
{
	glk_rpm_retune_chk(chip, true);

	return sdhci_cqhci_runtime_suspend(chip);
}

static int glk_runtime_resume(struct sdhci_pci_chip *chip)
{
	glk_rpm_retune_chk(chip, false);

	return sdhci_cqhci_runtime_resume(chip);
}
#endif

#ifdef CONFIG_ACPI
static int ni_set_max_freq(struct sdhci_pci_slot *slot)
{
	acpi_status status;
	unsigned long long max_freq;

	status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
				       "MXFQ", NULL, &max_freq);
	if (ACPI_FAILURE(status)) {
		dev_err(&slot->chip->pdev->dev,
			"MXFQ not found in acpi table\n");
		return -EINVAL;
	}

	slot->host->mmc->f_max = max_freq * 1000000;

	return 0;
}
#else
static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
{
	return 0;
}
#endif

static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
	int err;

	byt_probe_slot(slot);

	err = ni_set_max_freq(slot);
	if (err)
		return err;

	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
				 MMC_CAP_WAIT_WHILE_BUSY;
	return 0;
}

static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
	byt_probe_slot(slot);
	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
				 MMC_CAP_WAIT_WHILE_BUSY;
	return 0;
}

static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
{
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);

	intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
}

static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
	byt_probe_slot(slot);
	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
	slot->cd_idx = 0;
	slot->cd_override_level = true;
	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
		slot->host->mmc_host_ops.get_cd = bxt_get_cd;

	if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
	    slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
		slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;

	byt_needs_pwr_off(slot);

	return 0;
}

#ifdef CONFIG_PM_SLEEP

static int byt_resume(struct sdhci_pci_chip *chip)
{
	byt_ocp_setting(chip->pdev);

	return sdhci_pci_resume_host(chip);
}

#endif

#ifdef CONFIG_PM

static int byt_runtime_resume(struct sdhci_pci_chip *chip)
{
	byt_ocp_setting(chip->pdev);

	return sdhci_pci_runtime_resume_host(chip);
}

#endif

static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
#ifdef CONFIG_PM_SLEEP
	.resume = byt_resume,
#endif
#ifdef CONFIG_PM
	.runtime_resume = byt_runtime_resume,
#endif
	.allow_runtime_pm = true,
	.probe_slot = byt_emmc_probe_slot,
	.add_host = byt_add_host,
	.remove_slot = byt_remove_slot,
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
		   SDHCI_QUIRK2_STOP_WITH_TC,
	.ops = &sdhci_intel_byt_ops,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
	.allow_runtime_pm = true,
	.probe_slot = glk_emmc_probe_slot,
	.add_host = glk_emmc_add_host,
	.remove_slot = byt_remove_slot,
#ifdef CONFIG_PM_SLEEP
	.suspend = sdhci_cqhci_suspend,
	.resume = sdhci_cqhci_resume,
#endif
#ifdef CONFIG_PM
	.runtime_suspend = glk_runtime_suspend,
	.runtime_resume = glk_runtime_resume,
#endif
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
		   SDHCI_QUIRK2_STOP_WITH_TC,
	.ops = &sdhci_intel_glk_ops,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
#ifdef CONFIG_PM_SLEEP
	.resume = byt_resume,
#endif
#ifdef CONFIG_PM
	.runtime_resume = byt_runtime_resume,
#endif
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
		   SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.allow_runtime_pm = true,
	.probe_slot = ni_byt_sdio_probe_slot,
	.add_host = byt_add_host,
	.remove_slot = byt_remove_slot,
	.ops = &sdhci_intel_byt_ops,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
#ifdef CONFIG_PM_SLEEP
	.resume = byt_resume,
#endif
#ifdef CONFIG_PM
	.runtime_resume = byt_runtime_resume,
#endif
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
		   SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.allow_runtime_pm = true,
	.probe_slot = byt_sdio_probe_slot,
	.add_host = byt_add_host,
	.remove_slot = byt_remove_slot,
	.ops = &sdhci_intel_byt_ops,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
#ifdef CONFIG_PM_SLEEP
	.resume = byt_resume,
#endif
#ifdef CONFIG_PM
	.runtime_resume = byt_runtime_resume,
#endif
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
		   SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_STOP_WITH_TC,
	.allow_runtime_pm = true,
	.own_cd_for_runtime_pm = true,
	.probe_slot = byt_sd_probe_slot,
	.add_host = byt_add_host,
	.remove_slot = byt_remove_slot,
	.ops = &sdhci_intel_byt_ops,
	.priv_size = sizeof(struct intel_host),
};

/* Define Host controllers for Intel Merrifield platform */
#define INTEL_MRFLD_EMMC_0	0
#define INTEL_MRFLD_EMMC_1	1
#define INTEL_MRFLD_SD		2
#define INTEL_MRFLD_SDIO	3

#ifdef CONFIG_ACPI
static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
{
	struct acpi_device *device;

	device = ACPI_COMPANION(&slot->chip->pdev->dev);
	if (device)
		acpi_device_fix_up_power_extended(device);
}
#else
static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
#endif

static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
	unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);

	switch (func) {
	case INTEL_MRFLD_EMMC_0:
	case INTEL_MRFLD_EMMC_1:
		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
					 MMC_CAP_8_BIT_DATA |
					 MMC_CAP_1_8V_DDR;
		break;
	case INTEL_MRFLD_SD:
		slot->cd_idx = 0;
		slot->cd_override_level = true;
		/*
		 * There are two PCB designs of SD card slot with the opposite
		 * card detection sense. Quirk this out by ignoring GPIO state
		 * completely in the custom ->get_cd() callback.
		 */
		slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
		slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
		break;
	case INTEL_MRFLD_SDIO:
		/* Advertise 2.0v for compatibility with the SDIO card's OCR */
		slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
					 MMC_CAP_POWER_OFF_CARD;
		break;
	default:
		return -ENODEV;
	}

	intel_mrfld_mmc_fix_up_power_slot(slot);
	return 0;
}

static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
		   SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.allow_runtime_pm = true,
	.probe_slot = intel_mrfld_mmc_probe_slot,
};

#define JMB388_SAMPLE_COUNT	5

static int jmicron_jmb388_get_ro(struct mmc_host *mmc)
{
	int i, ro_count;

	ro_count = 0;
	for (i = 0; i < JMB388_SAMPLE_COUNT; i++) {
		if (sdhci_get_ro(mmc) > 0) {
			if (++ro_count > JMB388_SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
	u8 scratch;
	int ret;

	ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
	if (ret)
		goto fail;

	/*
	 * Turn PMOS on [bit 0], set over current detection to 2.4 V
	 * [bit 1:2] and enable over current debouncing [bit 6].
	 */
	if (on)
		scratch |= 0x47;
	else
		scratch &= ~0x47;

	ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);

fail:
	return pcibios_err_to_errno(ret);
}

static int jmicron_probe(struct sdhci_pci_chip *chip)
{
	int ret;
	u16 mmcdev = 0;

	if (chip->pdev->revision == 0) {
		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
				SDHCI_QUIRK_32BIT_DMA_SIZE |
				SDHCI_QUIRK_32BIT_ADMA_SIZE |
				SDHCI_QUIRK_RESET_AFTER_REQUEST |
				SDHCI_QUIRK_BROKEN_SMALL_PIO;
	}

	/*
	 * JMicron chips can have two interfaces to the same hardware
	 * in order to work around limitations in Microsoft's driver.
	 * We need to make sure we only bind to one of them.
	 *
	 * This code assumes two things:
	 *
	 * 1. The PCI code adds subfunctions in order.
	 *
	 * 2. The MMC interface has a lower subfunction number
	 *    than the SD interface.
	 */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;

	if (mmcdev) {
		struct pci_dev *sd_dev;

		sd_dev = NULL;
		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
						mmcdev, sd_dev)) != NULL) {
			if ((PCI_SLOT(chip->pdev->devfn) ==
				PCI_SLOT(sd_dev->devfn)) &&
			    (chip->pdev->bus == sd_dev->bus))
				break;
		}

		if (sd_dev) {
			pci_dev_put(sd_dev);
			dev_info(&chip->pdev->dev, "Refusing to bind to "
				"secondary interface.\n");
			return -ENODEV;
		}
	}

	/*
	 * JMicron chips need a bit of a nudge to enable the power
	 * output pins.
	 */
	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	return 0;
}

static void jmicron_enable_mmc(struct sdhci_host *host, int on)
{
	u8 scratch;

	scratch = readb(host->ioaddr + 0xC0);

	if (on)
		scratch |= 0x01;
	else
		scratch &= ~0x01;

	writeb(scratch, host->ioaddr + 0xC0);
}

static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
{
	if (slot->chip->pdev->revision == 0) {
		u16 version;

		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
		version = (version & SDHCI_VENDOR_VER_MASK) >>
			SDHCI_VENDOR_VER_SHIFT;

		/*
		 * Older versions of the chip have lots of nasty glitches
		 * in the ADMA engine. It's best just to avoid it
		 * completely.
		 */
		if (version < 0xAC)
			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
	}

	/* JM388 MMC doesn't support 1.8V while SD supports it */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31 |
			MMC_VDD_165_195; /* allow 1.8V */
		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
	}

	/*
	 * The secondary interface requires a bit set to get the
	 * interrupts.
	 */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		jmicron_enable_mmc(slot->host, 1);

	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;

	/* Handle unstable RO-detection on JM388 chips */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		slot->host->mmc_host_ops.get_ro = jmicron_jmb388_get_ro;

	return 0;
}

static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
{
	if (dead)
		return;

	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		jmicron_enable_mmc(slot->host, 0);
}

#ifdef CONFIG_PM_SLEEP
static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
	int i, ret;

	ret = sdhci_pci_suspend_host(chip);
	if (ret)
		return ret;

	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		for (i = 0; i < chip->num_slots; i++)
			jmicron_enable_mmc(chip->slots[i]->host, 0);
	}

	return 0;
}

static int jmicron_resume(struct sdhci_pci_chip *chip)
{
	int ret, i;

	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		for (i = 0; i < chip->num_slots; i++)
			jmicron_enable_mmc(chip->slots[i]->host, 1);
	}

	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	return sdhci_pci_resume_host(chip);
}
#endif

static const struct sdhci_pci_fixes sdhci_jmicron = {
	.probe = jmicron_probe,

	.probe_slot = jmicron_probe_slot,
	.remove_slot = jmicron_remove_slot,

#ifdef CONFIG_PM_SLEEP
	.suspend = jmicron_suspend,
	.resume = jmicron_resume,
#endif
};

/* SysKonnect CardBus2SDIO extra registers */
#define SYSKT_CTRL		0x200
#define SYSKT_RDFIFO_STAT	0x204
#define SYSKT_WRFIFO_STAT	0x208
#define SYSKT_POWER_DATA	0x20c
#define SYSKT_POWER_330		0xef
#define SYSKT_POWER_300		0xf8
#define SYSKT_POWER_184		0xcc
#define SYSKT_POWER_CMD		0x20d
#define SYSKT_POWER_START	(1 << 7)
#define SYSKT_POWER_STATUS	0x20e
#define SYSKT_POWER_STATUS_OK	(1 << 0)
#define SYSKT_BOARD_REV		0x210
#define SYSKT_CHIP_REV		0x211
#define SYSKT_CONF_DATA		0x212
#define SYSKT_CONF_DATA_1V8	(1 << 2)
#define SYSKT_CONF_DATA_2V5	(1 << 1)
#define SYSKT_CONF_DATA_3V3	(1 << 0)

static int syskt_probe(struct sdhci_pci_chip *chip)
{
	if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		chip->pdev->class &= ~0x0000FF;
		chip->pdev->class |= PCI_SDHCI_IFDMA;
	}
	return 0;
}

static int syskt_probe_slot(struct sdhci_pci_slot *slot)
{
	int tm, ps;

	u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
	u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
	dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
					 "board rev %d.%d, chip rev %d.%d\n",
		 board_rev >> 4, board_rev & 0xf,
		 chip_rev >> 4, chip_rev & 0xf);
	if (chip_rev >= 0x20)
		slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;

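	/*
	 * Power the slot at 3.3 V and poll the power status register until
	 * the regulator reports a stable supply (for at most about 1 ms).
	 */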
	writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
	writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
	udelay(50);
	tm = 10;  /* Wait max 1 ms */
	do {
		ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
		if (ps & SYSKT_POWER_STATUS_OK)
			break;
		udelay(100);
	} while (--tm);
	if (!tm) {
		dev_err(&slot->chip->pdev->dev,
			"power regulator never stabilized");
		writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
		return -ENODEV;
	}

	return 0;
}

static const struct sdhci_pci_fixes sdhci_syskt = {
	.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
	.probe = syskt_probe,
	.probe_slot = syskt_probe_slot,
};

static int via_probe(struct sdhci_pci_chip *chip)
{
	if (chip->pdev->revision == 0x10)
		chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;

	return 0;
}

static const struct sdhci_pci_fixes sdhci_via = {
	.probe = via_probe,
};

static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
{
	slot->host->mmc->caps2 |= MMC_CAP2_HS200;
	return 0;
}

static const struct sdhci_pci_fixes sdhci_rtsx = {
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
		   SDHCI_QUIRK2_BROKEN_DDR50,
	.probe_slot = rtsx_probe_slot,
};

/* AMD chipset generation */
enum amd_chipset_gen {
	AMD_CHIPSET_BEFORE_ML,
	AMD_CHIPSET_CZ,
	AMD_CHIPSET_NL,
	AMD_CHIPSET_UNKNOWN,
};

/* AMD registers */
#define AMD_SD_AUTO_PATTERN	0xB8
#define AMD_MSLEEP_DURATION	4
#define AMD_SD_MISC_CONTROL	0xD0
#define AMD_MAX_TUNE_VALUE	0x0B
#define AMD_AUTO_TUNE_SEL	0x10800
#define AMD_FIFO_PTR		0x30
#define AMD_BIT_MASK		0x1F

static void amd_tuning_reset(struct sdhci_host *host)
{
	unsigned int val;

	val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, val, SDHCI_HOST_CONTROL2);

	val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	val &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
}

static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
{
	unsigned int val;

	pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
	val &= ~AMD_BIT_MASK;
	val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
	pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
}

static void amd_enable_manual_tuning(struct pci_dev *pdev)
{
	unsigned int val;

	pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
	val |= AMD_FIFO_PTR;
	pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
}

static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
{
	struct sdhci_pci_slot *slot = sdhci_priv(host);
	struct pci_dev *pdev = slot->chip->pdev;
	u8 valid_win = 0;
	u8 valid_win_max = 0;
	u8 valid_win_end = 0;
	u8 ctrl, tune_around;

	amd_tuning_reset(host);

	for (tune_around = 0; tune_around < 12; tune_around++) {
		amd_config_tuning_phase(pdev, tune_around);

		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			valid_win = 0;
			msleep(AMD_MSLEEP_DURATION);
			ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
			sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
		} else if (++valid_win > valid_win_max) {
			valid_win_max = valid_win;
			valid_win_end = tune_around;
		}
	}

	if (!valid_win_max) {
		dev_err(&pdev->dev, "no tuning point found\n");
		return -EIO;
	}

	amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);

	amd_enable_manual_tuning(pdev);

	host->mmc->retune_period = 0;

	return 0;
}

static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);

	/* AMD requires custom HS200 tuning */
	if (host->timing == MMC_TIMING_MMC_HS200)
		return amd_execute_tuning_hs200(host, opcode);

	/* Otherwise perform standard SDHCI tuning */
	return sdhci_execute_tuning(mmc, opcode);
}

static int amd_probe_slot(struct sdhci_pci_slot *slot)
{
	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;

	ops->execute_tuning = amd_execute_tuning;

	return 0;
}

static int amd_probe(struct sdhci_pci_chip *chip)
{
	struct pci_dev *smbus_dev;
	enum amd_chipset_gen gen;

	smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
			PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
	if (smbus_dev) {
		gen = AMD_CHIPSET_BEFORE_ML;
	} else {
		smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
				PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
		if (smbus_dev) {
			if (smbus_dev->revision < 0x51)
				gen = AMD_CHIPSET_CZ;
			else
				gen = AMD_CHIPSET_NL;
		} else {
			gen = AMD_CHIPSET_UNKNOWN;
		}
	}

	pci_dev_put(smbus_dev);

	if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
		chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;

	return 0;
}

static u32 sdhci_read_present_state(struct sdhci_host *host)
{
	return sdhci_readl(host, SDHCI_PRESENT_STATE);
}

static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pci_slot *slot = sdhci_priv(host);
	struct pci_dev *pdev = slot->chip->pdev;
	u32 present_state;

	/*
	 * SDHC 0x7906 requires a hard reset to clear all internal state.
	 * Otherwise it can get into a bad state where the DATA lines are
	 * always read as zeros.
	 */
	if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
		pci_clear_master(pdev);

		pci_save_state(pdev);

		pci_set_power_state(pdev, PCI_D3cold);
		pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
			 pdev->current_state);
		pci_set_power_state(pdev, PCI_D0);

		pci_restore_state(pdev);

		/*
		 * SDHCI_RESET_ALL says the card detect logic should not be
		 * reset, but since we need to reset the entire controller
		 * we should wait until the card detect logic has stabilized.
		 *
		 * This normally takes about 40ms.
		 */
		readx_poll_timeout(
			sdhci_read_present_state,
			host,
			present_state,
			present_state & SDHCI_CD_STABLE,
			10000,
			100000
		);
	}

	return sdhci_reset(host, mask);
}

static const struct sdhci_ops amd_sdhci_pci_ops = {
	.set_clock = sdhci_set_clock,
	.enable_dma = sdhci_pci_enable_dma,
	.set_bus_width = sdhci_set_bus_width,
	.reset = amd_sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_pci_fixes sdhci_amd = {
	.probe = amd_probe,
	.ops = &amd_sdhci_pci_ops,
	.probe_slot = amd_probe_slot,
};

static const struct pci_device_id pci_ids[] = {
	SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh),
	SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
	SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712),
	SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
	SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714),
	SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
	SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
	SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
	SDHCI_PCI_DEVICE(VIA, 95D0, via),
	SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
	SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc),
	SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(O2, 8120, o2),
	SDHCI_PCI_DEVICE(O2, 8220, o2),
	SDHCI_PCI_DEVICE(O2, 8221, o2),
	SDHCI_PCI_DEVICE(O2, 8320, o2),
	SDHCI_PCI_DEVICE(O2, 8321, o2),
	SDHCI_PCI_DEVICE(O2, FUJIN2, o2),
	SDHCI_PCI_DEVICE(O2, SDS0, o2),
	SDHCI_PCI_DEVICE(O2, SDS1, o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
	SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
	SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
	SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
	SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
	SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
	SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
	/* Generic SD host controller */
	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(pci, pci_ids);

/*****************************************************************************\
 *                                                                           *
 * SDHCI core callbacks                                                      *
 *                                                                           *
\*****************************************************************************/

int sdhci_pci_enable_dma(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot;
	struct pci_dev *pdev;

	slot = sdhci_priv(host);
	pdev = slot->chip->pdev;

	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
		((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		dev_warn(&pdev->dev, "Will use DMA mode even though HW "
			"doesn't fully claim to support it.\n");
	}

	pci_set_master(pdev);

	return 0;
}

static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot = sdhci_priv(host);

	if (slot->hw_reset)
		slot->hw_reset(host);
}

static const struct sdhci_ops sdhci_pci_ops = {
	.set_clock = sdhci_set_clock,
	.enable_dma = sdhci_pci_enable_dma,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
	.hw_reset = sdhci_pci_hw_reset,
};

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
int sdhci_pci_enable_dma(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot;
	struct pci_dev *pdev;

	slot = sdhci_priv(host);
	pdev = slot->chip->pdev;

	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
	    ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		dev_warn(&pdev->dev, "Will use DMA mode even though HW "
			"doesn't fully claim to support it.\n");
	}

	pci_set_master(pdev);

	return 0;
}

static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot = sdhci_priv(host);

	if (slot->hw_reset)
		slot->hw_reset(host);
}

static const struct sdhci_ops sdhci_pci_ops = {
	.set_clock		= sdhci_set_clock,
	.enable_dma		= sdhci_pci_enable_dma,
	.set_bus_width		= sdhci_set_bus_width,
	.reset			= sdhci_reset,
	.set_uhs_signaling	= sdhci_set_uhs_signaling,
	.hw_reset		= sdhci_pci_hw_reset,
};

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_suspend(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->suspend)
		return chip->fixes->suspend(chip);

	return sdhci_pci_suspend_host(chip);
}

static int sdhci_pci_resume(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->resume)
		return chip->fixes->resume(chip);

	return sdhci_pci_resume_host(chip);
}
#endif

#ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->runtime_suspend)
		return chip->fixes->runtime_suspend(chip);

	return sdhci_pci_runtime_suspend_host(chip);
}

static int sdhci_pci_runtime_resume(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->runtime_resume)
		return chip->fixes->runtime_resume(chip);

	return sdhci_pci_runtime_resume_host(chip);
}
#endif
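/*
 * System sleep and runtime PM handlers defer to the per-chip "fixes" hooks
 * when present and fall back to the generic sdhci_pci_*_host() helpers
 * otherwise.  SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() expand to
 * nothing when CONFIG_PM_SLEEP/CONFIG_PM are disabled, matching the #ifdef
 * guards around the callbacks above.
 */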
static const struct dev_pm_ops sdhci_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
	SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
			   sdhci_pci_runtime_resume, NULL)
};

/*****************************************************************************\
 *                                                                           *
 * Device probing/removal                                                    *
 *                                                                           *
\*****************************************************************************/

static struct sdhci_pci_slot *sdhci_pci_probe_slot(
	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
	int slotno)
{
	struct sdhci_pci_slot *slot;
	struct sdhci_host *host;
	int ret, bar = first_bar + slotno;
	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
		return ERR_PTR(-ENODEV);
	}

	if (pci_resource_len(pdev, bar) < 0x100) {
		dev_err(&pdev->dev, "Invalid iomem size. You may "
			"experience problems.\n");
	}

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
	if (IS_ERR(host)) {
		dev_err(&pdev->dev, "cannot allocate host\n");
		return ERR_CAST(host);
	}

	slot = sdhci_priv(host);

	slot->chip = chip;
	slot->host = host;
	slot->cd_idx = -1;

	host->hw_name = "PCI";
	host->ops = chip->fixes && chip->fixes->ops ?
		    chip->fixes->ops :
		    &sdhci_pci_ops;
	host->quirks = chip->quirks;
	host->quirks2 = chip->quirks2;

	host->irq = pdev->irq;

	ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
	if (ret) {
		dev_err(&pdev->dev, "cannot request region\n");
		goto cleanup;
	}

	host->ioaddr = pcim_iomap_table(pdev)[bar];

	if (chip->fixes && chip->fixes->probe_slot) {
		ret = chip->fixes->probe_slot(slot);
		if (ret)
			goto cleanup;
	}

	host->mmc->pm_caps = MMC_PM_KEEP_POWER;
	host->mmc->slotno = slotno;
	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;

	if (device_can_wakeup(&pdev->dev))
		host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (host->mmc->caps & MMC_CAP_CD_WAKE)
		device_init_wakeup(&pdev->dev, true);

	if (slot->cd_idx >= 0) {
		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
					   slot->cd_override_level, 0);
		if (ret && ret != -EPROBE_DEFER)
			ret = mmc_gpiod_request_cd(host->mmc, NULL,
						   slot->cd_idx,
						   slot->cd_override_level,
						   0);
		if (ret == -EPROBE_DEFER)
			goto remove;

		if (ret) {
			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
			slot->cd_idx = -1;
		}
	}

	if (chip->fixes && chip->fixes->add_host)
		ret = chip->fixes->add_host(slot);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto remove;

	/*
	 * Check if the chip needs a separate GPIO for card detect to wake up
	 * from runtime suspend. If it is not there, don't allow runtime PM.
	 */
	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
		chip->allow_runtime_pm = false;

	return slot;

remove:
	if (chip->fixes && chip->fixes->remove_slot)
		chip->fixes->remove_slot(slot, 0);

cleanup:
	sdhci_free_host(host);

	return ERR_PTR(ret);
}

static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
	int dead;
	u32 scratch;

	dead = 0;
	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
	if (scratch == (u32)-1)
		dead = 1;

	if (slot->chip->fixes && slot->chip->fixes->remove_host)
		slot->chip->fixes->remove_host(slot, dead);
	else
		sdhci_remove_host(slot->host, dead);

	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
		slot->chip->fixes->remove_slot(slot, dead);

	sdhci_free_host(slot->host);
}

int sdhci_pci_uhs2_add_host(struct sdhci_pci_slot *slot)
{
	return sdhci_uhs2_add_host(slot->host);
}

void sdhci_pci_uhs2_remove_host(struct sdhci_pci_slot *slot, int dead)
{
	sdhci_uhs2_remove_host(slot->host, dead);
}

static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
	pm_suspend_ignore_children(dev, 1);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	/* Stay active until mmc core scans for a card */
	pm_runtime_put_noidle(dev);
}

static void sdhci_pci_runtime_pm_forbid(struct device *dev)
{
	pm_runtime_forbid(dev);
	pm_runtime_get_noresume(dev);
}
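/*
 * Probe reads the standard PCI_SLOT_INFO register to learn how many slots
 * the controller exposes and which BAR the first one uses; each slot then
 * gets its own BAR (first_bar + slotno) and its own sdhci_host instance via
 * sdhci_pci_probe_slot().
 */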
static int sdhci_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;

	u8 slots, first_bar;
	int ret, i;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return pcibios_err_to_errno(ret);

	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);

	BUG_ON(slots > MAX_SLOTS);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return pcibios_err_to_errno(ret);

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->pdev = pdev;
	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
	if (chip->fixes) {
		chip->quirks = chip->fixes->quirks;
		chip->quirks2 = chip->fixes->quirks2;
		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
	}
	chip->num_slots = slots;
	chip->pm_retune = true;
	chip->rpm_retune = true;

	pci_set_drvdata(pdev, chip);

	if (chip->fixes && chip->fixes->probe) {
		ret = chip->fixes->probe(chip);
		if (ret)
			return ret;
	}

	slots = chip->num_slots;	/* Quirk may have changed this */

	for (i = 0; i < slots; i++) {
		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
		if (IS_ERR(slot)) {
			for (i--; i >= 0; i--)
				sdhci_pci_remove_slot(chip->slots[i]);
			return PTR_ERR(slot);
		}

		chip->slots[i] = slot;
	}

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_allow(&pdev->dev);

	return 0;
}

static void sdhci_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_forbid(&pdev->dev);

	for (i = 0; i < chip->num_slots; i++)
		sdhci_pci_remove_slot(chip->slots[i]);
}

static struct pci_driver sdhci_driver = {
	.name		= "sdhci-pci",
	.id_table	= pci_ids,
	.probe		= sdhci_pci_probe,
	.remove		= sdhci_pci_remove,
	.driver		= {
		.pm = &sdhci_pci_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_pci_driver(sdhci_driver);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
MODULE_LICENSE("GPL");