// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <soc/qcom/ice.h>

#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION		0x50
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK		0xff

#define CORE_MCI_GENERICS		0x70
#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)

#define HC_MODE_EN		0x1
#define CORE_POWER		0x0
#define CORE_SW_RST		BIT(7)
#define FF_CLK_SW_RST_DIS	BIT(13)

#define CORE_PWRCTL_BUS_OFF	BIT(0)
#define CORE_PWRCTL_BUS_ON	BIT(1)
#define CORE_PWRCTL_IO_LOW	BIT(2)
#define CORE_PWRCTL_IO_HIGH	BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS	BIT(0)
#define CORE_PWRCTL_BUS_FAIL	BIT(1)
#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
#define CORE_PWRCTL_IO_FAIL	BIT(3)
#define REQ_BUS_OFF		BIT(0)
#define REQ_BUS_ON		BIT(1)
#define REQ_IO_LOW		BIT(2)
#define REQ_IO_HIGH		BIT(3)
#define INT_MASK		0xf
#define MAX_PHASES		16
#define CORE_DLL_LOCK		BIT(7)
#define CORE_DDR_DLL_LOCK	BIT(11)
#define CORE_DLL_EN		BIT(16)
#define CORE_CDR_EN		BIT(17)
#define CORE_CK_OUT_EN		BIT(18)
#define CORE_CDR_EXT_EN		BIT(19)
#define CORE_DLL_PDN		BIT(29)
#define CORE_DLL_RST		BIT(30)
#define CORE_CMD_DAT_TRACK_SEL	BIT(0)

#define CORE_DDR_CAL_EN		BIT(0)
#define CORE_FLL_CYCLE_CNT	BIT(18)
#define CORE_DLL_CLOCK_DISABLE	BIT(21)

#define DLL_USR_CTL_POR_VAL	0x10800
#define ENABLE_DLL_LOCK_STATUS	BIT(26)
#define FINE_TUNE_MODE_EN	BIT(27)
#define BIAS_OK_SIGNAL		BIT(29)

#define DLL_CONFIG_3_LOW_FREQ_VAL	0x08
#define DLL_CONFIG_3_HIGH_FREQ_VAL	0x10

#define CORE_VENDOR_SPEC_POR_VAL	0xa9c
#define CORE_CLK_PWRSAVE	BIT(1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN	BIT(15)
#define CORE_IO_PAD_PWR_SWITCH	BIT(16)
#define CORE_HC_SELECT_IN_EN	BIT(18)
#define CORE_HC_SELECT_IN_SDR50	(4 << 19)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_3_0V_SUPPORT	BIT(25)
#define CORE_1_8V_SUPPORT	BIT(26)
#define CORE_VOLT_SUPPORT	(CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		BIT(16)
#define CORE_HW_AUTOCAL_ENA		BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		BIT(0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
#define CORE_CDC_SWITCH_RC_EN		BIT(1)

#define CORE_CDC_T4_DLY_SEL		BIT(0)
#define CORE_CMDIN_RCLK_EN		BIT(1)
#define CORE_START_CDC_TRAFFIC		BIT(6)

#define CORE_PWRSAVE_DLL		BIT(3)

#define DDR_CONFIG_POR_VAL		0x80040873

#define INVALID_TUNING_PHASE	-1
#define SDHCI_MSM_MIN_CLOCK	400000
#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT	20
#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT	24
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS	5000

/* Max load for eMMC Vdd supply */
#define MMC_VMMC_MAX_LOAD_UA	570000

/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA	325000

/* Max load for SD Vdd supply */
#define SD_VMMC_MAX_LOAD_UA	800000

/* Max load for SD Vdd-io supply */
#define SD_VQMMC_MAX_LOAD_UA	22000

#define msm_host_readl(msm_host, host, offset) \
	msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
	msm_host->var_ops->msm_writel_relaxed(val, host, offset)

/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1	0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN	(0x3 << 13)

struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
	u32 core_ddr_config;
	u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
	.core_dll_usr_ctl = 0x388,
};
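/*
 * Example (illustrative): on a v5 controller, where the MCI register
 * space has been removed, a call such as
 *
 *	msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status)
 *
 * resolves through var_ops to sdhci_msm_v5_variant_readl_relaxed() and
 * reads host->ioaddr + 0x240; on the older MCI variant the same call
 * instead reads msm_host->core_mem + 0xdc (see the table below).
 */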
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};

/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data structure based on the version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	/* core, iface, cal and sleep clocks */
	struct clk_bulk_data bulk_clks[4];
#ifdef CONFIG_MMC_CRYPTO
	struct qcom_ice *ice;
#endif
	unsigned long clk_rate;
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;
	u32 transfer_mode;
	bool updated_ddr_cfg;
	bool uses_tassadar_dll;
	u32 dll_config;
	u32 ddr_config;
	bool vqmmc_enabled;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return msm_host->offset;
}
/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;
	/*
	 * The SDHC requires the internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller
	 * uses the faster clock (100/400 MHz) for some of its parts and
	 * sends the actual required clock (50/200 MHz) to the card.
	 */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		return 2;
	return 1;
}

static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	unsigned long achieved_rate;
	unsigned int desired_rate;
	unsigned int mult;
	int rc;

	mult = msm_get_clock_mult_for_bus_mode(host);
	desired_rate = clock * mult;
	rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
		return;
	}

	/*
	 * Qualcomm clock drivers by default round clock _up_ if they can't
	 * make the requested rate. This is not good for SD. Yell if we
	 * encounter it.
	 */
	achieved_rate = clk_get_rate(core_clk);
	if (achieved_rate > desired_rate)
		pr_warn("%s: Card appears overclocked; req %u Hz, actual %lu Hz\n",
			mmc_hostname(host->mmc), desired_rate, achieved_rate);
	host->mmc->actual_clock = achieved_rate / mult;

	/* Stash the rate we requested to use in sdhci_msm_runtime_resume() */
	msm_host->clk_rate = desired_rate;

	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
}
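/*
 * Worked example (illustrative): for MMC_TIMING_MMC_DDR52 with a
 * requested card clock of 52 MHz, msm_get_clock_mult_for_bus_mode()
 * returns 2, so dev_pm_opp_set_rate() is asked for 104 MHz and
 * mmc->actual_clock reports the achieved rate divided back by 2.
 */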
/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
			sdhci_priv_msm_offset(host);

	/* Poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config) & CORE_CK_OUT_EN);
	}

	return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
			sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}
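/*
 * Example (illustrative): msm_config_cm_dll_phase(host, 4) writes
 * grey_coded_phase_table[4] == 0x6 into the CDR_SELEXT field (bits
 * 23:20) of DLL_CONFIG, with CK_OUT_EN toggled off and back on around
 * the update so the new phase takes effect cleanly.
 */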
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between the 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If the 2 valid windows form a cycle, merge them into a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases across both windows must be less
			 * than MAX_PHASES.
			 */
			return -EINVAL;
		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
			    ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}
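/*
 * Worked example (illustrative): if tuning passed at phases
 * {0, 1, 2, 13, 14, 15}, phase 0 and phase 15 both lie in valid
 * windows, so the two windows are merged into the single cyclic
 * window {13, 14, 15, 0, 1, 2}. curr_max is then 6, the 3/4 point
 * is index (6 * 3) / 4 - 1 == 3, and phase 0 is returned.
 */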
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;
	const struct sdhci_msm_offset *msm_offset =
			sdhci_priv_msm_offset(host);

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}
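/*
 * Example (illustrative): with host->clock at 192 MHz the ladder above
 * selects mclk_freq = 7, which is programmed into the CMUX_SHIFT_PHASE
 * field (bits 26:24) of DLL_CONFIG.
 */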
/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that the clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	if (msm_host->dll_config)
		writel_relaxed(msm_host->dll_config,
				host->ioaddr + msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (!msm_host->dll_config)
		msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		if (!msm_host->dll_config)
			msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/*
	 * Configure the DLL user control register to enable DLL status.
	 * This setting is applicable to SDCC v5.1 onwards only.
	 */
	if (msm_host->uses_tassadar_dll) {
		config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_usr_ctl);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_3);
		config &= ~0xFF;
		if (msm_host->clk_rate < 150000000)
			config |= DLL_CONFIG_3_LOW_FREQ_VAL;
		else
			config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_3);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait of 50 us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}
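/*
 * Worked example (illustrative) for the FLL cycle count programmed
 * above: with host->clock at 192 MHz, a 19.2 MHz TCXO and
 * CORE_FLL_CYCLE_CNT clear, mclk_freq = DIV_ROUND_CLOSEST_ULL(192 MHz
 * * 4, 19.2 MHz) == 40, which lands in bits 17:10 of DLL_CONFIG_2.
 */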
static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.
	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN fields
	 * in VENDOR_SPEC_FUNC
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure the above writes impacting the free running MCLK are
	 * completed before changing the clk_rate at GCC.
	 */
	wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * core_dll_status to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
				msm_offset->core_dll_status,
				dll_lock,
				(dll_lock &
				(CORE_DLL_LOCK |
				CORE_DDR_DLL_LOCK)), 10,
				1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure the above writes impacting the free running MCLK are
	 * completed before changing the clk_rate at GCC.
	 */
	wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * eMMC specific HS200/HS400 don't have their respective modes
 * defined here, hence we use these values.
 *
 * HS200 - SDR104 (Since they both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *		Initially SDR104 - when tuning is required as HS200
 *		Then when switching to DDR @ 400MHz (HS400) we use
 *		the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode.
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);
	else
		msm_hc_select_default(host);
}
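/*
 * Example (illustrative): while tuning for HS400 the core layer runs at
 * HS200 timing with SDHCI_HS400_TUNING set, so the HS400 path is taken:
 * MCLK/2 is selected in VENDOR_SPEC and the GCC clock is later doubled
 * by msm_set_clock_rate_for_bus_mode(), giving the card its 200 MHz
 * tuning clock from a 400 MHz MCLK.
 */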
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
			sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * The core_ddr_config register currently defaults to the desired
	 * configuration on reset. Reprogram the power-on reset (POR) value
	 * here in case a bootloader has modified it. In the future, if this
	 * changes, the desired values will need to be programmed
	 * appropriately.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us,
	 * and MCLK must be switched on for at least 1us before DATA
	 * starts coming. Controllers with 14lpp and later tech DLL cannot
	 * guarantee the above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/*
	 * Drain the writebuffer to ensure the above DLL calibration
	 * and PWRSAVE DLL writes have landed.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;

	if (ios->timing == MMC_TIMING_UHS_SDR50 &&
	    host->flags & SDHCI_SDR50_NEEDS_TUNING)
		return true;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
	 * when the clock frequency is greater than 100 MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios->timing == MMC_TIMING_MMC_HS400 ||
	      ios->timing == MMC_TIMING_MMC_HS200 ||
	      ios->timing == MMC_TIMING_UHS_SDR104) ||
	    ios->enhanced_strobe)
		return false;

	return true;
}
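/*
 * Example (illustrative): HS200 at 200 MHz needs tuning, while HS400
 * with enhanced_strobe, or any mode at or below 100 MHz, does not.
 * SDR50 is the special case gated by SDHCI_SDR50_NEEDS_TUNING.
 */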
static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	/*
	 * The SDR DLL comes into the picture only for timing modes which
	 * need tuning.
	 */
	if (!sdhci_msm_is_tuning_needed(host))
		return 0;

	/* Reset the tuning block */
	ret = msm_init_cm_dll(host);
	if (ret)
		return ret;

	/* Restore the tuning block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

	return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
	const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
	u32 config, oldconfig = readl_relaxed(host->ioaddr +
					      msm_offset->core_dll_config);

	config = oldconfig;
	if (enable) {
		config |= CORE_CDR_EN;
		config &= ~CORE_CDR_EXT_EN;
	} else {
		config &= ~CORE_CDR_EN;
		config |= CORE_CDR_EXT_EN;
	}

	if (config != oldconfig) {
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_dll_config);
	}
}

static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 10;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;
	u32 config;

	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * Clear the tuning_done flag before tuning to ensure proper
	 * HS400 settings.
	 */
	msm_host->tuning_done = 0;

	if (ios.timing == MMC_TIMING_UHS_SDR50 &&
	    host->flags & SDHCI_SDR50_NEEDS_TUNING) {
		config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
		config &= ~CORE_HC_SELECT_IN_MASK;
		config |= CORE_HC_SELECT_IN_EN | CORE_HC_SELECT_IN_SDR50;
		writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	}

	/*
	 * HS400 tuning in HS200 timing requires:
	 * - select MCLK/2 in VENDOR_SPEC
	 * - program MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
			/*
			 * All phases valid is _almost_ as bad as no phases
			 * valid. Probably all phases are not really reliable
			 * but we didn't detect where the unreliable place is.
			 * That means we'll essentially be guessing and hoping
			 * we get a good phase. Better to try a few times.
			 */
			dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
				mmc_hostname(mmc));
			if (--tuning_seq_cnt) {
				tuned_phase_cnt = 0;
				goto retry;
			}
		}

		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}
/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * a fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When the clock frequency is less than 100MHz, the feedback clock
	 * must be provided and DLL must not be used so that tuning can be
	 * skipped. To provide the feedback clock, the mode selection can be
	 * any value less than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz
		 * Thus, make sure it is disabled when not required
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}
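/*
 * Example (illustrative): for MMC_TIMING_MMC_HS200 at 200 MHz the
 * switch above programs SDHCI_CTRL_UHS_SDR104 into HOST_CONTROL2;
 * at 100 MHz or below the UHS field is cleared again so the fixed
 * feedback clock is used and the DLL stays in reset/power-down.
 */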
static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
{
	struct platform_device *pdev = msm_host->pdev;
	int ret;

	if (level)
		ret = pinctrl_pm_select_default_state(&pdev->dev);
	else
		ret = pinctrl_pm_select_sleep_state(&pdev->dev);

	return ret;
}

static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm)
{
	int load;

	if (!hpm)
		load = 0;
	else if (!mmc->card)
		load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA);
	else if (mmc_card_mmc(mmc->card))
		load = MMC_VMMC_MAX_LOAD_UA;
	else if (mmc_card_sd(mmc->card))
		load = SD_VMMC_MAX_LOAD_UA;
	else
		return;

	regulator_set_load(mmc->supply.vmmc, load);
}

static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm)
{
	int load;

	if (!hpm)
		load = 0;
	else if (!mmc->card)
		load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA);
	else if (mmc_card_sd(mmc->card))
		load = SD_VQMMC_MAX_LOAD_UA;
	else
		return;

	regulator_set_load(mmc->supply.vqmmc, load);
}

static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host,
			      struct mmc_host *mmc, bool hpm)
{
	if (IS_ERR(mmc->supply.vmmc))
		return 0;

	msm_config_vmmc_regulator(mmc, hpm);

	return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}

static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
			    struct mmc_host *mmc, bool level)
{
	int ret;
	struct mmc_ios ios;

	if (msm_host->vqmmc_enabled == level)
		return 0;

	msm_config_vqmmc_regulator(mmc, level);

	if (level) {
		/* Set the IO voltage regulator to default voltage level */
		if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
		else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;

		if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
			ret = mmc_regulator_set_vqmmc(mmc, &ios);
			if (ret < 0) {
				dev_err(mmc_dev(mmc), "%s: vqmmc set voltage failed: %d\n",
					mmc_hostname(mmc), ret);
				goto out;
			}
		}
		ret = regulator_enable(mmc->supply.vqmmc);
	} else {
		ret = regulator_disable(mmc->supply.vqmmc);
	}

	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc %sable failed: %d\n",
			mmc_hostname(mmc), level ? "en" : "dis", ret);
	else
		msm_host->vqmmc_enabled = level;
out:
	return ret;
}
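/*
 * Example (illustrative): for a removable SD card in HPM the Vdd-io
 * load is capped at SD_VQMMC_MAX_LOAD_UA (22 mA); an initialized eMMC
 * instead goes through msm_config_vqmmc_mode() below, which requests
 * MMC_VQMMC_MAX_LOAD_UA (325 mA) in HPM and 0 in LPM.
 */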
static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host,
				 struct mmc_host *mmc, bool hpm)
{
	int load, ret;

	load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0;
	ret = regulator_set_load(mmc->supply.vqmmc, load);
	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n",
			mmc_hostname(mmc), ret);
	return ret;
}

static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host,
			       struct mmc_host *mmc, bool level)
{
	int ret;
	bool always_on;

	if (IS_ERR(mmc->supply.vqmmc) ||
	    (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
		return 0;
	/*
	 * For eMMC, don't turn off Vqmmc; instead just configure it in LPM
	 * and HPM modes by setting the corresponding load.
	 *
	 * Until eMMC is initialized (i.e. always_on == 0), just turn on/off
	 * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off
	 * gets invoked. Once eMMC is initialized (i.e. always_on == 1),
	 * Vqmmc should remain ON, so just set the load instead of turning it
	 * off/on.
	 */
	always_on = !mmc_card_is_removable(mmc) &&
		    mmc->card && mmc_card_mmc(mmc->card);

	if (always_on)
		ret = msm_config_vqmmc_mode(msm_host, mmc, level);
	else
		ret = msm_toggle_vqmmc(msm_host, mmc, level);

	return ret;
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status should be called on register writes that can
 * toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW. The state
 * to which the register write will change the IO lines should be passed as
 * the argument req_type. This API checks whether the IO line state is
 * already the expected state and waits for the power irq only if the power
 * irq is expected to be triggered based on the current and expected IO line
 * states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
	 * it as always set.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}
	/*
	 * The IRQ for request type IO High/LOW will be generated when
	 * there is a state change in the 1.8V enable bit (bit 3) of the
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0,
	 * which indicates 3.3V IO voltage. So, when the MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of the controller power up sequence. Hence,
	 * check for host->pwr to handle a case where an IO voltage high
	 * request is issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}

	if ((req_type & REQ_BUS_ON) && mmc->card && !mmc->ops->get_cd(mmc)) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		host->pwr = 0;
	}

	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
		 __func__, req_type);
}

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
			msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
	       mmc_hostname(host->mmc),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	u32 irq_status, irq_ack = 0;
	int retry = 10, ret;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and the clear/read of the status
	 * register happen at the same time. Hence, retry at least 10 times
	 * to make sure the status register is cleared. Otherwise, this will
	 * result in a spurious power IRQ, resulting in system instability.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
			       mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
				msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	if ((irq_status & CORE_PWRCTL_BUS_ON) && mmc->card &&
	    !mmc->ops->get_cd(mmc)) {
		msm_host_writel(msm_host, CORE_PWRCTL_BUS_FAIL, host,
				msm_offset->core_pwrctl_ctl);
		return;
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}

	if (pwr_state) {
		ret = sdhci_msm_set_vmmc(msm_host, mmc,
					 pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_pincfg(msm_host,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
		else
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
	}

	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW)
		io_level = REQ_IO_LOW;

	if (irq_status & CORE_PWRCTL_IO_HIGH)
		io_level = REQ_IO_HIGH;

	if (io_level)
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;

	if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
		ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n",
				mmc_hostname(mmc), ret,
				mmc->ios.signal_voltage, mmc->ios.vdd,
				irq_status);
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		}
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back to this register whether it succeeded or not. The
	 * voltage switches are handled by the sdhci core, so just report
	 * success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * the regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset the IO PAD PWR switch only if the register
		 * write can set the IO lines high and the regulator also
		 * switches to 3 V. Else, we should keep the IO PAD PWR
		 * switch set. This is applicable to certain targets where
		 * the eMMC vccq supply is only 1.8V. In such targets, even
		 * during REQ_IO_HIGH, the IO PAD PWR switch must be kept set
		 * to reflect the actual regulator voltage. This way, during
		 * initialization of controllers with only 1.8V, we will set
		 * the IO PAD bit without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
		    (msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
			 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}
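/*
 * Example (illustrative): a BUS_ON request from the controller raises
 * CORE_PWRCTL_BUS_ON in PWRCTL_STATUS; the handler above enables vmmc,
 * vqmmc and the default pinctrl state, then acks with
 * CORE_PWRCTL_BUS_SUCCESS (or CORE_PWRCTL_BUS_FAIL) in PWRCTL_CTL so
 * the controller can complete the power sequence.
 */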
1781 */
1782 config = readl_relaxed(host->ioaddr +
1783 msm_offset->core_vendor_spec);
1784 new_config = config;
1785
1786 if ((io_level & REQ_IO_HIGH) &&
1787 (msm_host->caps_0 & CORE_3_0V_SUPPORT))
1788 new_config &= ~CORE_IO_PAD_PWR_SWITCH;
1789 else if ((io_level & REQ_IO_LOW) ||
1790 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
1791 new_config |= CORE_IO_PAD_PWR_SWITCH;
1792
1793 if (config ^ new_config)
1794 writel_relaxed(new_config, host->ioaddr +
1795 msm_offset->core_vendor_spec);
1796 }
1797
1798 if (pwr_state)
1799 msm_host->curr_pwr_state = pwr_state;
1800 if (io_level)
1801 msm_host->curr_io_level = io_level;
1802
1803 dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
1804 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
1805 irq_ack);
1806 }
1807
1808 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1809 {
1810 struct sdhci_host *host = (struct sdhci_host *)data;
1811 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1812 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1813
1814 sdhci_msm_handle_pwr_irq(host, irq);
1815 msm_host->pwr_irq_flag = 1;
1816 sdhci_msm_complete_pwr_irq_wait(msm_host);
1817
1818
1819 return IRQ_HANDLED;
1820 }
1821
1822 static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
1823 {
1824 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1825 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1826 struct clk *core_clk = msm_host->bulk_clks[0].clk;
1827
1828 return clk_round_rate(core_clk, ULONG_MAX);
1829 }
1830
1831 static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
1832 {
1833 return SDHCI_MSM_MIN_CLOCK;
1834 }
1835
1836 /*
1837 * __sdhci_msm_set_clock - sdhci_msm clock control.
1838 *
1839 * Description:
1840 * The MSM controller does not use the internal divider and
1841 * instead directly controls the GCC clock as per
1842 * HW recommendation.
1843 */
1844 static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1845 {
1846 u16 clk;
1847
1848 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1849
1850 if (clock == 0)
1851 return;
1852
1853 /*
1854 * The MSM controller does not use the clock divider.
1855 * Thus read SDHCI_CLOCK_CONTROL and only enable the
1856 * clock with no divider value programmed.
1857 */
1858 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1859 sdhci_enable_clk(host, clk);
1860 }
1861
1862 /* sdhci_msm_set_clock - Called with (host->lock) spinlock held.
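 * A clock value of 0 only clears SDHCI_CLOCK_CONTROL and the cached
 * rate; the GCC core clock itself is left at its last programmed rate.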
*/ 1863 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) 1864 { 1865 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1866 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1867 1868 if (!clock) { 1869 host->mmc->actual_clock = msm_host->clk_rate = 0; 1870 goto out; 1871 } 1872 1873 sdhci_msm_hc_select_mode(host); 1874 1875 msm_set_clock_rate_for_bus_mode(host, clock); 1876 out: 1877 __sdhci_msm_set_clock(host, clock); 1878 } 1879 1880 /*****************************************************************************\ 1881 * * 1882 * Inline Crypto Engine (ICE) support * 1883 * * 1884 \*****************************************************************************/ 1885 1886 #ifdef CONFIG_MMC_CRYPTO 1887 1888 static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */ 1889 1890 static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, 1891 struct cqhci_host *cq_host) 1892 { 1893 struct mmc_host *mmc = msm_host->mmc; 1894 struct blk_crypto_profile *profile = &mmc->crypto_profile; 1895 struct device *dev = mmc_dev(mmc); 1896 struct qcom_ice *ice; 1897 union cqhci_crypto_capabilities caps; 1898 union cqhci_crypto_cap_entry cap; 1899 int err; 1900 int i; 1901 1902 if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS)) 1903 return 0; 1904 1905 ice = devm_of_qcom_ice_get(dev); 1906 if (ice == ERR_PTR(-EOPNOTSUPP)) { 1907 dev_warn(dev, "Disabling inline encryption support\n"); 1908 ice = NULL; 1909 } 1910 1911 if (IS_ERR_OR_NULL(ice)) 1912 return PTR_ERR_OR_ZERO(ice); 1913 1914 if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_RAW) { 1915 dev_warn(dev, "Wrapped keys not supported. Disabling inline encryption support.\n"); 1916 return 0; 1917 } 1918 1919 msm_host->ice = ice; 1920 1921 /* Initialize the blk_crypto_profile */ 1922 1923 caps.reg_val = cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP)); 1924 1925 /* The number of keyslots supported is (CFGC+1) */ 1926 err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1); 1927 if (err) 1928 return err; 1929 1930 profile->ll_ops = sdhci_msm_crypto_ops; 1931 profile->max_dun_bytes_supported = 4; 1932 profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW; 1933 profile->dev = dev; 1934 1935 /* 1936 * Currently this driver only supports AES-256-XTS. All known versions 1937 * of ICE support it, but to be safe make sure it is really declared in 1938 * the crypto capability registers. The crypto capability registers 1939 * also give the supported data unit size(s). 
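 * Each set bit i in a capability's sdus_mask advertises support for a
 * data-unit size of (512 << i) bytes, so the loop below multiplies the
 * mask by 512 to build the byte-granular bitmask that blk-crypto
 * expects in modes_supported[].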
1940 */ 1941 for (i = 0; i < caps.num_crypto_cap; i++) { 1942 cap.reg_val = cpu_to_le32(cqhci_readl(cq_host, 1943 CQHCI_CRYPTOCAP + 1944 i * sizeof(__le32))); 1945 if (cap.algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS && 1946 cap.key_size == CQHCI_CRYPTO_KEY_SIZE_256) 1947 profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 1948 cap.sdus_mask * 512; 1949 } 1950 1951 mmc->caps2 |= MMC_CAP2_CRYPTO; 1952 return 0; 1953 } 1954 1955 static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host) 1956 { 1957 if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO) 1958 qcom_ice_enable(msm_host->ice); 1959 } 1960 1961 static int sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host) 1962 { 1963 if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO) 1964 return qcom_ice_resume(msm_host->ice); 1965 1966 return 0; 1967 } 1968 1969 static int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host) 1970 { 1971 if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO) 1972 return qcom_ice_suspend(msm_host->ice); 1973 1974 return 0; 1975 } 1976 1977 static inline struct sdhci_msm_host * 1978 sdhci_msm_host_from_crypto_profile(struct blk_crypto_profile *profile) 1979 { 1980 struct mmc_host *mmc = mmc_from_crypto_profile(profile); 1981 struct sdhci_host *host = mmc_priv(mmc); 1982 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1983 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1984 1985 return msm_host; 1986 } 1987 1988 /* 1989 * Program a key into a QC ICE keyslot. QC ICE requires a QC-specific SCM call 1990 * for this; it doesn't support the standard way. 1991 */ 1992 static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile, 1993 const struct blk_crypto_key *key, 1994 unsigned int slot) 1995 { 1996 struct sdhci_msm_host *msm_host = 1997 sdhci_msm_host_from_crypto_profile(profile); 1998 1999 return qcom_ice_program_key(msm_host->ice, slot, key); 2000 } 2001 2002 static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile, 2003 const struct blk_crypto_key *key, 2004 unsigned int slot) 2005 { 2006 struct sdhci_msm_host *msm_host = 2007 sdhci_msm_host_from_crypto_profile(profile); 2008 2009 return qcom_ice_evict_key(msm_host->ice, slot); 2010 } 2011 2012 static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = { 2013 .keyslot_program = sdhci_msm_ice_keyslot_program, 2014 .keyslot_evict = sdhci_msm_ice_keyslot_evict, 2015 }; 2016 2017 #else /* CONFIG_MMC_CRYPTO */ 2018 2019 static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, 2020 struct cqhci_host *cq_host) 2021 { 2022 return 0; 2023 } 2024 2025 static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host) 2026 { 2027 } 2028 2029 static inline int 2030 sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host) 2031 { 2032 return 0; 2033 } 2034 2035 static inline int 2036 sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host) 2037 { 2038 return 0; 2039 } 2040 #endif /* !CONFIG_MMC_CRYPTO */ 2041 2042 /*****************************************************************************\ 2043 * * 2044 * MSM Command Queue Engine (CQE) * 2045 * * 2046 \*****************************************************************************/ 2047 2048 static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) 2049 { 2050 int cmd_error = 0; 2051 int data_error = 0; 2052 2053 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) 2054 return intmask; 2055 2056 cqhci_irq(host->mmc, intmask, cmd_error, data_error); 2057 return 0; 2058 } 2059 2060 static void sdhci_msm_cqe_enable(struct mmc_host *mmc) 2061 { 2062 struct 
sdhci_host *host = mmc_priv(mmc);
2063 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2064 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2065
2066 sdhci_cqe_enable(mmc);
2067 sdhci_msm_ice_enable(msm_host);
2068 }
2069
2070 static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
2071 {
2072 struct sdhci_host *host = mmc_priv(mmc);
2073 unsigned long flags;
2074 u32 ctrl;
2075
2076 /*
2077 * When CQE is halted, the legacy SDHCI path operates only
2078 * on 16-byte descriptors in 64-bit mode.
2079 */
2080 if (host->flags & SDHCI_USE_64_BIT_DMA)
2081 host->desc_sz = 16;
2082
2083 spin_lock_irqsave(&host->lock, flags);
2084
2085 /*
2086 * During CQE command transfers, the command complete bit gets latched.
2087 * So software should clear the command complete interrupt status when CQE is
2088 * either halted or disabled. Otherwise an unexpected SDHCI legacy
2089 * interrupt gets triggered when CQE is halted/disabled.
2090 */
2091 ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
2092 ctrl |= SDHCI_INT_RESPONSE;
2093 sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
2094 sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
2095
2096 spin_unlock_irqrestore(&host->lock, flags);
2097
2098 sdhci_cqe_disable(mmc, recovery);
2099 }
2100
2101 static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
2102 {
2103 u32 count, start = 15;
2104
2105 __sdhci_set_timeout(host, cmd);
2106 count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
2107 /*
2108 * Update the software timeout value if it is less than the hardware data
2109 * timeout value. The Qcom SoC hardware data timeout is calculated as
2110 * 4 * 2^(count + 13) MCLK cycles, where MCLK = host->clock.
2111 */
2112 if (cmd && cmd->data && host->clock > 400000 &&
2113 host->clock <= 50000000 &&
2114 ((1 << (count + start)) > (10 * host->clock)))
2115 host->data_timeout = 22LL * NSEC_PER_SEC;
2116 }
2117
2118 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
2119 .enable = sdhci_msm_cqe_enable,
2120 .disable = sdhci_msm_cqe_disable,
2121 #ifdef CONFIG_MMC_CRYPTO
2122 .uses_custom_crypto_profile = true,
2123 #endif
2124 };
2125
2126 static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
2127 struct platform_device *pdev)
2128 {
2129 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2130 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2131 struct cqhci_host *cq_host;
2132 bool dma64;
2133 u32 cqcfg;
2134 int ret;
2135
2136 /*
2137 * When CQE is halted, the SDHC operates only on 16-byte ADMA descriptors.
2138 * So ensure the ADMA table is allocated for 16-byte descriptors.
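 * The table is sized for the larger 16-byte layout up front so that
 * desc_sz can later be switched between 12 (before CQE is enabled) and
 * 16 (while CQE is halted) without reallocating.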
2139 */
2140 if (host->caps & SDHCI_CAN_64BIT)
2141 host->alloc_desc_sz = 16;
2142
2143 ret = sdhci_setup_host(host);
2144 if (ret)
2145 return ret;
2146
2147 cq_host = cqhci_pltfm_init(pdev);
2148 if (IS_ERR(cq_host)) {
2149 ret = PTR_ERR(cq_host);
2150 dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
2151 goto cleanup;
2152 }
2153
2154 msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
2155 cq_host->ops = &sdhci_msm_cqhci_ops;
2156
2157 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
2158
2159 ret = sdhci_msm_ice_init(msm_host, cq_host);
2160 if (ret)
2161 goto cleanup;
2162
2163 ret = cqhci_init(cq_host, host->mmc, dma64);
2164 if (ret) {
2165 dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
2166 mmc_hostname(host->mmc), ret);
2167 goto cleanup;
2168 }
2169
2170 /* Disable cqe reset due to cqe enable signal */
2171 cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
2172 cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
2173 cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
2174
2175 /*
2176 * The SDHC expects 12-byte ADMA descriptors until CQE is enabled.
2177 * So limit desc_sz to 12 so that the data commands that are sent
2178 * during card initialization (before CQE gets enabled) are
2179 * executed without any issues.
2180 */
2181 if (host->flags & SDHCI_USE_64_BIT_DMA)
2182 host->desc_sz = 12;
2183
2184 ret = __sdhci_add_host(host);
2185 if (ret)
2186 goto cleanup;
2187
2188 dev_info(&pdev->dev, "%s: CQE init: success\n",
2189 mmc_hostname(host->mmc));
2190 return ret;
2191
2192 cleanup:
2193 sdhci_cleanup_host(host);
2194 return ret;
2195 }
2196
2197 /*
2198 * Platform specific register write functions. This is so that, if any
2199 * register write needs to be followed up by platform specific actions,
2200 * they can be added here. These functions can go to sleep when writes
2201 * to certain registers are done.
2202 * These functions rely on sdhci_set_ios not taking the spinlock.
2203 */
2204 static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
2205 {
2206 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2207 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2208 u32 req_type = 0;
2209
2210 switch (reg) {
2211 case SDHCI_HOST_CONTROL2:
2212 req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
2213 REQ_IO_HIGH;
2214 break;
2215 case SDHCI_SOFTWARE_RESET:
2216 if (host->pwr && (val & SDHCI_RESET_ALL))
2217 req_type = REQ_BUS_OFF;
2218 break;
2219 case SDHCI_POWER_CONTROL:
2220 req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
2221 break;
2222 case SDHCI_TRANSFER_MODE:
2223 msm_host->transfer_mode = val;
2224 break;
2225 case SDHCI_COMMAND:
2226 if (!msm_host->use_cdr)
2227 break;
2228 if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
2229 !mmc_op_tuning(SDHCI_GET_CMD(val)))
2230 sdhci_msm_set_cdr(host, true);
2231 else
2232 sdhci_msm_set_cdr(host, false);
2233 break;
2234 }
2235
2236 if (req_type) {
2237 msm_host->pwr_irq_flag = 0;
2238 /*
2239 * Since this register write may trigger a power irq, ensure
2240 * all previous register writes are complete by this point.
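 * The driver otherwise uses relaxed MMIO accessors, so an explicit
 * barrier is needed here to order those earlier writes.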
2241 */ 2242 mb(); 2243 } 2244 return req_type; 2245 } 2246 2247 /* This function may sleep*/ 2248 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) 2249 { 2250 u32 req_type = 0; 2251 2252 req_type = __sdhci_msm_check_write(host, val, reg); 2253 writew_relaxed(val, host->ioaddr + reg); 2254 2255 if (req_type) 2256 sdhci_msm_check_power_status(host, req_type); 2257 } 2258 2259 /* This function may sleep*/ 2260 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) 2261 { 2262 u32 req_type = 0; 2263 2264 req_type = __sdhci_msm_check_write(host, val, reg); 2265 2266 writeb_relaxed(val, host->ioaddr + reg); 2267 2268 if (req_type) 2269 sdhci_msm_check_power_status(host, req_type); 2270 } 2271 2272 static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host) 2273 { 2274 struct mmc_host *mmc = msm_host->mmc; 2275 struct regulator *supply = mmc->supply.vqmmc; 2276 u32 caps = 0, config; 2277 struct sdhci_host *host = mmc_priv(mmc); 2278 const struct sdhci_msm_offset *msm_offset = msm_host->offset; 2279 2280 if (!IS_ERR(mmc->supply.vqmmc)) { 2281 if (regulator_is_supported_voltage(supply, 1700000, 1950000)) 2282 caps |= CORE_1_8V_SUPPORT; 2283 if (regulator_is_supported_voltage(supply, 2700000, 3600000)) 2284 caps |= CORE_3_0V_SUPPORT; 2285 2286 if (!caps) 2287 pr_warn("%s: 1.8/3V not supported for vqmmc\n", 2288 mmc_hostname(mmc)); 2289 } 2290 2291 if (caps) { 2292 /* 2293 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH 2294 * bit can be used as required later on. 2295 */ 2296 u32 io_level = msm_host->curr_io_level; 2297 2298 config = readl_relaxed(host->ioaddr + 2299 msm_offset->core_vendor_spec); 2300 config |= CORE_IO_PAD_PWR_SWITCH_EN; 2301 2302 if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT)) 2303 config &= ~CORE_IO_PAD_PWR_SWITCH; 2304 else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT)) 2305 config |= CORE_IO_PAD_PWR_SWITCH; 2306 2307 writel_relaxed(config, 2308 host->ioaddr + msm_offset->core_vendor_spec); 2309 } 2310 msm_host->caps_0 |= caps; 2311 pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps); 2312 } 2313 2314 static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host) 2315 { 2316 int ret; 2317 2318 ret = mmc_regulator_get_supply(msm_host->mmc); 2319 if (ret) 2320 return ret; 2321 2322 sdhci_msm_set_regulator_caps(msm_host); 2323 2324 return 0; 2325 } 2326 2327 static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc, 2328 struct mmc_ios *ios) 2329 { 2330 struct sdhci_host *host = mmc_priv(mmc); 2331 u16 ctrl, status; 2332 2333 /* 2334 * Signal Voltage Switching is only applicable for Host Controllers 2335 * v3.00 and above. 
2336 */ 2337 if (host->version < SDHCI_SPEC_300) 2338 return 0; 2339 2340 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2341 2342 switch (ios->signal_voltage) { 2343 case MMC_SIGNAL_VOLTAGE_330: 2344 if (!(host->flags & SDHCI_SIGNALING_330)) 2345 return -EINVAL; 2346 2347 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2348 ctrl &= ~SDHCI_CTRL_VDD_180; 2349 break; 2350 case MMC_SIGNAL_VOLTAGE_180: 2351 if (!(host->flags & SDHCI_SIGNALING_180)) 2352 return -EINVAL; 2353 2354 /* Enable 1.8V Signal Enable in the Host Control2 register */ 2355 ctrl |= SDHCI_CTRL_VDD_180; 2356 break; 2357 2358 default: 2359 return -EINVAL; 2360 } 2361 2362 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2363 2364 /* Wait for 5ms */ 2365 usleep_range(5000, 5500); 2366 2367 /* regulator output should be stable within 5 ms */ 2368 status = ctrl & SDHCI_CTRL_VDD_180; 2369 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2370 if ((ctrl & SDHCI_CTRL_VDD_180) == status) 2371 return 0; 2372 2373 dev_warn(mmc_dev(mmc), "%s: Regulator output did not became stable\n", 2374 mmc_hostname(mmc)); 2375 2376 return -EAGAIN; 2377 } 2378 2379 #define DRIVER_NAME "sdhci_msm" 2380 #define SDHCI_MSM_DUMP(f, x...) \ 2381 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 2382 2383 static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) 2384 { 2385 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2386 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2387 const struct sdhci_msm_offset *msm_offset = msm_host->offset; 2388 2389 SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n"); 2390 2391 SDHCI_MSM_DUMP( 2392 "DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n", 2393 readl_relaxed(host->ioaddr + msm_offset->core_dll_status), 2394 readl_relaxed(host->ioaddr + msm_offset->core_dll_config), 2395 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2)); 2396 SDHCI_MSM_DUMP( 2397 "DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n", 2398 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3), 2399 readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl), 2400 readl_relaxed(host->ioaddr + msm_offset->core_ddr_config)); 2401 SDHCI_MSM_DUMP( 2402 "Vndr func: 0x%08x | Vndr func2 : 0x%08x Vndr func3: 0x%08x\n", 2403 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec), 2404 readl_relaxed(host->ioaddr + 2405 msm_offset->core_vendor_spec_func2), 2406 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3)); 2407 } 2408 2409 static const struct sdhci_msm_variant_ops mci_var_ops = { 2410 .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed, 2411 .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed, 2412 }; 2413 2414 static const struct sdhci_msm_variant_ops v5_var_ops = { 2415 .msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed, 2416 .msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed, 2417 }; 2418 2419 static const struct sdhci_msm_variant_info sdhci_msm_mci_var = { 2420 .var_ops = &mci_var_ops, 2421 .offset = &sdhci_msm_mci_offset, 2422 }; 2423 2424 static const struct sdhci_msm_variant_info sdhci_msm_v5_var = { 2425 .mci_removed = true, 2426 .var_ops = &v5_var_ops, 2427 .offset = &sdhci_msm_v5_offset, 2428 }; 2429 2430 static const struct sdhci_msm_variant_info sdm845_sdhci_var = { 2431 .mci_removed = true, 2432 .restore_dll_config = true, 2433 .var_ops = &v5_var_ops, 2434 .offset = &sdhci_msm_v5_offset, 2435 }; 2436 2437 static const struct of_device_id sdhci_msm_dt_match[] = { 2438 /* 2439 * Do not add new variants to the 
driver which are compatible with
2440 * generic ones, unless they need customization.
2441 */
2442 {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
2443 {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
2444 {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
2445 {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
2446 {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
2447 {},
2448 };
2449
2450 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
2451
2452 static const struct sdhci_ops sdhci_msm_ops = {
2453 .reset = sdhci_and_cqhci_reset,
2454 .set_clock = sdhci_msm_set_clock,
2455 .get_min_clock = sdhci_msm_get_min_clock,
2456 .get_max_clock = sdhci_msm_get_max_clock,
2457 .set_bus_width = sdhci_set_bus_width,
2458 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
2459 .write_w = sdhci_msm_writew,
2460 .write_b = sdhci_msm_writeb,
2461 .irq = sdhci_msm_cqe_irq,
2462 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
2463 .set_power = sdhci_set_power_noreg,
2464 .set_timeout = sdhci_msm_set_timeout,
2465 };
2466
2467 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
2468 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
2469 SDHCI_QUIRK_SINGLE_POWER_WRITE |
2470 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
2471 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
2472
2473 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
2474 .ops = &sdhci_msm_ops,
2475 };
2476
2477 static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
2478 struct sdhci_host *host)
2479 {
2480 struct device_node *node = pdev->dev.of_node;
2481 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2482 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2483
2484 if (of_property_read_u32(node, "qcom,ddr-config",
2485 &msm_host->ddr_config))
2486 msm_host->ddr_config = DDR_CONFIG_POR_VAL;
2487
2488 of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
2489
2490 if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
2491 host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
2492 }
2493
2494 static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
2495 {
2496 struct reset_control *reset;
2497 int ret = 0;
2498
2499 reset = reset_control_get_optional_exclusive(dev, NULL);
2500 if (IS_ERR(reset))
2501 return dev_err_probe(dev, PTR_ERR(reset),
2502 "unable to acquire core_reset\n");
2503
2504 if (!reset)
2505 return ret;
2506
2507 ret = reset_control_assert(reset);
2508 if (ret) {
2509 reset_control_put(reset);
2510 return dev_err_probe(dev, ret, "core_reset assert failed\n");
2511 }
2512
2513 /*
2514 * The hardware requirement for the delay between assert/deassert
2515 * is at least 3-4 sleep clock (32.768 kHz) cycles, which comes to
2516 * ~122us (4/32768). To be on the safe side, add a 200us delay.
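 * The same settling delay is applied again after deassert, before the
 * controller registers are touched.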
2517 */ 2518 usleep_range(200, 210); 2519 2520 ret = reset_control_deassert(reset); 2521 if (ret) { 2522 reset_control_put(reset); 2523 return dev_err_probe(dev, ret, "core_reset deassert failed\n"); 2524 } 2525 2526 usleep_range(200, 210); 2527 reset_control_put(reset); 2528 2529 return ret; 2530 } 2531 2532 static int sdhci_msm_probe(struct platform_device *pdev) 2533 { 2534 struct sdhci_host *host; 2535 struct sdhci_pltfm_host *pltfm_host; 2536 struct sdhci_msm_host *msm_host; 2537 struct clk *clk; 2538 int ret; 2539 u16 host_version, core_minor; 2540 u32 core_version, config; 2541 u8 core_major; 2542 const struct sdhci_msm_offset *msm_offset; 2543 const struct sdhci_msm_variant_info *var_info; 2544 struct device_node *node = pdev->dev.of_node; 2545 2546 host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); 2547 if (IS_ERR(host)) 2548 return PTR_ERR(host); 2549 2550 host->sdma_boundary = 0; 2551 pltfm_host = sdhci_priv(host); 2552 msm_host = sdhci_pltfm_priv(pltfm_host); 2553 msm_host->mmc = host->mmc; 2554 msm_host->pdev = pdev; 2555 2556 ret = mmc_of_parse(host->mmc); 2557 if (ret) 2558 return ret; 2559 2560 /* 2561 * Based on the compatible string, load the required msm host info from 2562 * the data associated with the version info. 2563 */ 2564 var_info = of_device_get_match_data(&pdev->dev); 2565 2566 msm_host->mci_removed = var_info->mci_removed; 2567 msm_host->restore_dll_config = var_info->restore_dll_config; 2568 msm_host->var_ops = var_info->var_ops; 2569 msm_host->offset = var_info->offset; 2570 2571 msm_offset = msm_host->offset; 2572 2573 sdhci_get_of_property(pdev); 2574 sdhci_msm_get_of_property(pdev, host); 2575 2576 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; 2577 2578 ret = sdhci_msm_gcc_reset(&pdev->dev, host); 2579 if (ret) 2580 return ret; 2581 2582 /* Setup SDCC bus voter clock. */ 2583 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); 2584 if (!IS_ERR(msm_host->bus_clk)) { 2585 /* Vote for max. clk rate for max. 
performance */
2586 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
2587 if (ret)
2588 return ret;
2589 ret = clk_prepare_enable(msm_host->bus_clk);
2590 if (ret)
2591 return ret;
2592 }
2593
2594 /* Setup main peripheral bus clock */
2595 clk = devm_clk_get(&pdev->dev, "iface");
2596 if (IS_ERR(clk)) {
2597 ret = PTR_ERR(clk);
2598 dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
2599 goto bus_clk_disable;
2600 }
2601 msm_host->bulk_clks[1].clk = clk;
2602
2603 /* Setup SDC MMC clock */
2604 clk = devm_clk_get(&pdev->dev, "core");
2605 if (IS_ERR(clk)) {
2606 ret = PTR_ERR(clk);
2607 dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
2608 goto bus_clk_disable;
2609 }
2610 msm_host->bulk_clks[0].clk = clk;
2611
2612 /* Check for optional interconnect paths */
2613 ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);
2614 if (ret)
2615 goto bus_clk_disable;
2616
2617 ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
2618 if (ret)
2619 goto bus_clk_disable;
2620
2621 /* OPP table is optional */
2622 ret = devm_pm_opp_of_add_table(&pdev->dev);
2623 if (ret && ret != -ENODEV) {
2624 dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
2625 goto bus_clk_disable;
2626 }
2627
2628 /* Vote for maximum clock rate for maximum performance */
2629 ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
2630 if (ret)
2631 dev_warn(&pdev->dev, "core clock boost failed\n");
2632
2633 clk = devm_clk_get(&pdev->dev, "cal");
2634 if (IS_ERR(clk))
2635 clk = NULL;
2636 msm_host->bulk_clks[2].clk = clk;
2637
2638 clk = devm_clk_get(&pdev->dev, "sleep");
2639 if (IS_ERR(clk))
2640 clk = NULL;
2641 msm_host->bulk_clks[3].clk = clk;
2642
2643 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
2644 msm_host->bulk_clks);
2645 if (ret)
2646 goto bus_clk_disable;
2647
2648 /*
2649 * The xo clock is needed for the FLL feature of the cm_dll.
2650 * If the xo clock is not specified in DT, warn and proceed.
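 * (On Qualcomm SoCs the TCXO reference is typically 19.2 MHz.)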
2651 */ 2652 msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo"); 2653 if (IS_ERR(msm_host->xo_clk)) { 2654 ret = PTR_ERR(msm_host->xo_clk); 2655 dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret); 2656 } 2657 2658 if (!msm_host->mci_removed) { 2659 msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); 2660 if (IS_ERR(msm_host->core_mem)) { 2661 ret = PTR_ERR(msm_host->core_mem); 2662 goto clk_disable; 2663 } 2664 } 2665 2666 /* Reset the vendor spec register to power on reset state */ 2667 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL, 2668 host->ioaddr + msm_offset->core_vendor_spec); 2669 2670 if (!msm_host->mci_removed) { 2671 /* Set HC_MODE_EN bit in HC_MODE register */ 2672 msm_host_writel(msm_host, HC_MODE_EN, host, 2673 msm_offset->core_hc_mode); 2674 config = msm_host_readl(msm_host, host, 2675 msm_offset->core_hc_mode); 2676 config |= FF_CLK_SW_RST_DIS; 2677 msm_host_writel(msm_host, config, host, 2678 msm_offset->core_hc_mode); 2679 } 2680 2681 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); 2682 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", 2683 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >> 2684 SDHCI_VENDOR_VER_SHIFT)); 2685 2686 core_version = msm_host_readl(msm_host, host, 2687 msm_offset->core_mci_version); 2688 core_major = (core_version & CORE_VERSION_MAJOR_MASK) >> 2689 CORE_VERSION_MAJOR_SHIFT; 2690 core_minor = core_version & CORE_VERSION_MINOR_MASK; 2691 dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n", 2692 core_version, core_major, core_minor); 2693 2694 if (core_major == 1 && core_minor >= 0x42) 2695 msm_host->use_14lpp_dll_reset = true; 2696 2697 /* 2698 * SDCC 5 controller with major version 1, minor version 0x34 and later 2699 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL. 2700 */ 2701 if (core_major == 1 && core_minor < 0x34) 2702 msm_host->use_cdclp533 = true; 2703 2704 /* 2705 * Support for some capabilities is not advertised by newer 2706 * controller versions and must be explicitly enabled. 2707 */ 2708 if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) { 2709 config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); 2710 config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT; 2711 writel_relaxed(config, host->ioaddr + 2712 msm_offset->core_vendor_spec_capabilities0); 2713 } 2714 2715 if (core_major == 1 && core_minor >= 0x49) 2716 msm_host->updated_ddr_cfg = true; 2717 2718 if (core_major == 1 && core_minor >= 0x71) 2719 msm_host->uses_tassadar_dll = true; 2720 2721 ret = sdhci_msm_register_vreg(msm_host); 2722 if (ret) 2723 goto clk_disable; 2724 2725 /* 2726 * Power on reset state may trigger power irq if previous status of 2727 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq 2728 * interrupt in GIC, any pending power irq interrupt should be 2729 * acknowledged. Otherwise power irq interrupt handler would be 2730 * fired prematurely. 2731 */ 2732 sdhci_msm_handle_pwr_irq(host, 0); 2733 2734 /* 2735 * Ensure that above writes are propagated before interrupt enablement 2736 * in GIC. 
2737 */ 2738 mb(); 2739 2740 /* Setup IRQ for handling power/voltage tasks with PMIC */ 2741 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); 2742 if (msm_host->pwr_irq < 0) { 2743 ret = msm_host->pwr_irq; 2744 goto clk_disable; 2745 } 2746 2747 sdhci_msm_init_pwr_irq_wait(msm_host); 2748 /* Enable pwr irq interrupts */ 2749 msm_host_writel(msm_host, INT_MASK, host, 2750 msm_offset->core_pwrctl_mask); 2751 2752 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, 2753 sdhci_msm_pwr_irq, IRQF_ONESHOT, 2754 dev_name(&pdev->dev), host); 2755 if (ret) { 2756 dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); 2757 goto clk_disable; 2758 } 2759 2760 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; 2761 2762 /* Set the timeout value to max possible */ 2763 host->max_timeout_count = 0xF; 2764 2765 pm_runtime_get_noresume(&pdev->dev); 2766 pm_runtime_set_active(&pdev->dev); 2767 pm_runtime_enable(&pdev->dev); 2768 pm_runtime_set_autosuspend_delay(&pdev->dev, 2769 MSM_MMC_AUTOSUSPEND_DELAY_MS); 2770 pm_runtime_use_autosuspend(&pdev->dev); 2771 2772 host->mmc_host_ops.start_signal_voltage_switch = 2773 sdhci_msm_start_signal_voltage_switch; 2774 host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; 2775 if (of_property_read_bool(node, "supports-cqe")) 2776 ret = sdhci_msm_cqe_add_host(host, pdev); 2777 else 2778 ret = sdhci_add_host(host); 2779 if (ret) 2780 goto pm_runtime_disable; 2781 2782 pm_runtime_put_autosuspend(&pdev->dev); 2783 2784 return 0; 2785 2786 pm_runtime_disable: 2787 pm_runtime_disable(&pdev->dev); 2788 pm_runtime_set_suspended(&pdev->dev); 2789 pm_runtime_put_noidle(&pdev->dev); 2790 clk_disable: 2791 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2792 msm_host->bulk_clks); 2793 bus_clk_disable: 2794 if (!IS_ERR(msm_host->bus_clk)) 2795 clk_disable_unprepare(msm_host->bus_clk); 2796 return ret; 2797 } 2798 2799 static void sdhci_msm_remove(struct platform_device *pdev) 2800 { 2801 struct sdhci_host *host = platform_get_drvdata(pdev); 2802 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2803 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2804 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 2805 0xffffffff); 2806 2807 sdhci_remove_host(host, dead); 2808 2809 pm_runtime_get_sync(&pdev->dev); 2810 pm_runtime_disable(&pdev->dev); 2811 pm_runtime_put_noidle(&pdev->dev); 2812 2813 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2814 msm_host->bulk_clks); 2815 if (!IS_ERR(msm_host->bus_clk)) 2816 clk_disable_unprepare(msm_host->bus_clk); 2817 } 2818 2819 static int sdhci_msm_runtime_suspend(struct device *dev) 2820 { 2821 struct sdhci_host *host = dev_get_drvdata(dev); 2822 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2823 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2824 unsigned long flags; 2825 2826 spin_lock_irqsave(&host->lock, flags); 2827 host->runtime_suspended = true; 2828 spin_unlock_irqrestore(&host->lock, flags); 2829 2830 /* Drop the performance vote */ 2831 dev_pm_opp_set_rate(dev, 0); 2832 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2833 msm_host->bulk_clks); 2834 2835 return sdhci_msm_ice_suspend(msm_host); 2836 } 2837 2838 static int sdhci_msm_runtime_resume(struct device *dev) 2839 { 2840 struct sdhci_host *host = dev_get_drvdata(dev); 2841 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2842 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2843 unsigned long flags; 2844 int 
ret;
2845
2846 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
2847 msm_host->bulk_clks);
2848 if (ret)
2849 return ret;
2850 /*
2851 * Whenever the core clock is gated dynamically, the SDR DLL settings
2852 * must be restored when the clock is ungated again.
2853 */
2854 if (msm_host->restore_dll_config && msm_host->clk_rate) {
2855 ret = sdhci_msm_restore_sdr_dll_config(host);
2856 if (ret)
2857 return ret;
2858 }
2859
2860 dev_pm_opp_set_rate(dev, msm_host->clk_rate);
2861
2862 ret = sdhci_msm_ice_resume(msm_host);
2863 if (ret)
2864 return ret;
2865
2866 spin_lock_irqsave(&host->lock, flags);
2867 host->runtime_suspended = false;
2868 spin_unlock_irqrestore(&host->lock, flags);
2869
2870 return ret;
2871 }
2872
2873 static const struct dev_pm_ops sdhci_msm_pm_ops = {
2874 SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
2875 RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume, NULL)
2876 };
2877
2878 static struct platform_driver sdhci_msm_driver = {
2879 .probe = sdhci_msm_probe,
2880 .remove = sdhci_msm_remove,
2881 .driver = {
2882 .name = "sdhci_msm",
2883 .of_match_table = sdhci_msm_dt_match,
2884 .pm = pm_ptr(&sdhci_msm_pm_ops),
2885 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
2886 },
2887 };
2888
2889 module_platform_driver(sdhci_msm_driver);
2890
2891 MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
2892 MODULE_LICENSE("GPL v2");
2893