1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver 4 * 5 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. 6 */ 7 8 #include <linux/module.h> 9 #include <linux/of_device.h> 10 #include <linux/delay.h> 11 #include <linux/mmc/mmc.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/pm_opp.h> 14 #include <linux/slab.h> 15 #include <linux/iopoll.h> 16 #include <linux/regulator/consumer.h> 17 18 #include "sdhci-pltfm.h" 19 #include "cqhci.h" 20 21 #define CORE_MCI_VERSION 0x50 22 #define CORE_VERSION_MAJOR_SHIFT 28 23 #define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT) 24 #define CORE_VERSION_MINOR_MASK 0xff 25 26 #define CORE_MCI_GENERICS 0x70 27 #define SWITCHABLE_SIGNALING_VOLTAGE BIT(29) 28 29 #define HC_MODE_EN 0x1 30 #define CORE_POWER 0x0 31 #define CORE_SW_RST BIT(7) 32 #define FF_CLK_SW_RST_DIS BIT(13) 33 34 #define CORE_PWRCTL_BUS_OFF BIT(0) 35 #define CORE_PWRCTL_BUS_ON BIT(1) 36 #define CORE_PWRCTL_IO_LOW BIT(2) 37 #define CORE_PWRCTL_IO_HIGH BIT(3) 38 #define CORE_PWRCTL_BUS_SUCCESS BIT(0) 39 #define CORE_PWRCTL_IO_SUCCESS BIT(2) 40 #define REQ_BUS_OFF BIT(0) 41 #define REQ_BUS_ON BIT(1) 42 #define REQ_IO_LOW BIT(2) 43 #define REQ_IO_HIGH BIT(3) 44 #define INT_MASK 0xf 45 #define MAX_PHASES 16 46 #define CORE_DLL_LOCK BIT(7) 47 #define CORE_DDR_DLL_LOCK BIT(11) 48 #define CORE_DLL_EN BIT(16) 49 #define CORE_CDR_EN BIT(17) 50 #define CORE_CK_OUT_EN BIT(18) 51 #define CORE_CDR_EXT_EN BIT(19) 52 #define CORE_DLL_PDN BIT(29) 53 #define CORE_DLL_RST BIT(30) 54 #define CORE_CMD_DAT_TRACK_SEL BIT(0) 55 56 #define CORE_DDR_CAL_EN BIT(0) 57 #define CORE_FLL_CYCLE_CNT BIT(18) 58 #define CORE_DLL_CLOCK_DISABLE BIT(21) 59 60 #define DLL_USR_CTL_POR_VAL 0x10800 61 #define ENABLE_DLL_LOCK_STATUS BIT(26) 62 #define FINE_TUNE_MODE_EN BIT(27) 63 #define BIAS_OK_SIGNAL BIT(29) 64 65 #define DLL_CONFIG_3_LOW_FREQ_VAL 0x08 66 #define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10 67 68 #define CORE_VENDOR_SPEC_POR_VAL 0xa9c 69 #define CORE_CLK_PWRSAVE BIT(1) 70 #define CORE_HC_MCLK_SEL_DFLT (2 << 8) 71 #define CORE_HC_MCLK_SEL_HS400 (3 << 8) 72 #define CORE_HC_MCLK_SEL_MASK (3 << 8) 73 #define CORE_IO_PAD_PWR_SWITCH_EN BIT(15) 74 #define CORE_IO_PAD_PWR_SWITCH BIT(16) 75 #define CORE_HC_SELECT_IN_EN BIT(18) 76 #define CORE_HC_SELECT_IN_HS400 (6 << 19) 77 #define CORE_HC_SELECT_IN_MASK (7 << 19) 78 79 #define CORE_3_0V_SUPPORT BIT(25) 80 #define CORE_1_8V_SUPPORT BIT(26) 81 #define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT) 82 83 #define CORE_CSR_CDC_CTLR_CFG0 0x130 84 #define CORE_SW_TRIG_FULL_CALIB BIT(16) 85 #define CORE_HW_AUTOCAL_ENA BIT(17) 86 87 #define CORE_CSR_CDC_CTLR_CFG1 0x134 88 #define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138 89 #define CORE_TIMER_ENA BIT(16) 90 91 #define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C 92 #define CORE_CSR_CDC_REFCOUNT_CFG 0x140 93 #define CORE_CSR_CDC_COARSE_CAL_CFG 0x144 94 #define CORE_CDC_OFFSET_CFG 0x14C 95 #define CORE_CSR_CDC_DELAY_CFG 0x150 96 #define CORE_CDC_SLAVE_DDA_CFG 0x160 97 #define CORE_CSR_CDC_STATUS0 0x164 98 #define CORE_CALIBRATION_DONE BIT(0) 99 100 #define CORE_CDC_ERROR_CODE_MASK 0x7000000 101 102 #define CORE_CSR_CDC_GEN_CFG 0x178 103 #define CORE_CDC_SWITCH_BYPASS_OFF BIT(0) 104 #define CORE_CDC_SWITCH_RC_EN BIT(1) 105 106 #define CORE_CDC_T4_DLY_SEL BIT(0) 107 #define CORE_CMDIN_RCLK_EN BIT(1) 108 #define CORE_START_CDC_TRAFFIC BIT(6) 109 110 #define CORE_PWRSAVE_DLL BIT(3) 111 112 #define DDR_CONFIG_POR_VAL 0x80040873 113 114 115 #define INVALID_TUNING_PHASE -1 116 #define SDHCI_MSM_MIN_CLOCK 400000 117 #define CORE_FREQ_100MHZ (100 * 1000 * 1000) 118 119 #define CDR_SELEXT_SHIFT 20 120 #define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT) 121 #define CMUX_SHIFT_PHASE_SHIFT 24 122 #define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT) 123 124 #define MSM_MMC_AUTOSUSPEND_DELAY_MS 50 125 126 /* Timeout value to avoid infinite waiting for pwr_irq */ 127 #define MSM_PWR_IRQ_TIMEOUT_MS 5000 128 129 #define msm_host_readl(msm_host, host, offset) \ 130 msm_host->var_ops->msm_readl_relaxed(host, offset) 131 132 #define msm_host_writel(msm_host, val, host, offset) \ 133 msm_host->var_ops->msm_writel_relaxed(val, host, offset) 134 135 /* CQHCI vendor specific registers */ 136 #define CQHCI_VENDOR_CFG1 0xA00 137 #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13) 138 139 struct sdhci_msm_offset { 140 u32 core_hc_mode; 141 u32 core_mci_data_cnt; 142 u32 core_mci_status; 143 u32 core_mci_fifo_cnt; 144 u32 core_mci_version; 145 u32 core_generics; 146 u32 core_testbus_config; 147 u32 core_testbus_sel2_bit; 148 u32 core_testbus_ena; 149 u32 core_testbus_sel2; 150 u32 core_pwrctl_status; 151 u32 core_pwrctl_mask; 152 u32 core_pwrctl_clear; 153 u32 core_pwrctl_ctl; 154 u32 core_sdcc_debug_reg; 155 u32 core_dll_config; 156 u32 core_dll_status; 157 u32 core_vendor_spec; 158 u32 core_vendor_spec_adma_err_addr0; 159 u32 core_vendor_spec_adma_err_addr1; 160 u32 core_vendor_spec_func2; 161 u32 core_vendor_spec_capabilities0; 162 u32 core_ddr_200_cfg; 163 u32 core_vendor_spec3; 164 u32 core_dll_config_2; 165 u32 core_dll_config_3; 166 u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */ 167 u32 core_ddr_config; 168 u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */ 169 }; 170 171 static const struct sdhci_msm_offset sdhci_msm_v5_offset = { 172 .core_mci_data_cnt = 0x35c, 173 .core_mci_status = 0x324, 174 .core_mci_fifo_cnt = 0x308, 175 .core_mci_version = 0x318, 176 .core_generics = 0x320, 177 .core_testbus_config = 0x32c, 178 .core_testbus_sel2_bit = 3, 179 .core_testbus_ena = (1 << 31), 180 .core_testbus_sel2 = (1 << 3), 181 .core_pwrctl_status = 0x240, 182 .core_pwrctl_mask = 0x244, 183 .core_pwrctl_clear = 0x248, 184 .core_pwrctl_ctl = 0x24c, 185 .core_sdcc_debug_reg = 0x358, 186 .core_dll_config = 0x200, 187 .core_dll_status = 0x208, 188 .core_vendor_spec = 0x20c, 189 .core_vendor_spec_adma_err_addr0 = 0x214, 190 .core_vendor_spec_adma_err_addr1 = 0x218, 191 .core_vendor_spec_func2 = 0x210, 192 .core_vendor_spec_capabilities0 = 0x21c, 193 .core_ddr_200_cfg = 0x224, 194 .core_vendor_spec3 = 0x250, 195 .core_dll_config_2 = 0x254, 196 .core_dll_config_3 = 0x258, 197 .core_ddr_config = 0x25c, 198 .core_dll_usr_ctl = 0x388, 199 }; 200 201 static const struct sdhci_msm_offset sdhci_msm_mci_offset = { 202 .core_hc_mode = 0x78, 203 .core_mci_data_cnt = 0x30, 204 .core_mci_status = 0x34, 205 .core_mci_fifo_cnt = 0x44, 206 .core_mci_version = 0x050, 207 .core_generics = 0x70, 208 .core_testbus_config = 0x0cc, 209 .core_testbus_sel2_bit = 4, 210 .core_testbus_ena = (1 << 3), 211 .core_testbus_sel2 = (1 << 4), 212 .core_pwrctl_status = 0xdc, 213 .core_pwrctl_mask = 0xe0, 214 .core_pwrctl_clear = 0xe4, 215 .core_pwrctl_ctl = 0xe8, 216 .core_sdcc_debug_reg = 0x124, 217 .core_dll_config = 0x100, 218 .core_dll_status = 0x108, 219 .core_vendor_spec = 0x10c, 220 .core_vendor_spec_adma_err_addr0 = 0x114, 221 .core_vendor_spec_adma_err_addr1 = 0x118, 222 .core_vendor_spec_func2 = 0x110, 223 .core_vendor_spec_capabilities0 = 0x11c, 224 .core_ddr_200_cfg = 0x184, 225 .core_vendor_spec3 = 0x1b0, 226 .core_dll_config_2 = 0x1b4, 227 .core_ddr_config_old = 0x1b8, 228 .core_ddr_config = 0x1bc, 229 }; 230 231 struct sdhci_msm_variant_ops { 232 u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset); 233 void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host, 234 u32 offset); 235 }; 236 237 /* 238 * From V5, register spaces have changed. Wrap this info in a structure 239 * and choose the data_structure based on version info mentioned in DT. 240 */ 241 struct sdhci_msm_variant_info { 242 bool mci_removed; 243 bool restore_dll_config; 244 bool uses_tassadar_dll; 245 const struct sdhci_msm_variant_ops *var_ops; 246 const struct sdhci_msm_offset *offset; 247 }; 248 249 struct sdhci_msm_host { 250 struct platform_device *pdev; 251 void __iomem *core_mem; /* MSM SDCC mapped address */ 252 int pwr_irq; /* power irq */ 253 struct clk *bus_clk; /* SDHC bus voter clock */ 254 struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/ 255 struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */ 256 unsigned long clk_rate; 257 struct mmc_host *mmc; 258 struct opp_table *opp_table; 259 bool has_opp_table; 260 bool use_14lpp_dll_reset; 261 bool tuning_done; 262 bool calibration_done; 263 u8 saved_tuning_phase; 264 bool use_cdclp533; 265 u32 curr_pwr_state; 266 u32 curr_io_level; 267 wait_queue_head_t pwr_irq_wait; 268 bool pwr_irq_flag; 269 u32 caps_0; 270 bool mci_removed; 271 bool restore_dll_config; 272 const struct sdhci_msm_variant_ops *var_ops; 273 const struct sdhci_msm_offset *offset; 274 bool use_cdr; 275 u32 transfer_mode; 276 bool updated_ddr_cfg; 277 bool uses_tassadar_dll; 278 u32 dll_config; 279 u32 ddr_config; 280 }; 281 282 static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) 283 { 284 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 285 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 286 287 return msm_host->offset; 288 } 289 290 /* 291 * APIs to read/write to vendor specific registers which were there in the 292 * core_mem region before MCI was removed. 293 */ 294 static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host, 295 u32 offset) 296 { 297 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 298 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 299 300 return readl_relaxed(msm_host->core_mem + offset); 301 } 302 303 static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host, 304 u32 offset) 305 { 306 return readl_relaxed(host->ioaddr + offset); 307 } 308 309 static void sdhci_msm_mci_variant_writel_relaxed(u32 val, 310 struct sdhci_host *host, u32 offset) 311 { 312 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 313 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 314 315 writel_relaxed(val, msm_host->core_mem + offset); 316 } 317 318 static void sdhci_msm_v5_variant_writel_relaxed(u32 val, 319 struct sdhci_host *host, u32 offset) 320 { 321 writel_relaxed(val, host->ioaddr + offset); 322 } 323 324 static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host, 325 unsigned int clock) 326 { 327 struct mmc_ios ios = host->mmc->ios; 328 /* 329 * The SDHC requires internal clock frequency to be double the 330 * actual clock that will be set for DDR mode. The controller 331 * uses the faster clock(100/400MHz) for some of its parts and 332 * send the actual required clock (50/200MHz) to the card. 333 */ 334 if (ios.timing == MMC_TIMING_UHS_DDR50 || 335 ios.timing == MMC_TIMING_MMC_DDR52 || 336 ios.timing == MMC_TIMING_MMC_HS400 || 337 host->flags & SDHCI_HS400_TUNING) 338 clock *= 2; 339 return clock; 340 } 341 342 static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, 343 unsigned int clock) 344 { 345 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 346 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 347 struct mmc_ios curr_ios = host->mmc->ios; 348 struct clk *core_clk = msm_host->bulk_clks[0].clk; 349 int rc; 350 351 clock = msm_get_clock_rate_for_bus_mode(host, clock); 352 rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock); 353 if (rc) { 354 pr_err("%s: Failed to set clock at rate %u at timing %d\n", 355 mmc_hostname(host->mmc), clock, 356 curr_ios.timing); 357 return; 358 } 359 msm_host->clk_rate = clock; 360 pr_debug("%s: Setting clock at rate %lu at timing %d\n", 361 mmc_hostname(host->mmc), clk_get_rate(core_clk), 362 curr_ios.timing); 363 } 364 365 /* Platform specific tuning */ 366 static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll) 367 { 368 u32 wait_cnt = 50; 369 u8 ck_out_en; 370 struct mmc_host *mmc = host->mmc; 371 const struct sdhci_msm_offset *msm_offset = 372 sdhci_priv_msm_offset(host); 373 374 /* Poll for CK_OUT_EN bit. max. poll time = 50us */ 375 ck_out_en = !!(readl_relaxed(host->ioaddr + 376 msm_offset->core_dll_config) & CORE_CK_OUT_EN); 377 378 while (ck_out_en != poll) { 379 if (--wait_cnt == 0) { 380 dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n", 381 mmc_hostname(mmc), poll); 382 return -ETIMEDOUT; 383 } 384 udelay(1); 385 386 ck_out_en = !!(readl_relaxed(host->ioaddr + 387 msm_offset->core_dll_config) & CORE_CK_OUT_EN); 388 } 389 390 return 0; 391 } 392 393 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase) 394 { 395 int rc; 396 static const u8 grey_coded_phase_table[] = { 397 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4, 398 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8 399 }; 400 unsigned long flags; 401 u32 config; 402 struct mmc_host *mmc = host->mmc; 403 const struct sdhci_msm_offset *msm_offset = 404 sdhci_priv_msm_offset(host); 405 406 if (phase > 0xf) 407 return -EINVAL; 408 409 spin_lock_irqsave(&host->lock, flags); 410 411 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); 412 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN); 413 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN); 414 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); 415 416 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */ 417 rc = msm_dll_poll_ck_out_en(host, 0); 418 if (rc) 419 goto err_out; 420 421 /* 422 * Write the selected DLL clock output phase (0 ... 15) 423 * to CDR_SELEXT bit field of DLL_CONFIG register. 424 */ 425 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); 426 config &= ~CDR_SELEXT_MASK; 427 config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT; 428 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); 429 430 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); 431 config |= CORE_CK_OUT_EN; 432 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); 433 434 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */ 435 rc = msm_dll_poll_ck_out_en(host, 1); 436 if (rc) 437 goto err_out; 438 439 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); 440 config |= CORE_CDR_EN; 441 config &= ~CORE_CDR_EXT_EN; 442 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); 443 goto out; 444 445 err_out: 446 dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n", 447 mmc_hostname(mmc), phase); 448 out: 449 spin_unlock_irqrestore(&host->lock, flags); 450 return rc; 451 } 452 453 /* 454 * Find out the greatest range of consecuitive selected 455 * DLL clock output phases that can be used as sampling 456 * setting for SD3.0 UHS-I card read operation (in SDR104 457 * timing mode) or for eMMC4.5 card read operation (in 458 * HS400/HS200 timing mode). 459 * Select the 3/4 of the range and configure the DLL with the 460 * selected DLL clock output phase. 461 */ 462 463 static int msm_find_most_appropriate_phase(struct sdhci_host *host, 464 u8 *phase_table, u8 total_phases) 465 { 466 int ret; 467 u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} }; 468 u8 phases_per_row[MAX_PHASES] = { 0 }; 469 int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0; 470 int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0; 471 bool phase_0_found = false, phase_15_found = false; 472 struct mmc_host *mmc = host->mmc; 473 474 if (!total_phases || (total_phases > MAX_PHASES)) { 475 dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n", 476 mmc_hostname(mmc), total_phases); 477 return -EINVAL; 478 } 479 480 for (cnt = 0; cnt < total_phases; cnt++) { 481 ranges[row_index][col_index] = phase_table[cnt]; 482 phases_per_row[row_index] += 1; 483 col_index++; 484 485 if ((cnt + 1) == total_phases) { 486 continue; 487 /* check if next phase in phase_table is consecutive or not */ 488 } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) { 489 row_index++; 490 col_index = 0; 491 } 492 } 493 494 if (row_index >= MAX_PHASES) 495 return -EINVAL; 496 497 /* Check if phase-0 is present in first valid window? */ 498 if (!ranges[0][0]) { 499 phase_0_found = true; 500 phase_0_raw_index = 0; 501 /* Check if cycle exist between 2 valid windows */ 502 for (cnt = 1; cnt <= row_index; cnt++) { 503 if (phases_per_row[cnt]) { 504 for (i = 0; i < phases_per_row[cnt]; i++) { 505 if (ranges[cnt][i] == 15) { 506 phase_15_found = true; 507 phase_15_raw_index = cnt; 508 break; 509 } 510 } 511 } 512 } 513 } 514 515 /* If 2 valid windows form cycle then merge them as single window */ 516 if (phase_0_found && phase_15_found) { 517 /* number of phases in raw where phase 0 is present */ 518 u8 phases_0 = phases_per_row[phase_0_raw_index]; 519 /* number of phases in raw where phase 15 is present */ 520 u8 phases_15 = phases_per_row[phase_15_raw_index]; 521 522 if (phases_0 + phases_15 >= MAX_PHASES) 523 /* 524 * If there are more than 1 phase windows then total 525 * number of phases in both the windows should not be 526 * more than or equal to MAX_PHASES. 527 */ 528 return -EINVAL; 529 530 /* Merge 2 cyclic windows */ 531 i = phases_15; 532 for (cnt = 0; cnt < phases_0; cnt++) { 533 ranges[phase_15_raw_index][i] = 534 ranges[phase_0_raw_index][cnt]; 535 if (++i >= MAX_PHASES) 536 break; 537 } 538 539 phases_per_row[phase_0_raw_index] = 0; 540 phases_per_row[phase_15_raw_index] = phases_15 + phases_0; 541 } 542 543 for (cnt = 0; cnt <= row_index; cnt++) { 544 if (phases_per_row[cnt] > curr_max) { 545 curr_max = phases_per_row[cnt]; 546 selected_row_index = cnt; 547 } 548 } 549 550 i = (curr_max * 3) / 4; 551 if (i) 552 i--; 553 554 ret = ranges[selected_row_index][i]; 555 556 if (ret >= MAX_PHASES) { 557 ret = -EINVAL; 558 dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n", 559 mmc_hostname(mmc), ret); 560 } 561 562 return ret; 563 } 564 565 static inline void msm_cm_dll_set_freq(struct sdhci_host *host) 566 { 567 u32 mclk_freq = 0, config; 568 const struct sdhci_msm_offset *msm_offset = 569 sdhci_priv_msm_offset(host); 570 571 /* Program the MCLK value to MCLK_FREQ bit field */ 572 if (host->clock <= 112000000) 573 mclk_freq = 0; 574 else if (host->clock <= 125000000) 575 mclk_freq = 1; 576 else if (host->clock <= 137000000) 577 mclk_freq = 2; 578 else if (host->clock <= 150000000) 579 mclk_freq = 3; 580 else if (host->clock <= 162000000) 581 mclk_freq = 4; 582 else if (host->clock <= 175000000) 583 mclk_freq = 5; 584 else if (host->clock <= 187000000) 585 mclk_freq = 6; 586 else if (host->clock <= 200000000) 587 mclk_freq = 7; 588 589 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); 590 config &= ~CMUX_SHIFT_PHASE_MASK; 591 config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT; 592 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); 593 } 594 595 /* Initialize the DLL (Programmable Delay Line) */ 596 static int msm_init_cm_dll(struct sdhci_host *host) 597 { 598 struct mmc_host *mmc = host->mmc; 599 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 600 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 601 int wait_cnt = 50; 602 unsigned long flags, xo_clk = 0; 603 u32 config; 604 const struct sdhci_msm_offset *msm_offset = 605 msm_host->offset; 606 607 if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk)) 608 xo_clk = clk_get_rate(msm_host->xo_clk); 609 610 spin_lock_irqsave(&host->lock, flags); 611 612 /* 613 * Make sure that clock is always enabled when DLL 614 * tuning is in progress. Keeping PWRSAVE ON may 615 * turn off the clock. 616 */ 617 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); 618 config &= ~CORE_CLK_PWRSAVE; 619 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); 620 621 if (msm_host->dll_config) 622 writel_relaxed(msm_host->dll_config, 623 host->ioaddr + msm_offset->core_dll_config); 624 625 if (msm_host->use_14lpp_dll_reset) { 626 config = readl_relaxed(host->ioaddr + 627 msm_offset->core_dll_config); 628 config &= ~CORE_CK_OUT_EN; 629 writel_relaxed(config, host->ioaddr + 630 msm_offset->core_dll_config); 631 632 config = readl_relaxed(host->ioaddr + 633 msm_offset->core_dll_config_2); 634 config |= CORE_DLL_CLOCK_DISABLE; 635 writel_relaxed(config, host->ioaddr + 636 msm_offset->core_dll_config_2); 637 } 638 639 config = readl_relaxed(host->ioaddr + 640 msm_offset->core_dll_config); 641 config |= CORE_DLL_RST; 642 writel_relaxed(config, host->ioaddr + 643 msm_offset->core_dll_config); 644 645 config = readl_relaxed(host->ioaddr + 646 msm_offset->core_dll_config); 647 config |= CORE_DLL_PDN; 648 writel_relaxed(config, host->ioaddr + 649 msm_offset->core_dll_config); 650 651 if (!msm_host->dll_config) 652 msm_cm_dll_set_freq(host); 653 654 if (msm_host->use_14lpp_dll_reset && 655 !IS_ERR_OR_NULL(msm_host->xo_clk)) { 656 u32 mclk_freq = 0; 657 658 config = readl_relaxed(host->ioaddr + 659 msm_offset->core_dll_config_2); 660 config &= CORE_FLL_CYCLE_CNT; 661 if (config) 662 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8), 663 xo_clk); 664 else 665 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4), 666 xo_clk); 667 668 config = readl_relaxed(host->ioaddr + 669 msm_offset->core_dll_config_2); 670 config &= ~(0xFF << 10); 671 config |= mclk_freq << 10; 672 673 writel_relaxed(config, host->ioaddr + 674 msm_offset->core_dll_config_2); 675 /* wait for 5us before enabling DLL clock */ 676 udelay(5); 677 } 678 679 config = readl_relaxed(host->ioaddr + 680 msm_offset->core_dll_config); 681 config &= ~CORE_DLL_RST; 682 writel_relaxed(config, host->ioaddr + 683 msm_offset->core_dll_config); 684 685 config = readl_relaxed(host->ioaddr + 686 msm_offset->core_dll_config); 687 config &= ~CORE_DLL_PDN; 688 writel_relaxed(config, host->ioaddr + 689 msm_offset->core_dll_config); 690 691 if (msm_host->use_14lpp_dll_reset) { 692 if (!msm_host->dll_config) 693 msm_cm_dll_set_freq(host); 694 config = readl_relaxed(host->ioaddr + 695 msm_offset->core_dll_config_2); 696 config &= ~CORE_DLL_CLOCK_DISABLE; 697 writel_relaxed(config, host->ioaddr + 698 msm_offset->core_dll_config_2); 699 } 700 701 /* 702 * Configure DLL user control register to enable DLL status. 703 * This setting is applicable to SDCC v5.1 onwards only. 704 */ 705 if (msm_host->uses_tassadar_dll) { 706 config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN | 707 ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL; 708 writel_relaxed(config, host->ioaddr + 709 msm_offset->core_dll_usr_ctl); 710 711 config = readl_relaxed(host->ioaddr + 712 msm_offset->core_dll_config_3); 713 config &= ~0xFF; 714 if (msm_host->clk_rate < 150000000) 715 config |= DLL_CONFIG_3_LOW_FREQ_VAL; 716 else 717 config |= DLL_CONFIG_3_HIGH_FREQ_VAL; 718 writel_relaxed(config, host->ioaddr + 719 msm_offset->core_dll_config_3); 720 } 721 722 config = readl_relaxed(host->ioaddr + 723 msm_offset->core_dll_config); 724 config |= CORE_DLL_EN; 725 writel_relaxed(config, host->ioaddr + 726 msm_offset->core_dll_config); 727 728 config = readl_relaxed(host->ioaddr + 729 msm_offset->core_dll_config); 730 config |= CORE_CK_OUT_EN; 731 writel_relaxed(config, host->ioaddr + 732 msm_offset->core_dll_config); 733 734 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */ 735 while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) & 736 CORE_DLL_LOCK)) { 737 /* max. wait for 50us sec for LOCK bit to be set */ 738 if (--wait_cnt == 0) { 739 dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n", 740 mmc_hostname(mmc)); 741 spin_unlock_irqrestore(&host->lock, flags); 742 return -ETIMEDOUT; 743 } 744 udelay(1); 745 } 746 747 spin_unlock_irqrestore(&host->lock, flags); 748 return 0; 749 } 750 751 static void msm_hc_select_default(struct sdhci_host *host) 752 { 753 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 754 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 755 u32 config; 756 const struct sdhci_msm_offset *msm_offset = 757 msm_host->offset; 758 759 if (!msm_host->use_cdclp533) { 760 config = readl_relaxed(host->ioaddr + 761 msm_offset->core_vendor_spec3); 762 config &= ~CORE_PWRSAVE_DLL; 763 writel_relaxed(config, host->ioaddr + 764 msm_offset->core_vendor_spec3); 765 } 766 767 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); 768 config &= ~CORE_HC_MCLK_SEL_MASK; 769 config |= CORE_HC_MCLK_SEL_DFLT; 770 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); 771 772 /* 773 * Disable HC_SELECT_IN to be able to use the UHS mode select 774 * configuration from Host Control2 register for all other 775 * modes. 776 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field 777 * in VENDOR_SPEC_FUNC 778 */ 779 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); 780 config &= ~CORE_HC_SELECT_IN_EN; 781 config &= ~CORE_HC_SELECT_IN_MASK; 782 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); 783 784 /* 785 * Make sure above writes impacting free running MCLK are completed 786 * before changing the clk_rate at GCC. 787 */ 788 wmb(); 789 } 790 791 static void msm_hc_select_hs400(struct sdhci_host *host) 792 { 793 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 794 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 795 struct mmc_ios ios = host->mmc->ios; 796 u32 config, dll_lock; 797 int rc; 798 const struct sdhci_msm_offset *msm_offset = 799 msm_host->offset; 800 801 /* Select the divided clock (free running MCLK/2) */ 802 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); 803 config &= ~CORE_HC_MCLK_SEL_MASK; 804 config |= CORE_HC_MCLK_SEL_HS400; 805 806 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); 807 /* 808 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC 809 * register 810 */ 811 if ((msm_host->tuning_done || ios.enhanced_strobe) && 812 !msm_host->calibration_done) { 813 config = readl_relaxed(host->ioaddr + 814 msm_offset->core_vendor_spec); 815 config |= CORE_HC_SELECT_IN_HS400; 816 config |= CORE_HC_SELECT_IN_EN; 817 writel_relaxed(config, host->ioaddr + 818 msm_offset->core_vendor_spec); 819 } 820 if (!msm_host->clk_rate && !msm_host->use_cdclp533) { 821 /* 822 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in 823 * core_dll_status to be set. This should get set 824 * within 15 us at 200 MHz. 825 */ 826 rc = readl_relaxed_poll_timeout(host->ioaddr + 827 msm_offset->core_dll_status, 828 dll_lock, 829 (dll_lock & 830 (CORE_DLL_LOCK | 831 CORE_DDR_DLL_LOCK)), 10, 832 1000); 833 if (rc == -ETIMEDOUT) 834 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n", 835 mmc_hostname(host->mmc), dll_lock); 836 } 837 /* 838 * Make sure above writes impacting free running MCLK are completed 839 * before changing the clk_rate at GCC. 840 */ 841 wmb(); 842 } 843 844 /* 845 * sdhci_msm_hc_select_mode :- In general all timing modes are 846 * controlled via UHS mode select in Host Control2 register. 847 * eMMC specific HS200/HS400 doesn't have their respective modes 848 * defined here, hence we use these values. 849 * 850 * HS200 - SDR104 (Since they both are equivalent in functionality) 851 * HS400 - This involves multiple configurations 852 * Initially SDR104 - when tuning is required as HS200 853 * Then when switching to DDR @ 400MHz (HS400) we use 854 * the vendor specific HC_SELECT_IN to control the mode. 855 * 856 * In addition to controlling the modes we also need to select the 857 * correct input clock for DLL depending on the mode. 858 * 859 * HS400 - divided clock (free running MCLK/2) 860 * All other modes - default (free running MCLK) 861 */ 862 static void sdhci_msm_hc_select_mode(struct sdhci_host *host) 863 { 864 struct mmc_ios ios = host->mmc->ios; 865 866 if (ios.timing == MMC_TIMING_MMC_HS400 || 867 host->flags & SDHCI_HS400_TUNING) 868 msm_hc_select_hs400(host); 869 else 870 msm_hc_select_default(host); 871 } 872 873 static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host) 874 { 875 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 876 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 877 u32 config, calib_done; 878 int ret; 879 const struct sdhci_msm_offset *msm_offset = 880 msm_host->offset; 881 882 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); 883 884 /* 885 * Retuning in HS400 (DDR mode) will fail, just reset the 886 * tuning block and restore the saved tuning phase. 887 */ 888 ret = msm_init_cm_dll(host); 889 if (ret) 890 goto out; 891 892 /* Set the selected phase in delay line hw block */ 893 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); 894 if (ret) 895 goto out; 896 897 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); 898 config |= CORE_CMD_DAT_TRACK_SEL; 899 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); 900 901 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); 902 config &= ~CORE_CDC_T4_DLY_SEL; 903 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); 904 905 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); 906 config &= ~CORE_CDC_SWITCH_BYPASS_OFF; 907 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); 908 909 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); 910 config |= CORE_CDC_SWITCH_RC_EN; 911 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); 912 913 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); 914 config &= ~CORE_START_CDC_TRAFFIC; 915 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); 916 917 /* Perform CDC Register Initialization Sequence */ 918 919 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 920 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1); 921 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); 922 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1); 923 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG); 924 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG); 925 writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG); 926 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG); 927 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG); 928 929 /* CDC HW Calibration */ 930 931 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 932 config |= CORE_SW_TRIG_FULL_CALIB; 933 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 934 935 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 936 config &= ~CORE_SW_TRIG_FULL_CALIB; 937 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 938 939 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 940 config |= CORE_HW_AUTOCAL_ENA; 941 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 942 943 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); 944 config |= CORE_TIMER_ENA; 945 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); 946 947 ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0, 948 calib_done, 949 (calib_done & CORE_CALIBRATION_DONE), 950 1, 50); 951 952 if (ret == -ETIMEDOUT) { 953 pr_err("%s: %s: CDC calibration was not completed\n", 954 mmc_hostname(host->mmc), __func__); 955 goto out; 956 } 957 958 ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0) 959 & CORE_CDC_ERROR_CODE_MASK; 960 if (ret) { 961 pr_err("%s: %s: CDC error code %d\n", 962 mmc_hostname(host->mmc), __func__, ret); 963 ret = -EINVAL; 964 goto out; 965 } 966 967 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); 968 config |= CORE_START_CDC_TRAFFIC; 969 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); 970 out: 971 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), 972 __func__, ret); 973 return ret; 974 } 975 976 static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) 977 { 978 struct mmc_host *mmc = host->mmc; 979 u32 dll_status, config, ddr_cfg_offset; 980 int ret; 981 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 982 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 983 const struct sdhci_msm_offset *msm_offset = 984 sdhci_priv_msm_offset(host); 985 986 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); 987 988 /* 989 * Currently the core_ddr_config register defaults to desired 990 * configuration on reset. Currently reprogramming the power on 991 * reset (POR) value in case it might have been modified by 992 * bootloaders. In the future, if this changes, then the desired 993 * values will need to be programmed appropriately. 994 */ 995 if (msm_host->updated_ddr_cfg) 996 ddr_cfg_offset = msm_offset->core_ddr_config; 997 else 998 ddr_cfg_offset = msm_offset->core_ddr_config_old; 999 writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset); 1000 1001 if (mmc->ios.enhanced_strobe) { 1002 config = readl_relaxed(host->ioaddr + 1003 msm_offset->core_ddr_200_cfg); 1004 config |= CORE_CMDIN_RCLK_EN; 1005 writel_relaxed(config, host->ioaddr + 1006 msm_offset->core_ddr_200_cfg); 1007 } 1008 1009 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2); 1010 config |= CORE_DDR_CAL_EN; 1011 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2); 1012 1013 ret = readl_relaxed_poll_timeout(host->ioaddr + 1014 msm_offset->core_dll_status, 1015 dll_status, 1016 (dll_status & CORE_DDR_DLL_LOCK), 1017 10, 1000); 1018 1019 if (ret == -ETIMEDOUT) { 1020 pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n", 1021 mmc_hostname(host->mmc), __func__); 1022 goto out; 1023 } 1024 1025 /* 1026 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3. 1027 * When MCLK is gated OFF, it is not gated for less than 0.5us 1028 * and MCLK must be switched on for at-least 1us before DATA 1029 * starts coming. Controllers with 14lpp and later tech DLL cannot 1030 * guarantee above requirement. So PWRSAVE_DLL should not be 1031 * turned on for host controllers using this DLL. 1032 */ 1033 if (!msm_host->use_14lpp_dll_reset) { 1034 config = readl_relaxed(host->ioaddr + 1035 msm_offset->core_vendor_spec3); 1036 config |= CORE_PWRSAVE_DLL; 1037 writel_relaxed(config, host->ioaddr + 1038 msm_offset->core_vendor_spec3); 1039 } 1040 1041 /* 1042 * Drain writebuffer to ensure above DLL calibration 1043 * and PWRSAVE DLL is enabled. 1044 */ 1045 wmb(); 1046 out: 1047 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), 1048 __func__, ret); 1049 return ret; 1050 } 1051 1052 static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host) 1053 { 1054 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1055 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1056 struct mmc_host *mmc = host->mmc; 1057 int ret; 1058 u32 config; 1059 const struct sdhci_msm_offset *msm_offset = 1060 msm_host->offset; 1061 1062 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); 1063 1064 /* 1065 * Retuning in HS400 (DDR mode) will fail, just reset the 1066 * tuning block and restore the saved tuning phase. 1067 */ 1068 ret = msm_init_cm_dll(host); 1069 if (ret) 1070 goto out; 1071 1072 if (!mmc->ios.enhanced_strobe) { 1073 /* Set the selected phase in delay line hw block */ 1074 ret = msm_config_cm_dll_phase(host, 1075 msm_host->saved_tuning_phase); 1076 if (ret) 1077 goto out; 1078 config = readl_relaxed(host->ioaddr + 1079 msm_offset->core_dll_config); 1080 config |= CORE_CMD_DAT_TRACK_SEL; 1081 writel_relaxed(config, host->ioaddr + 1082 msm_offset->core_dll_config); 1083 } 1084 1085 if (msm_host->use_cdclp533) 1086 ret = sdhci_msm_cdclp533_calibration(host); 1087 else 1088 ret = sdhci_msm_cm_dll_sdc4_calibration(host); 1089 out: 1090 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), 1091 __func__, ret); 1092 return ret; 1093 } 1094 1095 static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host) 1096 { 1097 struct mmc_ios *ios = &host->mmc->ios; 1098 1099 /* 1100 * Tuning is required for SDR104, HS200 and HS400 cards and 1101 * if clock frequency is greater than 100MHz in these modes. 1102 */ 1103 if (host->clock <= CORE_FREQ_100MHZ || 1104 !(ios->timing == MMC_TIMING_MMC_HS400 || 1105 ios->timing == MMC_TIMING_MMC_HS200 || 1106 ios->timing == MMC_TIMING_UHS_SDR104) || 1107 ios->enhanced_strobe) 1108 return false; 1109 1110 return true; 1111 } 1112 1113 static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host) 1114 { 1115 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1116 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1117 int ret; 1118 1119 /* 1120 * SDR DLL comes into picture only for timing modes which needs 1121 * tuning. 1122 */ 1123 if (!sdhci_msm_is_tuning_needed(host)) 1124 return 0; 1125 1126 /* Reset the tuning block */ 1127 ret = msm_init_cm_dll(host); 1128 if (ret) 1129 return ret; 1130 1131 /* Restore the tuning block */ 1132 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); 1133 1134 return ret; 1135 } 1136 1137 static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) 1138 { 1139 const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); 1140 u32 config, oldconfig = readl_relaxed(host->ioaddr + 1141 msm_offset->core_dll_config); 1142 1143 config = oldconfig; 1144 if (enable) { 1145 config |= CORE_CDR_EN; 1146 config &= ~CORE_CDR_EXT_EN; 1147 } else { 1148 config &= ~CORE_CDR_EN; 1149 config |= CORE_CDR_EXT_EN; 1150 } 1151 1152 if (config != oldconfig) { 1153 writel_relaxed(config, host->ioaddr + 1154 msm_offset->core_dll_config); 1155 } 1156 } 1157 1158 static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) 1159 { 1160 struct sdhci_host *host = mmc_priv(mmc); 1161 int tuning_seq_cnt = 3; 1162 u8 phase, tuned_phases[16], tuned_phase_cnt = 0; 1163 int rc; 1164 struct mmc_ios ios = host->mmc->ios; 1165 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1166 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1167 1168 if (!sdhci_msm_is_tuning_needed(host)) { 1169 msm_host->use_cdr = false; 1170 sdhci_msm_set_cdr(host, false); 1171 return 0; 1172 } 1173 1174 /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ 1175 msm_host->use_cdr = true; 1176 1177 /* 1178 * Clear tuning_done flag before tuning to ensure proper 1179 * HS400 settings. 1180 */ 1181 msm_host->tuning_done = 0; 1182 1183 /* 1184 * For HS400 tuning in HS200 timing requires: 1185 * - select MCLK/2 in VENDOR_SPEC 1186 * - program MCLK to 400MHz (or nearest supported) in GCC 1187 */ 1188 if (host->flags & SDHCI_HS400_TUNING) { 1189 sdhci_msm_hc_select_mode(host); 1190 msm_set_clock_rate_for_bus_mode(host, ios.clock); 1191 host->flags &= ~SDHCI_HS400_TUNING; 1192 } 1193 1194 retry: 1195 /* First of all reset the tuning block */ 1196 rc = msm_init_cm_dll(host); 1197 if (rc) 1198 return rc; 1199 1200 phase = 0; 1201 do { 1202 /* Set the phase in delay line hw block */ 1203 rc = msm_config_cm_dll_phase(host, phase); 1204 if (rc) 1205 return rc; 1206 1207 rc = mmc_send_tuning(mmc, opcode, NULL); 1208 if (!rc) { 1209 /* Tuning is successful at this tuning point */ 1210 tuned_phases[tuned_phase_cnt++] = phase; 1211 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", 1212 mmc_hostname(mmc), phase); 1213 } 1214 } while (++phase < ARRAY_SIZE(tuned_phases)); 1215 1216 if (tuned_phase_cnt) { 1217 rc = msm_find_most_appropriate_phase(host, tuned_phases, 1218 tuned_phase_cnt); 1219 if (rc < 0) 1220 return rc; 1221 else 1222 phase = rc; 1223 1224 /* 1225 * Finally set the selected phase in delay 1226 * line hw block. 1227 */ 1228 rc = msm_config_cm_dll_phase(host, phase); 1229 if (rc) 1230 return rc; 1231 msm_host->saved_tuning_phase = phase; 1232 dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n", 1233 mmc_hostname(mmc), phase); 1234 } else { 1235 if (--tuning_seq_cnt) 1236 goto retry; 1237 /* Tuning failed */ 1238 dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n", 1239 mmc_hostname(mmc)); 1240 rc = -EIO; 1241 } 1242 1243 if (!rc) 1244 msm_host->tuning_done = true; 1245 return rc; 1246 } 1247 1248 /* 1249 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation. 1250 * This needs to be done for both tuning and enhanced_strobe mode. 1251 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz 1252 * fixed feedback clock is used. 1253 */ 1254 static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios) 1255 { 1256 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1257 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1258 int ret; 1259 1260 if (host->clock > CORE_FREQ_100MHZ && 1261 (msm_host->tuning_done || ios->enhanced_strobe) && 1262 !msm_host->calibration_done) { 1263 ret = sdhci_msm_hs400_dll_calibration(host); 1264 if (!ret) 1265 msm_host->calibration_done = true; 1266 else 1267 pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n", 1268 mmc_hostname(host->mmc), ret); 1269 } 1270 } 1271 1272 static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host, 1273 unsigned int uhs) 1274 { 1275 struct mmc_host *mmc = host->mmc; 1276 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1277 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1278 u16 ctrl_2; 1279 u32 config; 1280 const struct sdhci_msm_offset *msm_offset = 1281 msm_host->offset; 1282 1283 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1284 /* Select Bus Speed Mode for host */ 1285 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1286 switch (uhs) { 1287 case MMC_TIMING_UHS_SDR12: 1288 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 1289 break; 1290 case MMC_TIMING_UHS_SDR25: 1291 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 1292 break; 1293 case MMC_TIMING_UHS_SDR50: 1294 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 1295 break; 1296 case MMC_TIMING_MMC_HS400: 1297 case MMC_TIMING_MMC_HS200: 1298 case MMC_TIMING_UHS_SDR104: 1299 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 1300 break; 1301 case MMC_TIMING_UHS_DDR50: 1302 case MMC_TIMING_MMC_DDR52: 1303 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 1304 break; 1305 } 1306 1307 /* 1308 * When clock frequency is less than 100MHz, the feedback clock must be 1309 * provided and DLL must not be used so that tuning can be skipped. To 1310 * provide feedback clock, the mode selection can be any value less 1311 * than 3'b011 in bits [2:0] of HOST CONTROL2 register. 1312 */ 1313 if (host->clock <= CORE_FREQ_100MHZ) { 1314 if (uhs == MMC_TIMING_MMC_HS400 || 1315 uhs == MMC_TIMING_MMC_HS200 || 1316 uhs == MMC_TIMING_UHS_SDR104) 1317 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1318 /* 1319 * DLL is not required for clock <= 100MHz 1320 * Thus, make sure DLL it is disabled when not required 1321 */ 1322 config = readl_relaxed(host->ioaddr + 1323 msm_offset->core_dll_config); 1324 config |= CORE_DLL_RST; 1325 writel_relaxed(config, host->ioaddr + 1326 msm_offset->core_dll_config); 1327 1328 config = readl_relaxed(host->ioaddr + 1329 msm_offset->core_dll_config); 1330 config |= CORE_DLL_PDN; 1331 writel_relaxed(config, host->ioaddr + 1332 msm_offset->core_dll_config); 1333 1334 /* 1335 * The DLL needs to be restored and CDCLP533 recalibrated 1336 * when the clock frequency is set back to 400MHz. 1337 */ 1338 msm_host->calibration_done = false; 1339 } 1340 1341 dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n", 1342 mmc_hostname(host->mmc), host->clock, uhs, ctrl_2); 1343 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 1344 1345 if (mmc->ios.timing == MMC_TIMING_MMC_HS400) 1346 sdhci_msm_hs400(host, &mmc->ios); 1347 } 1348 1349 static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host) 1350 { 1351 init_waitqueue_head(&msm_host->pwr_irq_wait); 1352 } 1353 1354 static inline void sdhci_msm_complete_pwr_irq_wait( 1355 struct sdhci_msm_host *msm_host) 1356 { 1357 wake_up(&msm_host->pwr_irq_wait); 1358 } 1359 1360 /* 1361 * sdhci_msm_check_power_status API should be called when registers writes 1362 * which can toggle sdhci IO bus ON/OFF or change IO lines HIGH/LOW happens. 1363 * To what state the register writes will change the IO lines should be passed 1364 * as the argument req_type. This API will check whether the IO line's state 1365 * is already the expected state and will wait for power irq only if 1366 * power irq is expected to be trigerred based on the current IO line state 1367 * and expected IO line state. 1368 */ 1369 static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) 1370 { 1371 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1372 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1373 bool done = false; 1374 u32 val = SWITCHABLE_SIGNALING_VOLTAGE; 1375 const struct sdhci_msm_offset *msm_offset = 1376 msm_host->offset; 1377 1378 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n", 1379 mmc_hostname(host->mmc), __func__, req_type, 1380 msm_host->curr_pwr_state, msm_host->curr_io_level); 1381 1382 /* 1383 * The power interrupt will not be generated for signal voltage 1384 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set. 1385 * Since sdhci-msm-v5, this bit has been removed and SW must consider 1386 * it as always set. 1387 */ 1388 if (!msm_host->mci_removed) 1389 val = msm_host_readl(msm_host, host, 1390 msm_offset->core_generics); 1391 if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) && 1392 !(val & SWITCHABLE_SIGNALING_VOLTAGE)) { 1393 return; 1394 } 1395 1396 /* 1397 * The IRQ for request type IO High/LOW will be generated when - 1398 * there is a state change in 1.8V enable bit (bit 3) of 1399 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0 1400 * which indicates 3.3V IO voltage. So, when MMC core layer tries 1401 * to set it to 3.3V before card detection happens, the 1402 * IRQ doesn't get triggered as there is no state change in this bit. 1403 * The driver already handles this case by changing the IO voltage 1404 * level to high as part of controller power up sequence. Hence, check 1405 * for host->pwr to handle a case where IO voltage high request is 1406 * issued even before controller power up. 1407 */ 1408 if ((req_type & REQ_IO_HIGH) && !host->pwr) { 1409 pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n", 1410 mmc_hostname(host->mmc), req_type); 1411 return; 1412 } 1413 if ((req_type & msm_host->curr_pwr_state) || 1414 (req_type & msm_host->curr_io_level)) 1415 done = true; 1416 /* 1417 * This is needed here to handle cases where register writes will 1418 * not change the current bus state or io level of the controller. 1419 * In this case, no power irq will be triggerred and we should 1420 * not wait. 1421 */ 1422 if (!done) { 1423 if (!wait_event_timeout(msm_host->pwr_irq_wait, 1424 msm_host->pwr_irq_flag, 1425 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) 1426 dev_warn(&msm_host->pdev->dev, 1427 "%s: pwr_irq for req: (%d) timed out\n", 1428 mmc_hostname(host->mmc), req_type); 1429 } 1430 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc), 1431 __func__, req_type); 1432 } 1433 1434 static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host) 1435 { 1436 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1437 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1438 const struct sdhci_msm_offset *msm_offset = 1439 msm_host->offset; 1440 1441 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n", 1442 mmc_hostname(host->mmc), 1443 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status), 1444 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask), 1445 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl)); 1446 } 1447 1448 static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) 1449 { 1450 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1451 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1452 u32 irq_status, irq_ack = 0; 1453 int retry = 10; 1454 u32 pwr_state = 0, io_level = 0; 1455 u32 config; 1456 const struct sdhci_msm_offset *msm_offset = msm_host->offset; 1457 1458 irq_status = msm_host_readl(msm_host, host, 1459 msm_offset->core_pwrctl_status); 1460 irq_status &= INT_MASK; 1461 1462 msm_host_writel(msm_host, irq_status, host, 1463 msm_offset->core_pwrctl_clear); 1464 1465 /* 1466 * There is a rare HW scenario where the first clear pulse could be 1467 * lost when actual reset and clear/read of status register is 1468 * happening at a time. Hence, retry for at least 10 times to make 1469 * sure status register is cleared. Otherwise, this will result in 1470 * a spurious power IRQ resulting in system instability. 1471 */ 1472 while (irq_status & msm_host_readl(msm_host, host, 1473 msm_offset->core_pwrctl_status)) { 1474 if (retry == 0) { 1475 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n", 1476 mmc_hostname(host->mmc), irq_status); 1477 sdhci_msm_dump_pwr_ctrl_regs(host); 1478 WARN_ON(1); 1479 break; 1480 } 1481 msm_host_writel(msm_host, irq_status, host, 1482 msm_offset->core_pwrctl_clear); 1483 retry--; 1484 udelay(10); 1485 } 1486 1487 /* Handle BUS ON/OFF*/ 1488 if (irq_status & CORE_PWRCTL_BUS_ON) { 1489 pwr_state = REQ_BUS_ON; 1490 io_level = REQ_IO_HIGH; 1491 irq_ack |= CORE_PWRCTL_BUS_SUCCESS; 1492 } 1493 if (irq_status & CORE_PWRCTL_BUS_OFF) { 1494 pwr_state = REQ_BUS_OFF; 1495 io_level = REQ_IO_LOW; 1496 irq_ack |= CORE_PWRCTL_BUS_SUCCESS; 1497 } 1498 /* Handle IO LOW/HIGH */ 1499 if (irq_status & CORE_PWRCTL_IO_LOW) { 1500 io_level = REQ_IO_LOW; 1501 irq_ack |= CORE_PWRCTL_IO_SUCCESS; 1502 } 1503 if (irq_status & CORE_PWRCTL_IO_HIGH) { 1504 io_level = REQ_IO_HIGH; 1505 irq_ack |= CORE_PWRCTL_IO_SUCCESS; 1506 } 1507 1508 /* 1509 * The driver has to acknowledge the interrupt, switch voltages and 1510 * report back if it succeded or not to this register. The voltage 1511 * switches are handled by the sdhci core, so just report success. 1512 */ 1513 msm_host_writel(msm_host, irq_ack, host, 1514 msm_offset->core_pwrctl_ctl); 1515 1516 /* 1517 * If we don't have info regarding the voltage levels supported by 1518 * regulators, don't change the IO PAD PWR SWITCH. 1519 */ 1520 if (msm_host->caps_0 & CORE_VOLT_SUPPORT) { 1521 u32 new_config; 1522 /* 1523 * We should unset IO PAD PWR switch only if the register write 1524 * can set IO lines high and the regulator also switches to 3 V. 1525 * Else, we should keep the IO PAD PWR switch set. 1526 * This is applicable to certain targets where eMMC vccq supply 1527 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the 1528 * IO PAD PWR switch must be kept set to reflect actual 1529 * regulator voltage. This way, during initialization of 1530 * controllers with only 1.8V, we will set the IO PAD bit 1531 * without waiting for a REQ_IO_LOW. 1532 */ 1533 config = readl_relaxed(host->ioaddr + 1534 msm_offset->core_vendor_spec); 1535 new_config = config; 1536 1537 if ((io_level & REQ_IO_HIGH) && 1538 (msm_host->caps_0 & CORE_3_0V_SUPPORT)) 1539 new_config &= ~CORE_IO_PAD_PWR_SWITCH; 1540 else if ((io_level & REQ_IO_LOW) || 1541 (msm_host->caps_0 & CORE_1_8V_SUPPORT)) 1542 new_config |= CORE_IO_PAD_PWR_SWITCH; 1543 1544 if (config ^ new_config) 1545 writel_relaxed(new_config, host->ioaddr + 1546 msm_offset->core_vendor_spec); 1547 } 1548 1549 if (pwr_state) 1550 msm_host->curr_pwr_state = pwr_state; 1551 if (io_level) 1552 msm_host->curr_io_level = io_level; 1553 1554 pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n", 1555 mmc_hostname(msm_host->mmc), __func__, irq, irq_status, 1556 irq_ack); 1557 } 1558 1559 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) 1560 { 1561 struct sdhci_host *host = (struct sdhci_host *)data; 1562 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1563 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1564 1565 sdhci_msm_handle_pwr_irq(host, irq); 1566 msm_host->pwr_irq_flag = 1; 1567 sdhci_msm_complete_pwr_irq_wait(msm_host); 1568 1569 1570 return IRQ_HANDLED; 1571 } 1572 1573 static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host) 1574 { 1575 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1576 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1577 struct clk *core_clk = msm_host->bulk_clks[0].clk; 1578 1579 return clk_round_rate(core_clk, ULONG_MAX); 1580 } 1581 1582 static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host) 1583 { 1584 return SDHCI_MSM_MIN_CLOCK; 1585 } 1586 1587 /** 1588 * __sdhci_msm_set_clock - sdhci_msm clock control. 1589 * 1590 * Description: 1591 * MSM controller does not use internal divider and 1592 * instead directly control the GCC clock as per 1593 * HW recommendation. 1594 **/ 1595 static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) 1596 { 1597 u16 clk; 1598 /* 1599 * Keep actual_clock as zero - 1600 * - since there is no divider used so no need of having actual_clock. 1601 * - MSM controller uses SDCLK for data timeout calculation. If 1602 * actual_clock is zero, host->clock is taken for calculation. 1603 */ 1604 host->mmc->actual_clock = 0; 1605 1606 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1607 1608 if (clock == 0) 1609 return; 1610 1611 /* 1612 * MSM controller do not use clock divider. 1613 * Thus read SDHCI_CLOCK_CONTROL and only enable 1614 * clock with no divider value programmed. 1615 */ 1616 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1617 sdhci_enable_clk(host, clk); 1618 } 1619 1620 /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */ 1621 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) 1622 { 1623 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1624 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1625 1626 if (!clock) { 1627 msm_host->clk_rate = clock; 1628 goto out; 1629 } 1630 1631 sdhci_msm_hc_select_mode(host); 1632 1633 msm_set_clock_rate_for_bus_mode(host, clock); 1634 out: 1635 __sdhci_msm_set_clock(host, clock); 1636 } 1637 1638 /*****************************************************************************\ 1639 * * 1640 * MSM Command Queue Engine (CQE) * 1641 * * 1642 \*****************************************************************************/ 1643 1644 static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) 1645 { 1646 int cmd_error = 0; 1647 int data_error = 0; 1648 1649 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) 1650 return intmask; 1651 1652 cqhci_irq(host->mmc, intmask, cmd_error, data_error); 1653 return 0; 1654 } 1655 1656 static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) 1657 { 1658 struct sdhci_host *host = mmc_priv(mmc); 1659 unsigned long flags; 1660 u32 ctrl; 1661 1662 /* 1663 * When CQE is halted, the legacy SDHCI path operates only 1664 * on 16-byte descriptors in 64bit mode. 1665 */ 1666 if (host->flags & SDHCI_USE_64_BIT_DMA) 1667 host->desc_sz = 16; 1668 1669 spin_lock_irqsave(&host->lock, flags); 1670 1671 /* 1672 * During CQE command transfers, command complete bit gets latched. 1673 * So s/w should clear command complete interrupt status when CQE is 1674 * either halted or disabled. Otherwise unexpected SDCHI legacy 1675 * interrupt gets triggered when CQE is halted/disabled. 1676 */ 1677 ctrl = sdhci_readl(host, SDHCI_INT_ENABLE); 1678 ctrl |= SDHCI_INT_RESPONSE; 1679 sdhci_writel(host, ctrl, SDHCI_INT_ENABLE); 1680 sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); 1681 1682 spin_unlock_irqrestore(&host->lock, flags); 1683 1684 sdhci_cqe_disable(mmc, recovery); 1685 } 1686 1687 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { 1688 .enable = sdhci_cqe_enable, 1689 .disable = sdhci_msm_cqe_disable, 1690 }; 1691 1692 static int sdhci_msm_cqe_add_host(struct sdhci_host *host, 1693 struct platform_device *pdev) 1694 { 1695 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1696 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1697 struct cqhci_host *cq_host; 1698 bool dma64; 1699 u32 cqcfg; 1700 int ret; 1701 1702 /* 1703 * When CQE is halted, SDHC operates only on 16byte ADMA descriptors. 1704 * So ensure ADMA table is allocated for 16byte descriptors. 1705 */ 1706 if (host->caps & SDHCI_CAN_64BIT) 1707 host->alloc_desc_sz = 16; 1708 1709 ret = sdhci_setup_host(host); 1710 if (ret) 1711 return ret; 1712 1713 cq_host = cqhci_pltfm_init(pdev); 1714 if (IS_ERR(cq_host)) { 1715 ret = PTR_ERR(cq_host); 1716 dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); 1717 goto cleanup; 1718 } 1719 1720 msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; 1721 cq_host->ops = &sdhci_msm_cqhci_ops; 1722 1723 dma64 = host->flags & SDHCI_USE_64_BIT_DMA; 1724 1725 ret = cqhci_init(cq_host, host->mmc, dma64); 1726 if (ret) { 1727 dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", 1728 mmc_hostname(host->mmc), ret); 1729 goto cleanup; 1730 } 1731 1732 /* Disable cqe reset due to cqe enable signal */ 1733 cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1); 1734 cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN; 1735 cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1); 1736 1737 /* 1738 * SDHC expects 12byte ADMA descriptors till CQE is enabled. 1739 * So limit desc_sz to 12 so that the data commands that are sent 1740 * during card initialization (before CQE gets enabled) would 1741 * get executed without any issues. 1742 */ 1743 if (host->flags & SDHCI_USE_64_BIT_DMA) 1744 host->desc_sz = 12; 1745 1746 ret = __sdhci_add_host(host); 1747 if (ret) 1748 goto cleanup; 1749 1750 dev_info(&pdev->dev, "%s: CQE init: success\n", 1751 mmc_hostname(host->mmc)); 1752 return ret; 1753 1754 cleanup: 1755 sdhci_cleanup_host(host); 1756 return ret; 1757 } 1758 1759 /* 1760 * Platform specific register write functions. This is so that, if any 1761 * register write needs to be followed up by platform specific actions, 1762 * they can be added here. These functions can go to sleep when writes 1763 * to certain registers are done. 1764 * These functions are relying on sdhci_set_ios not using spinlock. 1765 */ 1766 static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) 1767 { 1768 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1769 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1770 u32 req_type = 0; 1771 1772 switch (reg) { 1773 case SDHCI_HOST_CONTROL2: 1774 req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW : 1775 REQ_IO_HIGH; 1776 break; 1777 case SDHCI_SOFTWARE_RESET: 1778 if (host->pwr && (val & SDHCI_RESET_ALL)) 1779 req_type = REQ_BUS_OFF; 1780 break; 1781 case SDHCI_POWER_CONTROL: 1782 req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON; 1783 break; 1784 case SDHCI_TRANSFER_MODE: 1785 msm_host->transfer_mode = val; 1786 break; 1787 case SDHCI_COMMAND: 1788 if (!msm_host->use_cdr) 1789 break; 1790 if ((msm_host->transfer_mode & SDHCI_TRNS_READ) && 1791 SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 && 1792 SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK) 1793 sdhci_msm_set_cdr(host, true); 1794 else 1795 sdhci_msm_set_cdr(host, false); 1796 break; 1797 } 1798 1799 if (req_type) { 1800 msm_host->pwr_irq_flag = 0; 1801 /* 1802 * Since this register write may trigger a power irq, ensure 1803 * all previous register writes are complete by this point. 1804 */ 1805 mb(); 1806 } 1807 return req_type; 1808 } 1809 1810 /* This function may sleep*/ 1811 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) 1812 { 1813 u32 req_type = 0; 1814 1815 req_type = __sdhci_msm_check_write(host, val, reg); 1816 writew_relaxed(val, host->ioaddr + reg); 1817 1818 if (req_type) 1819 sdhci_msm_check_power_status(host, req_type); 1820 } 1821 1822 /* This function may sleep*/ 1823 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) 1824 { 1825 u32 req_type = 0; 1826 1827 req_type = __sdhci_msm_check_write(host, val, reg); 1828 1829 writeb_relaxed(val, host->ioaddr + reg); 1830 1831 if (req_type) 1832 sdhci_msm_check_power_status(host, req_type); 1833 } 1834 1835 static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host) 1836 { 1837 struct mmc_host *mmc = msm_host->mmc; 1838 struct regulator *supply = mmc->supply.vqmmc; 1839 u32 caps = 0, config; 1840 struct sdhci_host *host = mmc_priv(mmc); 1841 const struct sdhci_msm_offset *msm_offset = msm_host->offset; 1842 1843 if (!IS_ERR(mmc->supply.vqmmc)) { 1844 if (regulator_is_supported_voltage(supply, 1700000, 1950000)) 1845 caps |= CORE_1_8V_SUPPORT; 1846 if (regulator_is_supported_voltage(supply, 2700000, 3600000)) 1847 caps |= CORE_3_0V_SUPPORT; 1848 1849 if (!caps) 1850 pr_warn("%s: 1.8/3V not supported for vqmmc\n", 1851 mmc_hostname(mmc)); 1852 } 1853 1854 if (caps) { 1855 /* 1856 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH 1857 * bit can be used as required later on. 1858 */ 1859 u32 io_level = msm_host->curr_io_level; 1860 1861 config = readl_relaxed(host->ioaddr + 1862 msm_offset->core_vendor_spec); 1863 config |= CORE_IO_PAD_PWR_SWITCH_EN; 1864 1865 if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT)) 1866 config &= ~CORE_IO_PAD_PWR_SWITCH; 1867 else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT)) 1868 config |= CORE_IO_PAD_PWR_SWITCH; 1869 1870 writel_relaxed(config, 1871 host->ioaddr + msm_offset->core_vendor_spec); 1872 } 1873 msm_host->caps_0 |= caps; 1874 pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps); 1875 } 1876 1877 static void sdhci_msm_reset(struct sdhci_host *host, u8 mask) 1878 { 1879 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL)) 1880 cqhci_deactivate(host->mmc); 1881 sdhci_reset(host, mask); 1882 } 1883 1884 #define DRIVER_NAME "sdhci_msm" 1885 #define SDHCI_MSM_DUMP(f, x...) \ 1886 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 1887 1888 void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) 1889 { 1890 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1891 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1892 const struct sdhci_msm_offset *msm_offset = msm_host->offset; 1893 1894 SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n"); 1895 1896 SDHCI_MSM_DUMP( 1897 "DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n", 1898 readl_relaxed(host->ioaddr + msm_offset->core_dll_status), 1899 readl_relaxed(host->ioaddr + msm_offset->core_dll_config), 1900 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2)); 1901 SDHCI_MSM_DUMP( 1902 "DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n", 1903 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3), 1904 readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl), 1905 readl_relaxed(host->ioaddr + msm_offset->core_ddr_config)); 1906 SDHCI_MSM_DUMP( 1907 "Vndr func: 0x%08x | Vndr func2 : 0x%08x Vndr func3: 0x%08x\n", 1908 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec), 1909 readl_relaxed(host->ioaddr + 1910 msm_offset->core_vendor_spec_func2), 1911 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3)); 1912 } 1913 1914 static const struct sdhci_msm_variant_ops mci_var_ops = { 1915 .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed, 1916 .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed, 1917 }; 1918 1919 static const struct sdhci_msm_variant_ops v5_var_ops = { 1920 .msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed, 1921 .msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed, 1922 }; 1923 1924 static const struct sdhci_msm_variant_info sdhci_msm_mci_var = { 1925 .var_ops = &mci_var_ops, 1926 .offset = &sdhci_msm_mci_offset, 1927 }; 1928 1929 static const struct sdhci_msm_variant_info sdhci_msm_v5_var = { 1930 .mci_removed = true, 1931 .var_ops = &v5_var_ops, 1932 .offset = &sdhci_msm_v5_offset, 1933 }; 1934 1935 static const struct sdhci_msm_variant_info sdm845_sdhci_var = { 1936 .mci_removed = true, 1937 .restore_dll_config = true, 1938 .var_ops = &v5_var_ops, 1939 .offset = &sdhci_msm_v5_offset, 1940 }; 1941 1942 static const struct sdhci_msm_variant_info sm8250_sdhci_var = { 1943 .mci_removed = true, 1944 .uses_tassadar_dll = true, 1945 .var_ops = &v5_var_ops, 1946 .offset = &sdhci_msm_v5_offset, 1947 }; 1948 1949 static const struct of_device_id sdhci_msm_dt_match[] = { 1950 {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var}, 1951 {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var}, 1952 {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var}, 1953 {.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var}, 1954 {}, 1955 }; 1956 1957 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match); 1958 1959 static const struct sdhci_ops sdhci_msm_ops = { 1960 .reset = sdhci_msm_reset, 1961 .set_clock = sdhci_msm_set_clock, 1962 .get_min_clock = sdhci_msm_get_min_clock, 1963 .get_max_clock = sdhci_msm_get_max_clock, 1964 .set_bus_width = sdhci_set_bus_width, 1965 .set_uhs_signaling = sdhci_msm_set_uhs_signaling, 1966 .write_w = sdhci_msm_writew, 1967 .write_b = sdhci_msm_writeb, 1968 .irq = sdhci_msm_cqe_irq, 1969 .dump_vendor_regs = sdhci_msm_dump_vendor_regs, 1970 }; 1971 1972 static const struct sdhci_pltfm_data sdhci_msm_pdata = { 1973 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | 1974 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1975 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | 1976 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, 1977 1978 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1979 .ops = &sdhci_msm_ops, 1980 }; 1981 1982 static inline void sdhci_msm_get_of_property(struct platform_device *pdev, 1983 struct sdhci_host *host) 1984 { 1985 struct device_node *node = pdev->dev.of_node; 1986 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1987 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1988 1989 if (of_property_read_u32(node, "qcom,ddr-config", 1990 &msm_host->ddr_config)) 1991 msm_host->ddr_config = DDR_CONFIG_POR_VAL; 1992 1993 of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); 1994 } 1995 1996 1997 static int sdhci_msm_probe(struct platform_device *pdev) 1998 { 1999 struct sdhci_host *host; 2000 struct sdhci_pltfm_host *pltfm_host; 2001 struct sdhci_msm_host *msm_host; 2002 struct clk *clk; 2003 int ret; 2004 u16 host_version, core_minor; 2005 u32 core_version, config; 2006 u8 core_major; 2007 const struct sdhci_msm_offset *msm_offset; 2008 const struct sdhci_msm_variant_info *var_info; 2009 struct device_node *node = pdev->dev.of_node; 2010 2011 host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); 2012 if (IS_ERR(host)) 2013 return PTR_ERR(host); 2014 2015 host->sdma_boundary = 0; 2016 pltfm_host = sdhci_priv(host); 2017 msm_host = sdhci_pltfm_priv(pltfm_host); 2018 msm_host->mmc = host->mmc; 2019 msm_host->pdev = pdev; 2020 2021 ret = mmc_of_parse(host->mmc); 2022 if (ret) 2023 goto pltfm_free; 2024 2025 /* 2026 * Based on the compatible string, load the required msm host info from 2027 * the data associated with the version info. 2028 */ 2029 var_info = of_device_get_match_data(&pdev->dev); 2030 2031 msm_host->mci_removed = var_info->mci_removed; 2032 msm_host->restore_dll_config = var_info->restore_dll_config; 2033 msm_host->var_ops = var_info->var_ops; 2034 msm_host->offset = var_info->offset; 2035 msm_host->uses_tassadar_dll = var_info->uses_tassadar_dll; 2036 2037 msm_offset = msm_host->offset; 2038 2039 sdhci_get_of_property(pdev); 2040 sdhci_msm_get_of_property(pdev, host); 2041 2042 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; 2043 2044 /* Setup SDCC bus voter clock. */ 2045 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); 2046 if (!IS_ERR(msm_host->bus_clk)) { 2047 /* Vote for max. clk rate for max. performance */ 2048 ret = clk_set_rate(msm_host->bus_clk, INT_MAX); 2049 if (ret) 2050 goto pltfm_free; 2051 ret = clk_prepare_enable(msm_host->bus_clk); 2052 if (ret) 2053 goto pltfm_free; 2054 } 2055 2056 /* Setup main peripheral bus clock */ 2057 clk = devm_clk_get(&pdev->dev, "iface"); 2058 if (IS_ERR(clk)) { 2059 ret = PTR_ERR(clk); 2060 dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); 2061 goto bus_clk_disable; 2062 } 2063 msm_host->bulk_clks[1].clk = clk; 2064 2065 /* Setup SDC MMC clock */ 2066 clk = devm_clk_get(&pdev->dev, "core"); 2067 if (IS_ERR(clk)) { 2068 ret = PTR_ERR(clk); 2069 dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); 2070 goto bus_clk_disable; 2071 } 2072 msm_host->bulk_clks[0].clk = clk; 2073 2074 msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core"); 2075 if (IS_ERR(msm_host->opp_table)) { 2076 ret = PTR_ERR(msm_host->opp_table); 2077 goto bus_clk_disable; 2078 } 2079 2080 /* OPP table is optional */ 2081 ret = dev_pm_opp_of_add_table(&pdev->dev); 2082 if (!ret) { 2083 msm_host->has_opp_table = true; 2084 } else if (ret != -ENODEV) { 2085 dev_err(&pdev->dev, "Invalid OPP table in Device tree\n"); 2086 goto opp_cleanup; 2087 } 2088 2089 /* Vote for maximum clock rate for maximum performance */ 2090 ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX); 2091 if (ret) 2092 dev_warn(&pdev->dev, "core clock boost failed\n"); 2093 2094 clk = devm_clk_get(&pdev->dev, "cal"); 2095 if (IS_ERR(clk)) 2096 clk = NULL; 2097 msm_host->bulk_clks[2].clk = clk; 2098 2099 clk = devm_clk_get(&pdev->dev, "sleep"); 2100 if (IS_ERR(clk)) 2101 clk = NULL; 2102 msm_host->bulk_clks[3].clk = clk; 2103 2104 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), 2105 msm_host->bulk_clks); 2106 if (ret) 2107 goto opp_cleanup; 2108 2109 /* 2110 * xo clock is needed for FLL feature of cm_dll. 2111 * In case if xo clock is not mentioned in DT, warn and proceed. 2112 */ 2113 msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo"); 2114 if (IS_ERR(msm_host->xo_clk)) { 2115 ret = PTR_ERR(msm_host->xo_clk); 2116 dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret); 2117 } 2118 2119 if (!msm_host->mci_removed) { 2120 msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); 2121 if (IS_ERR(msm_host->core_mem)) { 2122 ret = PTR_ERR(msm_host->core_mem); 2123 goto clk_disable; 2124 } 2125 } 2126 2127 /* Reset the vendor spec register to power on reset state */ 2128 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL, 2129 host->ioaddr + msm_offset->core_vendor_spec); 2130 2131 if (!msm_host->mci_removed) { 2132 /* Set HC_MODE_EN bit in HC_MODE register */ 2133 msm_host_writel(msm_host, HC_MODE_EN, host, 2134 msm_offset->core_hc_mode); 2135 config = msm_host_readl(msm_host, host, 2136 msm_offset->core_hc_mode); 2137 config |= FF_CLK_SW_RST_DIS; 2138 msm_host_writel(msm_host, config, host, 2139 msm_offset->core_hc_mode); 2140 } 2141 2142 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); 2143 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", 2144 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >> 2145 SDHCI_VENDOR_VER_SHIFT)); 2146 2147 core_version = msm_host_readl(msm_host, host, 2148 msm_offset->core_mci_version); 2149 core_major = (core_version & CORE_VERSION_MAJOR_MASK) >> 2150 CORE_VERSION_MAJOR_SHIFT; 2151 core_minor = core_version & CORE_VERSION_MINOR_MASK; 2152 dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n", 2153 core_version, core_major, core_minor); 2154 2155 if (core_major == 1 && core_minor >= 0x42) 2156 msm_host->use_14lpp_dll_reset = true; 2157 2158 /* 2159 * SDCC 5 controller with major version 1, minor version 0x34 and later 2160 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL. 2161 */ 2162 if (core_major == 1 && core_minor < 0x34) 2163 msm_host->use_cdclp533 = true; 2164 2165 /* 2166 * Support for some capabilities is not advertised by newer 2167 * controller versions and must be explicitly enabled. 2168 */ 2169 if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) { 2170 config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); 2171 config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT; 2172 writel_relaxed(config, host->ioaddr + 2173 msm_offset->core_vendor_spec_capabilities0); 2174 } 2175 2176 if (core_major == 1 && core_minor >= 0x49) 2177 msm_host->updated_ddr_cfg = true; 2178 2179 /* 2180 * Power on reset state may trigger power irq if previous status of 2181 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq 2182 * interrupt in GIC, any pending power irq interrupt should be 2183 * acknowledged. Otherwise power irq interrupt handler would be 2184 * fired prematurely. 2185 */ 2186 sdhci_msm_handle_pwr_irq(host, 0); 2187 2188 /* 2189 * Ensure that above writes are propogated before interrupt enablement 2190 * in GIC. 2191 */ 2192 mb(); 2193 2194 /* Setup IRQ for handling power/voltage tasks with PMIC */ 2195 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); 2196 if (msm_host->pwr_irq < 0) { 2197 ret = msm_host->pwr_irq; 2198 goto clk_disable; 2199 } 2200 2201 sdhci_msm_init_pwr_irq_wait(msm_host); 2202 /* Enable pwr irq interrupts */ 2203 msm_host_writel(msm_host, INT_MASK, host, 2204 msm_offset->core_pwrctl_mask); 2205 2206 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, 2207 sdhci_msm_pwr_irq, IRQF_ONESHOT, 2208 dev_name(&pdev->dev), host); 2209 if (ret) { 2210 dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); 2211 goto clk_disable; 2212 } 2213 2214 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; 2215 2216 pm_runtime_get_noresume(&pdev->dev); 2217 pm_runtime_set_active(&pdev->dev); 2218 pm_runtime_enable(&pdev->dev); 2219 pm_runtime_set_autosuspend_delay(&pdev->dev, 2220 MSM_MMC_AUTOSUSPEND_DELAY_MS); 2221 pm_runtime_use_autosuspend(&pdev->dev); 2222 2223 host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; 2224 if (of_property_read_bool(node, "supports-cqe")) 2225 ret = sdhci_msm_cqe_add_host(host, pdev); 2226 else 2227 ret = sdhci_add_host(host); 2228 if (ret) 2229 goto pm_runtime_disable; 2230 sdhci_msm_set_regulator_caps(msm_host); 2231 2232 pm_runtime_mark_last_busy(&pdev->dev); 2233 pm_runtime_put_autosuspend(&pdev->dev); 2234 2235 return 0; 2236 2237 pm_runtime_disable: 2238 pm_runtime_disable(&pdev->dev); 2239 pm_runtime_set_suspended(&pdev->dev); 2240 pm_runtime_put_noidle(&pdev->dev); 2241 clk_disable: 2242 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2243 msm_host->bulk_clks); 2244 opp_cleanup: 2245 if (msm_host->has_opp_table) 2246 dev_pm_opp_of_remove_table(&pdev->dev); 2247 dev_pm_opp_put_clkname(msm_host->opp_table); 2248 bus_clk_disable: 2249 if (!IS_ERR(msm_host->bus_clk)) 2250 clk_disable_unprepare(msm_host->bus_clk); 2251 pltfm_free: 2252 sdhci_pltfm_free(pdev); 2253 return ret; 2254 } 2255 2256 static int sdhci_msm_remove(struct platform_device *pdev) 2257 { 2258 struct sdhci_host *host = platform_get_drvdata(pdev); 2259 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2260 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2261 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 2262 0xffffffff); 2263 2264 sdhci_remove_host(host, dead); 2265 2266 if (msm_host->has_opp_table) 2267 dev_pm_opp_of_remove_table(&pdev->dev); 2268 dev_pm_opp_put_clkname(msm_host->opp_table); 2269 pm_runtime_get_sync(&pdev->dev); 2270 pm_runtime_disable(&pdev->dev); 2271 pm_runtime_put_noidle(&pdev->dev); 2272 2273 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2274 msm_host->bulk_clks); 2275 if (!IS_ERR(msm_host->bus_clk)) 2276 clk_disable_unprepare(msm_host->bus_clk); 2277 sdhci_pltfm_free(pdev); 2278 return 0; 2279 } 2280 2281 static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev) 2282 { 2283 struct sdhci_host *host = dev_get_drvdata(dev); 2284 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2285 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2286 2287 /* Drop the performance vote */ 2288 dev_pm_opp_set_rate(dev, 0); 2289 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 2290 msm_host->bulk_clks); 2291 2292 return 0; 2293 } 2294 2295 static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev) 2296 { 2297 struct sdhci_host *host = dev_get_drvdata(dev); 2298 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 2299 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 2300 int ret; 2301 2302 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), 2303 msm_host->bulk_clks); 2304 if (ret) 2305 return ret; 2306 /* 2307 * Whenever core-clock is gated dynamically, it's needed to 2308 * restore the SDR DLL settings when the clock is ungated. 2309 */ 2310 if (msm_host->restore_dll_config && msm_host->clk_rate) 2311 ret = sdhci_msm_restore_sdr_dll_config(host); 2312 2313 dev_pm_opp_set_rate(dev, msm_host->clk_rate); 2314 2315 return ret; 2316 } 2317 2318 static const struct dev_pm_ops sdhci_msm_pm_ops = { 2319 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 2320 pm_runtime_force_resume) 2321 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, 2322 sdhci_msm_runtime_resume, 2323 NULL) 2324 }; 2325 2326 static struct platform_driver sdhci_msm_driver = { 2327 .probe = sdhci_msm_probe, 2328 .remove = sdhci_msm_remove, 2329 .driver = { 2330 .name = "sdhci_msm", 2331 .of_match_table = sdhci_msm_dt_match, 2332 .pm = &sdhci_msm_pm_ops, 2333 }, 2334 }; 2335 2336 module_platform_driver(sdhci_msm_driver); 2337 2338 MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver"); 2339 MODULE_LICENSE("GPL v2"); 2340