/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/iopoll.h>

#include "sdhci-pltfm.h"
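
/*
 * Note on the offsets below: they span two register regions. CORE_HC_MODE,
 * CORE_MCI_VERSION and the CORE_PWRCTL_* registers are accessed through the
 * separate MSM core region (msm_host->core_mem), while the DLL, CDC and
 * VENDOR_SPEC registers are offsets into the standard SDHCI register region
 * (host->ioaddr), as can be seen from their users below.
 */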

#define CORE_MCI_VERSION		0x50
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK		0xff

#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define CORE_POWER		0x0
#define CORE_SW_RST		BIT(7)
#define FF_CLK_SW_RST_DIS	BIT(13)

#define CORE_PWRCTL_STATUS	0xdc
#define CORE_PWRCTL_MASK	0xe0
#define CORE_PWRCTL_CLEAR	0xe4
#define CORE_PWRCTL_CTL		0xe8
#define CORE_PWRCTL_BUS_OFF	BIT(0)
#define CORE_PWRCTL_BUS_ON	BIT(1)
#define CORE_PWRCTL_IO_LOW	BIT(2)
#define CORE_PWRCTL_IO_HIGH	BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS	BIT(0)
#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
#define REQ_BUS_OFF		BIT(0)
#define REQ_BUS_ON		BIT(1)
#define REQ_IO_LOW		BIT(2)
#define REQ_IO_HIGH		BIT(3)
#define INT_MASK		0xf
#define MAX_PHASES		16
#define CORE_DLL_LOCK		BIT(7)
#define CORE_DDR_DLL_LOCK	BIT(11)
#define CORE_DLL_EN		BIT(16)
#define CORE_CDR_EN		BIT(17)
#define CORE_CK_OUT_EN		BIT(18)
#define CORE_CDR_EXT_EN		BIT(19)
#define CORE_DLL_PDN		BIT(29)
#define CORE_DLL_RST		BIT(30)
#define CORE_DLL_CONFIG		0x100
#define CORE_CMD_DAT_TRACK_SEL	BIT(0)
#define CORE_DLL_STATUS		0x108

#define CORE_DLL_CONFIG_2	0x1b4
#define CORE_DDR_CAL_EN		BIT(0)
#define CORE_FLL_CYCLE_CNT	BIT(18)
#define CORE_DLL_CLOCK_DISABLE	BIT(21)

#define CORE_VENDOR_SPEC	0x10c
#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
#define CORE_CLK_PWRSAVE	BIT(1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_HC_SELECT_IN_EN	BIT(18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		BIT(16)
#define CORE_HW_AUTOCAL_ENA		BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		BIT(0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
#define CORE_CDC_SWITCH_RC_EN		BIT(1)

#define CORE_DDR_200_CFG		0x184
#define CORE_CDC_T4_DLY_SEL		BIT(0)
#define CORE_CMDIN_RCLK_EN		BIT(1)
#define CORE_START_CDC_TRAFFIC		BIT(6)

#define CORE_VENDOR_SPEC3	0x1b0
#define CORE_PWRSAVE_DLL	BIT(3)

#define CORE_DDR_CONFIG		0x1b8
#define DDR_CONFIG_POR_VAL	0x80040853

#define CORE_VENDOR_SPEC_CAPABILITIES0	0x11c

#define INVALID_TUNING_PHASE	-1
#define SDHCI_MSM_MIN_CLOCK	400000
#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT	20
#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT	24
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS	5000

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
	unsigned long clk_rate;
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
};

static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
						    unsigned int clock)
{
	struct mmc_ios ios = host->mmc->ios;
	/*
	 * The SDHC requires the internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller
	 * uses the faster clock (100/400 MHz) for some of its parts and
	 * sends the actual required clock (50/200 MHz) to the card.
	 */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		clock *= 2;
	return clock;
}

static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	int rc;

	clock = msm_get_clock_rate_for_bus_mode(host, clock);
	rc = clk_set_rate(core_clk, clock);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), clock,
		       curr_ios.timing);
		return;
	}
	msm_host->clk_rate = clock;
	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), clk_get_rate(core_clk),
		 curr_ios.timing);
}
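
/*
 * Worked example for the DDR doubling above: a 200 MHz card clock requested
 * in HS400 mode (or with SDHCI_HS400_TUNING set) makes
 * msm_get_clock_rate_for_bus_mode() return 400 MHz, which is the rate
 * programmed into the GCC core clock; likewise DDR50/DDR52 at 50 MHz
 * programs a 100 MHz core clock while the card still sees 50 MHz.
 */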

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;

	/* Poll for CK_OUT_EN bit, max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
		       CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
			       CORE_CK_OUT_EN);
	}

	return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}
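
/*
 * Example for the phase programming above: for phase 5 the value written
 * into the CDR_SELEXT field (bits [23:20] of DLL_CONFIG) is
 * grey_coded_phase_table[5] = 0x7, and the write is bracketed by the
 * CK_OUT_EN disable/enable handshake polled by msm_dll_poll_ck_out_en().
 */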

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between the 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}
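
	/*
	 * Worked example for the merge below: if the tuned phases are
	 * {0, 1, 2, 13, 14, 15}, they land in two rows, {0, 1, 2} and
	 * {13, 14, 15}. Phase 0 and phase 15 are both present, so the
	 * rows are merged into the single cyclic window
	 * {13, 14, 15, 0, 1, 2} before the 3/4 selection further down.
	 */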

	/* If 2 valid windows form a cycle then merge them as a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases across both windows must be
			 * less than MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
}
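
/*
 * Illustration of the FLL cycle-count programming done in msm_init_cm_dll()
 * below, assuming the typical 19.2 MHz TCXO on these SoCs: with
 * host->clock = 192 MHz and FLL_CYCLE_CNT clear, the value written into
 * bits [17:10] of DLL_CONFIG_2 is
 * DIV_ROUND_CLOSEST_ULL(192000000 * 4, 19200000) = 40.
 */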
431 */ 432 config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC); 433 config &= ~CORE_CLK_PWRSAVE; 434 writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC); 435 436 if (msm_host->use_14lpp_dll_reset) { 437 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 438 config &= ~CORE_CK_OUT_EN; 439 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 440 441 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2); 442 config |= CORE_DLL_CLOCK_DISABLE; 443 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2); 444 } 445 446 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 447 config |= CORE_DLL_RST; 448 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 449 450 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 451 config |= CORE_DLL_PDN; 452 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 453 msm_cm_dll_set_freq(host); 454 455 if (msm_host->use_14lpp_dll_reset && 456 !IS_ERR_OR_NULL(msm_host->xo_clk)) { 457 u32 mclk_freq = 0; 458 459 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2); 460 config &= CORE_FLL_CYCLE_CNT; 461 if (config) 462 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8), 463 clk_get_rate(msm_host->xo_clk)); 464 else 465 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4), 466 clk_get_rate(msm_host->xo_clk)); 467 468 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2); 469 config &= ~(0xFF << 10); 470 config |= mclk_freq << 10; 471 472 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2); 473 /* wait for 5us before enabling DLL clock */ 474 udelay(5); 475 } 476 477 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 478 config &= ~CORE_DLL_RST; 479 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 480 481 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 482 config &= ~CORE_DLL_PDN; 483 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 484 485 if (msm_host->use_14lpp_dll_reset) { 486 msm_cm_dll_set_freq(host); 487 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2); 488 config &= ~CORE_DLL_CLOCK_DISABLE; 489 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2); 490 } 491 492 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 493 config |= CORE_DLL_EN; 494 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 495 496 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 497 config |= CORE_CK_OUT_EN; 498 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 499 500 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */ 501 while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) & 502 CORE_DLL_LOCK)) { 503 /* max. 

static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				       CORE_VENDOR_SPEC3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
			       CORE_VENDOR_SPEC3);
	}

	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.
	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
	 * in VENDOR_SPEC_FUNC
	 */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * CORE_DLL_STATUS to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
						CORE_DLL_STATUS,
						dll_lock,
						(dll_lock &
						 (CORE_DLL_LOCK |
						  CORE_DDR_DLL_LOCK)), 10,
						1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
609 * 610 * HS200 - SDR104 (Since they both are equivalent in functionality) 611 * HS400 - This involves multiple configurations 612 * Initially SDR104 - when tuning is required as HS200 613 * Then when switching to DDR @ 400MHz (HS400) we use 614 * the vendor specific HC_SELECT_IN to control the mode. 615 * 616 * In addition to controlling the modes we also need to select the 617 * correct input clock for DLL depending on the mode. 618 * 619 * HS400 - divided clock (free running MCLK/2) 620 * All other modes - default (free running MCLK) 621 */ 622 static void sdhci_msm_hc_select_mode(struct sdhci_host *host) 623 { 624 struct mmc_ios ios = host->mmc->ios; 625 626 if (ios.timing == MMC_TIMING_MMC_HS400 || 627 host->flags & SDHCI_HS400_TUNING) 628 msm_hc_select_hs400(host); 629 else 630 msm_hc_select_default(host); 631 } 632 633 static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host) 634 { 635 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 636 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 637 u32 config, calib_done; 638 int ret; 639 640 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); 641 642 /* 643 * Retuning in HS400 (DDR mode) will fail, just reset the 644 * tuning block and restore the saved tuning phase. 645 */ 646 ret = msm_init_cm_dll(host); 647 if (ret) 648 goto out; 649 650 /* Set the selected phase in delay line hw block */ 651 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); 652 if (ret) 653 goto out; 654 655 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 656 config |= CORE_CMD_DAT_TRACK_SEL; 657 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 658 659 config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG); 660 config &= ~CORE_CDC_T4_DLY_SEL; 661 writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG); 662 663 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); 664 config &= ~CORE_CDC_SWITCH_BYPASS_OFF; 665 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); 666 667 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); 668 config |= CORE_CDC_SWITCH_RC_EN; 669 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); 670 671 config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG); 672 config &= ~CORE_START_CDC_TRAFFIC; 673 writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG); 674 675 /* Perform CDC Register Initialization Sequence */ 676 677 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 678 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1); 679 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); 680 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1); 681 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG); 682 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG); 683 writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG); 684 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG); 685 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG); 686 687 /* CDC HW Calibration */ 688 689 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 690 config |= CORE_SW_TRIG_FULL_CALIB; 691 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 692 693 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 694 config &= ~CORE_SW_TRIG_FULL_CALIB; 695 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 696 697 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); 698 config |= CORE_HW_AUTOCAL_ENA; 699 writel_relaxed(config, 

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config;
	int ret;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the CORE_DDR_CONFIG register defaults to the desired
	 * configuration on reset, but we still reprogram the power-on
	 * reset (POR) value in case a bootloader modified it. In the
	 * future, if this changes, the desired values will need to be
	 * programmed appropriately.
	 */
	writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
	config |= CORE_PWRSAVE_DLL;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);

	/*
	 * Drain the writebuffer to ensure the above DLL calibration
	 * writes have completed and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
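
/*
 * Which of the two calibration routines above gets used is fixed at probe
 * time from the core version: cores with major version 1 and minor version
 * below 0x34 set use_cdclp533 and take sdhci_msm_cdclp533_calibration(),
 * all later ones take sdhci_msm_cm_dll_sdc4_calibration().
 */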
800 */ 801 ret = msm_init_cm_dll(host); 802 if (ret) 803 goto out; 804 805 if (!mmc->ios.enhanced_strobe) { 806 /* Set the selected phase in delay line hw block */ 807 ret = msm_config_cm_dll_phase(host, 808 msm_host->saved_tuning_phase); 809 if (ret) 810 goto out; 811 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); 812 config |= CORE_CMD_DAT_TRACK_SEL; 813 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); 814 } 815 816 if (msm_host->use_cdclp533) 817 ret = sdhci_msm_cdclp533_calibration(host); 818 else 819 ret = sdhci_msm_cm_dll_sdc4_calibration(host); 820 out: 821 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), 822 __func__, ret); 823 return ret; 824 } 825 826 static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) 827 { 828 struct sdhci_host *host = mmc_priv(mmc); 829 int tuning_seq_cnt = 3; 830 u8 phase, tuned_phases[16], tuned_phase_cnt = 0; 831 int rc; 832 struct mmc_ios ios = host->mmc->ios; 833 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 834 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 835 836 /* 837 * Tuning is required for SDR104, HS200 and HS400 cards and 838 * if clock frequency is greater than 100MHz in these modes. 839 */ 840 if (host->clock <= CORE_FREQ_100MHZ || 841 !(ios.timing == MMC_TIMING_MMC_HS400 || 842 ios.timing == MMC_TIMING_MMC_HS200 || 843 ios.timing == MMC_TIMING_UHS_SDR104)) 844 return 0; 845 846 /* 847 * For HS400 tuning in HS200 timing requires: 848 * - select MCLK/2 in VENDOR_SPEC 849 * - program MCLK to 400MHz (or nearest supported) in GCC 850 */ 851 if (host->flags & SDHCI_HS400_TUNING) { 852 sdhci_msm_hc_select_mode(host); 853 msm_set_clock_rate_for_bus_mode(host, ios.clock); 854 host->flags &= ~SDHCI_HS400_TUNING; 855 } 856 857 retry: 858 /* First of all reset the tuning block */ 859 rc = msm_init_cm_dll(host); 860 if (rc) 861 return rc; 862 863 phase = 0; 864 do { 865 /* Set the phase in delay line hw block */ 866 rc = msm_config_cm_dll_phase(host, phase); 867 if (rc) 868 return rc; 869 870 msm_host->saved_tuning_phase = phase; 871 rc = mmc_send_tuning(mmc, opcode, NULL); 872 if (!rc) { 873 /* Tuning is successful at this tuning point */ 874 tuned_phases[tuned_phase_cnt++] = phase; 875 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", 876 mmc_hostname(mmc), phase); 877 } 878 } while (++phase < ARRAY_SIZE(tuned_phases)); 879 880 if (tuned_phase_cnt) { 881 rc = msm_find_most_appropriate_phase(host, tuned_phases, 882 tuned_phase_cnt); 883 if (rc < 0) 884 return rc; 885 else 886 phase = rc; 887 888 /* 889 * Finally set the selected phase in delay 890 * line hw block. 891 */ 892 rc = msm_config_cm_dll_phase(host, phase); 893 if (rc) 894 return rc; 895 dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n", 896 mmc_hostname(mmc), phase); 897 } else { 898 if (--tuning_seq_cnt) 899 goto retry; 900 /* Tuning failed */ 901 dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n", 902 mmc_hostname(mmc)); 903 rc = -EIO; 904 } 905 906 if (!rc) 907 msm_host->tuning_done = true; 908 return rc; 909 } 910 911 /* 912 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation. 913 * This needs to be done for both tuning and enhanced_strobe mode. 914 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz 915 * fixed feedback clock is used. 

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * The DLL is not required for clock <= 100MHz.
		 * Thus, make sure it is disabled when not required.
		 */
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status API should be called when register writes
 * which can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
 * happen. The state to which those writes will change the IO lines should
 * be passed as the argument req_type. This API will check whether the IO
 * line's state is already the expected state and will wait for the power
 * irq only if the power irq is expected to be triggered based on the
 * current and the expected IO line state.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The IRQ for request type IO High/LOW will be generated when -
	 * there is a state change in 1.8V enable bit (bit 3) of
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
					msm_host->pwr_irq_flag,
					msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
		 __func__, req_type);
}

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
	       mmc_hostname(host->mmc),
	       readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
	       readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
	       readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
}
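
/*
 * Example of the handshake handled below: on a bus-on request the
 * controller sets CORE_PWRCTL_BUS_ON in PWRCTL_STATUS and raises the power
 * irq; the handler clears the status through PWRCTL_CLEAR and acks it by
 * writing CORE_PWRCTL_BUS_SUCCESS to PWRCTL_CTL (the actual voltage
 * switching is left to the sdhci core).
 */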

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;
	int retry = 10;
	int pwr_state = 0, io_level = 0;

	irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	irq_status &= INT_MASK;

	writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and the clear/read of the status
	 * register happen at the same time. Hence, retry at least 10 times
	 * to make sure the status register is cleared. Otherwise, this will
	 * result in a spurious power IRQ, resulting in system instability.
	 */
	while (irq_status & readl_relaxed(msm_host->core_mem +
					  CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
			       mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		writel_relaxed(irq_status,
			       msm_host->core_mem + CORE_PWRCTL_CLEAR);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back if it succeeded or not to this register. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		 irq_ack);
}

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}

/**
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use an internal divider and instead
 * directly controls the GCC clock rate, as per the HW recommendation.
 **/
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	/*
	 * Keep actual_clock as zero -
	 * - since there is no divider used, there is no need for actual_clock.
	 * - the MSM controller uses SDCLK for data timeout calculation. If
	 *   actual_clock is zero, host->clock is taken for calculation.
	 */
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider. Thus read
	 * SDHCI_CLOCK_CONTROL and only enable the clock with no divider
	 * value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}

/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!clock) {
		msm_host->clk_rate = clock;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, clock);
out:
	__sdhci_msm_set_clock(host, clock);
}
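
/*
 * Example of the write-hook flow below: a write of 0 to
 * SDHCI_POWER_CONTROL maps to REQ_BUS_OFF in __sdhci_msm_check_write(), so
 * sdhci_msm_writeb() performs the register write and then blocks in
 * sdhci_msm_check_power_status() until the power irq acks the bus-off
 * transition or the MSM_PWR_IRQ_TIMEOUT_MS timeout expires.
 */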
1271 */ 1272 mb(); 1273 } 1274 return req_type; 1275 } 1276 1277 /* This function may sleep*/ 1278 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) 1279 { 1280 u32 req_type = 0; 1281 1282 req_type = __sdhci_msm_check_write(host, val, reg); 1283 writew_relaxed(val, host->ioaddr + reg); 1284 1285 if (req_type) 1286 sdhci_msm_check_power_status(host, req_type); 1287 } 1288 1289 /* This function may sleep*/ 1290 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) 1291 { 1292 u32 req_type = 0; 1293 1294 req_type = __sdhci_msm_check_write(host, val, reg); 1295 1296 writeb_relaxed(val, host->ioaddr + reg); 1297 1298 if (req_type) 1299 sdhci_msm_check_power_status(host, req_type); 1300 } 1301 1302 static const struct of_device_id sdhci_msm_dt_match[] = { 1303 { .compatible = "qcom,sdhci-msm-v4" }, 1304 {}, 1305 }; 1306 1307 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match); 1308 1309 static const struct sdhci_ops sdhci_msm_ops = { 1310 .reset = sdhci_reset, 1311 .set_clock = sdhci_msm_set_clock, 1312 .get_min_clock = sdhci_msm_get_min_clock, 1313 .get_max_clock = sdhci_msm_get_max_clock, 1314 .set_bus_width = sdhci_set_bus_width, 1315 .set_uhs_signaling = sdhci_msm_set_uhs_signaling, 1316 .write_w = sdhci_msm_writew, 1317 .write_b = sdhci_msm_writeb, 1318 }; 1319 1320 static const struct sdhci_pltfm_data sdhci_msm_pdata = { 1321 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | 1322 SDHCI_QUIRK_NO_CARD_NO_RESET | 1323 SDHCI_QUIRK_SINGLE_POWER_WRITE | 1324 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, 1325 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1326 .ops = &sdhci_msm_ops, 1327 }; 1328 1329 static int sdhci_msm_probe(struct platform_device *pdev) 1330 { 1331 struct sdhci_host *host; 1332 struct sdhci_pltfm_host *pltfm_host; 1333 struct sdhci_msm_host *msm_host; 1334 struct resource *core_memres; 1335 struct clk *clk; 1336 int ret; 1337 u16 host_version, core_minor; 1338 u32 core_version, config; 1339 u8 core_major; 1340 1341 host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); 1342 if (IS_ERR(host)) 1343 return PTR_ERR(host); 1344 1345 host->sdma_boundary = 0; 1346 pltfm_host = sdhci_priv(host); 1347 msm_host = sdhci_pltfm_priv(pltfm_host); 1348 msm_host->mmc = host->mmc; 1349 msm_host->pdev = pdev; 1350 1351 ret = mmc_of_parse(host->mmc); 1352 if (ret) 1353 goto pltfm_free; 1354 1355 sdhci_get_of_property(pdev); 1356 1357 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; 1358 1359 /* Setup SDCC bus voter clock. */ 1360 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); 1361 if (!IS_ERR(msm_host->bus_clk)) { 1362 /* Vote for max. clk rate for max. 

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Vote for maximum clock rate for maximum performance */
	ret = clk_set_rate(clk, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * The xo clock is needed for the FLL feature of cm_dll.
	 * If the xo clock is not specified in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);

	if (IS_ERR(msm_host->core_mem)) {
		dev_err(&pdev->dev, "Failed to remap registers\n");
		ret = PTR_ERR(msm_host->core_mem);
		goto clk_disable;
	}

	/* Reset the vendor spec register to power on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + CORE_VENDOR_SPEC);

	/* Set HC_MODE_EN bit in HC_MODE register */
	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));

	config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
	config |= FF_CLK_SW_RST_DIS;
	writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;
1464 */ 1465 if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) { 1466 config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); 1467 config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT; 1468 writel_relaxed(config, host->ioaddr + 1469 CORE_VENDOR_SPEC_CAPABILITIES0); 1470 } 1471 1472 /* 1473 * Power on reset state may trigger power irq if previous status of 1474 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq 1475 * interrupt in GIC, any pending power irq interrupt should be 1476 * acknowledged. Otherwise power irq interrupt handler would be 1477 * fired prematurely. 1478 */ 1479 sdhci_msm_handle_pwr_irq(host, 0); 1480 1481 /* 1482 * Ensure that above writes are propogated before interrupt enablement 1483 * in GIC. 1484 */ 1485 mb(); 1486 1487 /* Setup IRQ for handling power/voltage tasks with PMIC */ 1488 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); 1489 if (msm_host->pwr_irq < 0) { 1490 dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", 1491 msm_host->pwr_irq); 1492 ret = msm_host->pwr_irq; 1493 goto clk_disable; 1494 } 1495 1496 sdhci_msm_init_pwr_irq_wait(msm_host); 1497 /* Enable pwr irq interrupts */ 1498 writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK); 1499 1500 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, 1501 sdhci_msm_pwr_irq, IRQF_ONESHOT, 1502 dev_name(&pdev->dev), host); 1503 if (ret) { 1504 dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); 1505 goto clk_disable; 1506 } 1507 1508 pm_runtime_get_noresume(&pdev->dev); 1509 pm_runtime_set_active(&pdev->dev); 1510 pm_runtime_enable(&pdev->dev); 1511 pm_runtime_set_autosuspend_delay(&pdev->dev, 1512 MSM_MMC_AUTOSUSPEND_DELAY_MS); 1513 pm_runtime_use_autosuspend(&pdev->dev); 1514 1515 host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; 1516 ret = sdhci_add_host(host); 1517 if (ret) 1518 goto pm_runtime_disable; 1519 1520 pm_runtime_mark_last_busy(&pdev->dev); 1521 pm_runtime_put_autosuspend(&pdev->dev); 1522 1523 return 0; 1524 1525 pm_runtime_disable: 1526 pm_runtime_disable(&pdev->dev); 1527 pm_runtime_set_suspended(&pdev->dev); 1528 pm_runtime_put_noidle(&pdev->dev); 1529 clk_disable: 1530 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 1531 msm_host->bulk_clks); 1532 bus_clk_disable: 1533 if (!IS_ERR(msm_host->bus_clk)) 1534 clk_disable_unprepare(msm_host->bus_clk); 1535 pltfm_free: 1536 sdhci_pltfm_free(pdev); 1537 return ret; 1538 } 1539 1540 static int sdhci_msm_remove(struct platform_device *pdev) 1541 { 1542 struct sdhci_host *host = platform_get_drvdata(pdev); 1543 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1544 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1545 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 1546 0xffffffff); 1547 1548 sdhci_remove_host(host, dead); 1549 1550 pm_runtime_get_sync(&pdev->dev); 1551 pm_runtime_disable(&pdev->dev); 1552 pm_runtime_put_noidle(&pdev->dev); 1553 1554 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 1555 msm_host->bulk_clks); 1556 if (!IS_ERR(msm_host->bus_clk)) 1557 clk_disable_unprepare(msm_host->bus_clk); 1558 sdhci_pltfm_free(pdev); 1559 return 0; 1560 } 1561 1562 #ifdef CONFIG_PM 1563 static int sdhci_msm_runtime_suspend(struct device *dev) 1564 { 1565 struct sdhci_host *host = dev_get_drvdata(dev); 1566 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1567 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); 1568 1569 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), 1570 

#ifdef CONFIG_PM
static int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}

static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				       msm_host->bulk_clks);
}
#endif

static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");