/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>

#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_10nm.xml.h"

/*
 * DSI PLL 10nm - clock diagram (eg: DSI0):
 *
 *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
 *                              |                |
 *                              |                |
 *                 +---------+  |  +----------+  |  +----+
 *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
 *                 +---------+  |  +----------+  |  +----+
 *                              |                |
 *                              |                |         dsi0_pll_by_2_bit_clk
 *                              |                |          |
 *                              |                |  +----+  |  |\  dsi0_pclk_mux
 *                              |                |--| /2 |--o--| \   |
 *                              |                |  +----+     |  \  |  +---------+
 *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
 *                              |------------------------------|  /     +---------+
 *                              |          +-----+             | /
 *                              -----------| /4? |--o----------|/
 *                                         +-----+  |           |
 *                                                  |           |dsiclk_sel
 *                                                  |
 *                                                  dsi0_pll_post_out_div_clk
 */

#define VCO_REF_CLK_RATE		19200000
#define FRAC_BITS			18

/* v3.0.0 10nm implementation that requires the old timings settings */
#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS	BIT(0)

/*
 * PLL configuration scratch pad: the "in" members select SSC behaviour,
 * the "out" members are computed by dsi_pll_calc_dec_frac()/calc_ssc()
 * and then programmed to hardware by dsi_pll_commit()/ssc_commit().
 */
struct dsi_pll_config {
	bool enable_ssc;
	bool ssc_center;
	u32 ssc_freq;
	u32 ssc_offset;
	u32 ssc_adj_per;

	/* out */
	u32 pll_prop_gain_rate;
	u32 decimal_div_start;
	u32 frac_div_start;
	u32 pll_clock_inverters;
	u32 ssc_stepsize;
	u32 ssc_div_per;
};

/*
 * Divider/mux state captured by dsi_10nm_pll_save_state() and replayed by
 * dsi_10nm_pll_restore_state() across a PHY power cycle.
 */
struct pll_10nm_cached_state {
	unsigned long vco_rate;
	u8 bit_clk_div;
	u8 pix_clk_div;
	u8 pll_out_div;
	u8 pll_mux;
};

/* Per-PHY private PLL state; clk_hw is the VCO clock exposed to the CCF. */
struct dsi_pll_10nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	u64 vco_current_rate;

	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
	spinlock_t postdiv_lock;

	struct pll_10nm_cached_state cached_state;

	/* set for the master PLL in bonded-DSI mode, NULL otherwise */
	struct dsi_pll_10nm *slave;
};

#define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, clk_hw)
/**
 * struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
 * @rescode_offset_top: Offset for pull-up legs rescode.
 * @rescode_offset_bot: Offset for pull-down legs rescode.
 * @vreg_ctrl: vreg ctrl to drive LDO level
 */
struct dsi_phy_10nm_tuning_cfg {
	u8 rescode_offset_top[DSI_LANE_MAX];
	u8 rescode_offset_bot[DSI_LANE_MAX];
	u8 vreg_ctrl;
};

/*
 * Global list of private DSI PLL struct pointers. We need this for bonded DSI
 * mode, where the master PLL's clk_ops needs access the slave's private data
 */
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];

/* Fill in default SSC parameters; SSC itself stays disabled by default. */
static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
	config->ssc_freq = 31500;
	config->ssc_offset = 5000;
	config->ssc_adj_per = 2;

	config->enable_ssc = false;
	config->ssc_center = false;
}

/*
 * Derive the decimal/fractional feedback divider and the rate-dependent
 * gain/clock-inverter settings from the requested VCO rate
 * (pll->vco_current_rate). Results land in the "out" members of @config.
 */
static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	u64 fref = VCO_REF_CLK_RATE;
	u64 pll_freq;
	u64 divider;
	u64 dec, dec_multiple;
	u32 frac;
	u64 multiplier;

	pll_freq = pll->vco_current_rate;

	divider = fref * 2;

	/* Split pll_freq / (2 * fref) into integer part and FRAC_BITS fraction */
	multiplier = 1 << FRAC_BITS;
	dec_multiple = div_u64(pll_freq * multiplier, divider);
	dec = div_u64_rem(dec_multiple, multiplier, &frac);

	if (pll_freq <= 1900000000UL)
		config->pll_prop_gain_rate = 8;
	else if (pll_freq <= 3000000000UL)
		config->pll_prop_gain_rate = 10;
	else
		config->pll_prop_gain_rate = 12;
	if (pll_freq < 1100000000UL)
		config->pll_clock_inverters = 8;
	else
		config->pll_clock_inverters = 0;

	config->decimal_div_start = dec;
	config->frac_div_start = frac;
}

#define SSC_CENTER		BIT(0)
#define SSC_EN			BIT(1)

/*
 * Compute the spread-spectrum divider period and step size from the
 * previously calculated dec/frac values. No-op when SSC is disabled.
 */
static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	u32 ssc_per;
	u32 ssc_mod;
	u64 ssc_step_size;
	u64 frac;

	if (!config->enable_ssc) {
		DBG("SSC not enabled\n");
		return;
	}

	/* Round the period down to a multiple of (ssc_adj_per + 1) */
	ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
	ssc_per -= ssc_mod;

	frac = config->frac_div_start;
	ssc_step_size = config->decimal_div_start;
	ssc_step_size *= (1 << FRAC_BITS);
	ssc_step_size += frac;
	ssc_step_size *= config->ssc_offset;
	ssc_step_size *= (config->ssc_adj_per + 1);
	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

	config->ssc_div_per = ssc_per;
	config->ssc_stepsize = ssc_step_size;

	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
		 config->decimal_div_start, frac, FRAC_BITS);
	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}

/* Program the SSC registers and enable SSC, if requested in @config. */
static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	if (config->enable_ssc) {
		pr_debug("SSC is enabled\n");

		writel(config->ssc_stepsize & 0xff,
		       base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1);
		writel(config->ssc_stepsize >> 8,
		       base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1);
		writel(config->ssc_div_per & 0xff,
		       base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1);
		writel(config->ssc_div_per >> 8,
		       base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1);
		writel(config->ssc_adj_per & 0xff,
		       base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1);
		writel(config->ssc_adj_per >> 8,
		       base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1);
		writel(SSC_EN | (config->ssc_center ? SSC_CENTER : 0),
		       base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL);
	}
}

/*
 * Program the frequency-independent analog/loop-filter settings. These
 * magic values come from the hardware programming guide and do not depend
 * on the selected VCO rate.
 */
static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
	void __iomem *base = pll->phy->pll_base;

	writel(0x80, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE);
	writel(0x03, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO);
	writel(0x00, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE);
	writel(0x00, base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER);
	writel(0x4e, base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER);
	writel(0x40, base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS);
	writel(0xba, base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE);
	writel(0x0c, base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE);
	writel(0x00, base + REG_DSI_10nm_PHY_PLL_OUTDIV);
	writel(0x00, base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE);
	writel(0x08, base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO);
	writel(0x08, base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1);
	writel(0xc0, base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1);
	writel(0xfa, base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1);
	writel(0x4c, base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1);
	writel(0x80, base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE);
	writel(0x29, base + REG_DSI_10nm_PHY_PLL_PFILT);
	writel(0x3f, base + REG_DSI_10nm_PHY_PLL_IFILT);
}

/* Write the rate-dependent divider values computed by calc_dec_frac(). */
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	writel(0x12, base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE);
	writel(config->decimal_div_start,
	       base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
	writel(config->frac_div_start & 0xff,
	       base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
	writel((config->frac_div_start & 0xff00) >> 8,
	       base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1);
	writel((config->frac_div_start & 0x30000) >> 16,
	       base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1);
	writel(64, base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1);
	writel(0x06, base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY);
	writel(0x10, base + REG_DSI_10nm_PHY_PLL_CMODE);
	writel(config->pll_clock_inverters, base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS);
}

/* clk_ops.set_rate: compute and program all PLL settings for @rate. */
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct dsi_pll_config config;

	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
	    parent_rate);

	pll_10nm->vco_current_rate = rate;

	dsi_pll_setup_config(&config);

	dsi_pll_calc_dec_frac(pll_10nm, &config);

	dsi_pll_calc_ssc(pll_10nm, &config);

	dsi_pll_commit(pll_10nm, &config);

	dsi_pll_config_hzindep_reg(pll_10nm);

	dsi_pll_ssc_commit(pll_10nm, &config);

	/* flush, ensure all register writes are done */
	wmb();

	return 0;
}

/* Poll COMMON_STATUS_ONE BIT(0) until the PLL reports lock, or time out. */
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
	struct device *dev = &pll->phy->pdev->dev;
	int rc;
	u32 status = 0;
	u32 const delay_us = 100;
	u32 const timeout_us = 5000;

	rc = readl_poll_timeout_atomic(pll->phy->pll_base +
				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
				       status,
				       ((status & BIT(0)) > 0),
				       delay_us,
				       timeout_us);
	if (rc)
		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
			      pll->phy->id, status);

	return rc;
}

/* Power down the PLL bias (CMN_CTRL_0 BIT(5)) and the system muxes. */
static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	writel(0, pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES);
	writel(data & ~BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
	ndelay(250);	/* settle time after bias change */
}

/* Power up the PLL bias; mirror image of dsi_pll_disable_pll_bias(). */
static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	writel(data | BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
	writel(0xc0, pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES);
	ndelay(250);	/* settle time after bias change */
}

/* Gate the PLL output clock (CMN_CLK_CFG1 BIT(5)). */
static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
{
	u32 data;

	data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	writel(data & ~BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
}

/* Ungate the PLL output clock (CMN_CLK_CFG1 BIT(5)). */
static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
{
	u32 data;

	data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	writel(data | BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
}

/*
 * clk_ops.prepare: power up the PLL (and the bonded slave, if any),
 * reprogram it at the cached rate, start it and wait for lock, then
 * enable the output clocks.
 */
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct device *dev = &pll_10nm->phy->pdev->dev;
	int rc;

	dsi_pll_enable_pll_bias(pll_10nm);
	if (pll_10nm->slave)
		dsi_pll_enable_pll_bias(pll_10nm->slave);

	rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
	if (rc) {
		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
		return rc;
	}

	/* Start PLL */
	writel(0x01, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);

	/*
	 * ensure all PLL configurations are written prior to checking
	 * for PLL lock.
	 */
	wmb();

	/* Check for PLL lock */
	rc = dsi_pll_10nm_lock_status(pll_10nm);
	if (rc) {
		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
		goto error;
	}

	pll_10nm->phy->pll_on = true;

	dsi_pll_enable_global_clk(pll_10nm);
	if (pll_10nm->slave)
		dsi_pll_enable_global_clk(pll_10nm->slave);

	writel(0x01, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);
	if (pll_10nm->slave)
		writel(0x01, pll_10nm->slave->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);

error:
	return rc;
}

/* Disable the resync buffer and PLL bias for one PHY instance. */
static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
	writel(0, pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);
	dsi_pll_disable_pll_bias(pll);
}

/* clk_ops.unprepare: gate outputs, then power the PLL(s) down. */
static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	/*
	 * To avoid any stray glitches while abruptly powering down the PLL
	 * make sure to gate the clock using the clock enable bit before
	 * powering down the PLL
	 */
	dsi_pll_disable_global_clk(pll_10nm);
	writel(0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
	dsi_pll_disable_sub(pll_10nm);
	if (pll_10nm->slave) {
		dsi_pll_disable_global_clk(pll_10nm->slave);
		dsi_pll_disable_sub(pll_10nm->slave);
	}
	/* flush, ensure all register writes are done */
	wmb();
	pll_10nm->phy->pll_on = false;
}

/*
 * clk_ops.recalc_rate: read the dec/frac dividers back from hardware and
 * reconstruct the VCO rate. Also refreshes vco_current_rate, so a later
 * prepare/restore reprograms the rate the hardware was actually running at.
 */
static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	void __iomem *base = pll_10nm->phy->pll_base;
	u64 ref_clk = VCO_REF_CLK_RATE;
	u64 vco_rate = 0x0;
	u64 multiplier;
	u32 frac;
	u32 dec;
	u64 pll_freq, tmp64;

	dec = readl(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
	dec &= 0xff;

	/* 18-bit fraction is spread over three 8/8/2-bit registers */
	frac = readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
	frac |= ((readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
		  0xff) << 8);
	frac |= ((readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
		  0x3) << 16);

	/*
	 * TODO:
	 *	1. Assumes prescaler is disabled
	 */
	multiplier = 1 << FRAC_BITS;
	pll_freq = dec * (ref_clk * 2);
	tmp64 = (ref_clk * 2 * frac);
	pll_freq += div_u64(tmp64, multiplier);

	vco_rate = pll_freq;
	pll_10nm->vco_current_rate = vco_rate;

	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
	    pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);

	return (unsigned long)vco_rate;
}

/* clk_ops.determine_rate: clamp the request into the PHY's PLL range. */
static int dsi_pll_10nm_clk_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	req->rate = clamp_t(unsigned long, req->rate,
			    pll_10nm->phy->cfg->min_pll_rate, pll_10nm->phy->cfg->max_pll_rate);

	return 0;
}

static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
	.determine_rate = dsi_pll_10nm_clk_determine_rate,
	.set_rate = dsi_pll_10nm_vco_set_rate,
	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
	.prepare = dsi_pll_10nm_vco_prepare,
	.unprepare = dsi_pll_10nm_vco_unprepare,
};

/*
 * PLL Callbacks
 */

/* Snapshot the out-div, bit/pix clock dividers and pclk mux selection. */
static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 cmn_clk_cfg0, cmn_clk_cfg1;

	cached->pll_out_div = readl(pll_10nm->phy->pll_base +
				    REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	cached->pll_out_div &= 0x3;

	cmn_clk_cfg0 = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

	cmn_clk_cfg1 = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	cached->pll_mux = cmn_clk_cfg1 & 0x3;

	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
	    pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
	    cached->pix_clk_div, cached->pll_mux);
}

/* Replay the state saved by dsi_10nm_pll_save_state() and re-set the rate. */
static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 val;
	int ret;

	val = readl(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	val &= ~0x3;
	val |= cached->pll_out_div;
	writel(val, pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);

	writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
	       phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);

	val = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	val &= ~0x3;
	val |= cached->pll_mux;
	writel(val, phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

	ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
					pll_10nm->vco_current_rate,
					VCO_REF_CLK_RATE);
	if (ret) {
		DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
			      "restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	DBG("DSI PLL%d", pll_10nm->phy->id);

	return 0;
}

/*
 * Configure the byte-clock source mux for standalone/master/slave use.
 * In master mode, also link up the slave PLL's private data.
 */
static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	void __iomem *base = phy->base;
	u32 data = 0x0; /* internal PLL */

	DBG("DSI PLL%d", pll_10nm->phy->id);

	switch (phy->usecase) {
	case MSM_DSI_PHY_STANDALONE:
		break;
	case MSM_DSI_PHY_MASTER:
		pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
		break;
	case MSM_DSI_PHY_SLAVE:
		data = 0x1; /* external PLL */
		break;
	default:
		return -EINVAL;
	}

	/* set PLL src */
	writel(data << 2, base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

	return 0;
}

/*
 * The post dividers and mux clocks are created using the standard divider and
 * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
 * state to follow the master PLL's divider/mux state. Therefore, we don't
 * require special clock ops that also configure the slave PLL registers
 */
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
	char clk_name[32];
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref",
		},
		.num_parents = 1,
		.name = clk_name,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_10nm_vco,
	};
	struct device *dev = &pll_10nm->phy->pdev->dev;
	struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
	struct clk_hw *pll_post_out_div, *pclk_mux;
	int ret;

	DBG("DSI%d", pll_10nm->phy->id);

	snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
	pll_10nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
	if (ret)
		return ret;

	snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
			pll_10nm->phy->pll_base +
				REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
			0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
	if (IS_ERR(pll_out_div)) {
		ret = PTR_ERR(pll_out_div);
		goto fail;
	}

	snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	/* BIT CLK: DIV_CTRL_3_0 */
	pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			pll_out_div, CLK_SET_RATE_PARENT,
			pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
			0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
	if (IS_ERR(pll_bit)) {
		ret = PTR_ERR(pll_bit);
		goto fail;
	}

	snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);

	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
	hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
			pll_bit, CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);

	pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			clk_name, pll_bit, 0, 1, 2);
	if (IS_ERR(pll_by_2_bit)) {
		ret = PTR_ERR(pll_by_2_bit);
		goto fail;
	}

	snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);

	pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			clk_name, pll_out_div, 0, 1, 4);
	if (IS_ERR(pll_post_out_div)) {
		ret = PTR_ERR(pll_post_out_div);
		goto fail;
	}

	snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);

	pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
			((const struct clk_hw *[]){
				pll_bit,
				pll_by_2_bit,
				pll_out_div,
				pll_post_out_div,
			}), 4, 0, pll_10nm->phy->base +
				REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
	if (IS_ERR(pclk_mux)) {
		ret = PTR_ERR(pclk_mux);
		goto fail;
	}

	snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);

	/* PIX CLK DIV : DIV_CTRL_7_4 */
	hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
			0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
			4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	return 0;

fail:

	return ret;
}

/* phy_cfg.pll_init: allocate the private PLL data and register the clocks. */
static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_10nm *pll_10nm;
	int ret;

	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
	if (!pll_10nm)
		return -ENOMEM;

	DBG("DSI PLL%d", phy->id);

	pll_10nm_list[phy->id] = pll_10nm;

	spin_lock_init(&pll_10nm->postdiv_lock);

	pll_10nm->phy = phy;

	ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_10nm->clk_hw;

	/* TODO: Remove this when we have proper display handover support */
	msm_dsi_phy_pll_save_state(phy);

	/*
	 * Store also proper vco_current_rate, because its value will be used in
	 * dsi_10nm_pll_restore_state().
	 */
	if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE))
		pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate;

	return 0;
}

/* Return non-zero if the PLL start bit in CMN_PLL_CNTRL is set. */
static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data = 0;

	data = readl(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
	mb(); /* make sure read happened */

	return (data & BIT(0));
}

/* Enable/disable LPRX+CDRX on the physical lane mapped to logical lane 0. */
static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
	void __iomem *lane_base = phy->lane_base;
	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */

	/*
	 * LPRX and CDRX need to enabled only for physical data lane
	 * corresponding to the logical data lane 0
	 */
	if (enable)
		writel(0x3, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0));
	else
		writel(0, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0));
}

/* Program per-lane strength, config and drive-strength tuning registers. */
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
	int i;
	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
	void __iomem *lane_base = phy->lane_base;
	struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;

	if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
		tx_dctrl[3] = 0x02;

	/* Strength ctrl settings */
	for (i = 0; i < 5; i++) {
		writel(0x55, lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i));
		/*
		 * Disable LPRX and CDRX for all lanes. And later on, it will
		 * be only enabled for the physical data lane corresponding
		 * to the logical data lane 0
		 */
		writel(0, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i));
		writel(0x0, lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i));
		writel(0x88, lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i));
	}

	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

	/* other settings */
	for (i = 0; i < 5; i++) {
		writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG0(i));
		writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG1(i));
		writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG2(i));
		writel(i == 4 ? 0x80 : 0x0, lane_base + REG_DSI_10nm_PHY_LN_CFG3(i));

		/* platform specific dsi phy drive strength adjustment */
		writel(tuning_cfg->rescode_offset_top[i],
		       lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i));
		writel(tuning_cfg->rescode_offset_bot[i],
		       lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i));

		writel(tx_dctrl[i],
		       lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i));
	}

	if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
		/* Toggle BIT 0 to release freeze I/0 */
		writel(0x05, lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3));
		writel(0x04, lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3));
	}
}

/*
 * phy_cfg.enable: full PHY bring-up sequence - compute D-PHY timings,
 * wait for the reference generator, power up the blocks, program timings
 * and lane settings, and select the PLL use case.
 */
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			      "%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* Assert PLL core reset */
	writel(0x00, base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);

	/* turn off resync FIFO */
	writel(0x00, base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);

	/* Select MS1 byte-clk */
	writel(0x10, base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL);

	/* Enable LDO with platform specific drive level/amplitude adjustment */
	writel(tuning_cfg->vreg_ctrl, base + REG_DSI_10nm_PHY_CMN_VREG_CTRL);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	writel(0x21, base + REG_DSI_10nm_PHY_CMN_LANE_CFG0);
	writel(0x84, base + REG_DSI_10nm_PHY_CMN_LANE_CFG1);

	/* DSI PHY timings */
	writel(timing->hs_halfbyte_en, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0);
	writel(timing->clk_zero, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1);
	writel(timing->clk_prepare, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2);
	writel(timing->clk_trail, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3);
	writel(timing->hs_exit, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4);
	writel(timing->hs_zero, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5);
	writel(timing->hs_prepare, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6);
	writel(timing->hs_trail, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7);
	writel(timing->hs_rqst, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8);
	writel(timing->ta_go | (timing->ta_sure << 3), base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9);
	writel(timing->ta_get, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10);
	writel(0x00, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11);

	/* Remove power down from all blocks */
	writel(0x7f, base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* power up lanes */
	data = readl(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
	writel(0x1F, base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0);

	/* Select full-rate mode */
	writel(0x40, base + REG_DSI_10nm_PHY_CMN_CTRL_2);

	ret = dsi_10nm_set_usecase(phy);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			      __func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}

/* phy_cfg.disable: power down all lanes and PHY blocks. */
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("Turning OFF PHY while PLL is on\n");

	dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
	data = readl(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* disable all lanes */
	data &= ~0x1F;
	writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
	writel(0, base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0);

	/* Turn off all PHY blocks */
	writel(0x00, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
	/* make sure phy is turned off */
	wmb();

	DBG("DSI%d PHY disabled", phy->id);
}

/*
 * phy_cfg.parse_dt_properties: read optional per-board tuning from DT
 * (rescode offsets and LDO drive level), validate ranges, and stash the
 * result in phy->tuning_cfg. Missing properties fall back to defaults
 * (-EINVAL from the of_ helpers means "property absent" and is ignored).
 */
static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
{
	struct device *dev = &phy->pdev->dev;
	struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
	s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
	s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
	u32 ldo_level = 400; /* 400mV */
	u8 level;
	int ret, i;

	tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
	if (!tuning_cfg)
		return -ENOMEM;

	/* Drive strength adjustment parameters */
	ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
					offset_top, DSI_LANE_MAX);
	if (ret && ret != -EINVAL) {
		DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
		return ret;
	}

	for (i = 0; i < DSI_LANE_MAX; i++) {
		if (offset_top[i] < -32 || offset_top[i] > 31) {
			DRM_DEV_ERROR(dev,
				      "qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
				      offset_top[i]);
			return -EINVAL;
		}
		/* store as 6-bit two's complement */
		tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
	}

	ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
					offset_bot, DSI_LANE_MAX);
	if (ret && ret != -EINVAL) {
		DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
		return ret;
	}

	for (i = 0; i < DSI_LANE_MAX; i++) {
		if (offset_bot[i] < -32 || offset_bot[i] > 31) {
			DRM_DEV_ERROR(dev,
				      "qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
				      offset_bot[i]);
			return -EINVAL;
		}
		/* store as 6-bit two's complement */
		tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
	}

	/* Drive level/amplitude adjustment parameters */
	ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
	if (ret && ret != -EINVAL) {
		DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
		return ret;
	}

	/* map millivolts to the 3-bit VREG_CTRL level field */
	switch (ldo_level) {
	case 375:
		level = 0;
		break;
	case 400:
		level = 1;
		break;
	case 425:
		level = 2;
		break;
	case 450:
		level = 3;
		break;
	case 475:
		level = 4;
		break;
	case 500:
		level = 5;
		break;
	default:
		DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
		return -EINVAL;
	}
	tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);

	phy->tuning_cfg = tuning_cfg;

	return 0;
}

static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
	{ .supply = "vdds", .init_load_uA = 36000 },
};

const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.has_phy_lane = true,
	.regulator_data = dsi_phy_10nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
		.parse_dt_properties = dsi_10nm_phy_parse_dt,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};

/* v3.0.0 (e.g. MSM8998) variant: same ops, old-timings quirk set. */
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
	.has_phy_lane = true,
	.regulator_data = dsi_phy_10nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
		.parse_dt_properties = dsi_10nm_phy_parse_dt,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xc994400, 0xc996400 },
	.num_dsi_phy = 2,
	.quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};