/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>

#include "dsi_phy.h"

#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
				s32 min_result, bool even)
{
	s32 v;

	v = (tmax - tmin) * percent;
	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		return max_t(s32, min_result, v - 1);
	else
		return max_t(s32, min_result, v);
}
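
/*
 * linear_inter() picks a value "percent" percent of the way from tmin
 * towards tmax, rounded up via S_DIV_ROUND_UP(), clamped to at least
 * min_result and, when "even" is set, forced down to an even number.
 * For example, with tmin = 10, tmax = 30, percent = 10 and even = true:
 * v = (30 - 10) * 10 = 200; S_DIV_ROUND_UP(200, 100) + 10 = 12, which is
 * already even, so 12 is returned.
 */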

static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}

int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);

	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->clk_pre = temp >> 1;
	} else {
		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->clk_pre, timing->clk_post, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst);

	return 0;
}

void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
				u32 bit_mask)
{
	int phy_id = phy->id;
	u32 val;

	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
		return;

	val = dsi_phy_read(phy->base + reg);

	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
		dsi_phy_write(phy->base + reg, val | bit_mask);
	else
		dsi_phy_write(phy->base + reg, val & (~bit_mask));
}
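
/*
 * A worked example for the truth table lookup above (the table contents
 * here are illustrative; the real values live in each PHY's configuration):
 * with src_pll_truthtable = { { true, true }, { false, true } }, a call for
 * phy_id = 1 and pll_id = 0 finds "false" and therefore clears bit_mask in
 * the register, while phy_id = 1 and pll_id = 1 finds "true" and sets it.
 */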

static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(dev, num, s);
	if (ret < 0) {
		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
			__func__, ret);
		return ret;
	}

	for (i = 0; i < num; i++) {
		if (regulator_can_change_voltage(s[i].consumer)) {
			ret = regulator_set_voltage(s[i].consumer,
				regs[i].min_voltage, regs[i].max_voltage);
			if (ret < 0) {
				dev_err(dev,
					"regulator %d set voltage failed, %d\n",
					i, ret);
				return ret;
			}
		}
	}

	return 0;
}

static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	int num = phy->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer, regs[i].disable_load);

	regulator_bulk_disable(num, s);
}

static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						regs[i].enable_load);
			if (ret < 0) {
				dev_err(dev,
					"regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		dev_err(dev, "regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}

static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
	struct device *dev = &phy->pdev->dev;
	int ret;

	pm_runtime_get_sync(dev);

	ret = clk_prepare_enable(phy->ahb_clk);
	if (ret) {
		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
		pm_runtime_put_sync(dev);
	}

	return ret;
}

static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put_sync(&phy->pdev->dev);
}

static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
	{}
};
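
/*
 * For reference, a matching device tree node might look roughly like the
 * sketch below; node name and values are illustrative, reg/clocks/supply
 * properties are omitted, and the DT binding document is authoritative:
 *
 *	dsi-phy {
 *		compatible = "qcom,dsi-phy-28nm-hpm";
 *		reg-names = "dsi_phy", "dsi_phy_regulator";
 *		clock-names = "iface_clk";
 *		qcom,dsi-phy-index = <0>;
 *	};
 *
 * Probe below reads "qcom,dsi-phy-index", the optional
 * "qcom,dsi-phy-regulator-ldo-mode" flag, the "dsi_phy" and
 * "dsi_phy_regulator" register regions and the "iface_clk" clock.
 */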

static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	match = of_match_node(dsi_phy_dt_match, dev->of_node);
	if (!match)
		return -ENODEV;

	phy->cfg = match->data;
	phy->pdev = pdev;

	ret = of_property_read_u32(dev->of_node,
				"qcom,dsi-phy-index", &phy->id);
	if (ret) {
		dev_err(dev, "%s: PHY index not specified, %d\n",
			__func__, ret);
		goto fail;
	}

	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
				"qcom,dsi-phy-regulator-ldo-mode");

	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
	if (IS_ERR(phy->base)) {
		dev_err(dev, "%s: failed to map phy base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
				"DSI_PHY_REG");
	if (IS_ERR(phy->reg_base)) {
		dev_err(dev, "%s: failed to map phy regulator base\n",
			__func__);
		ret = -ENOMEM;
		goto fail;
	}

	ret = dsi_phy_regulator_init(phy);
	if (ret) {
		dev_err(dev, "%s: failed to init regulator\n", __func__);
		goto fail;
	}

	phy->ahb_clk = devm_clk_get(dev, "iface_clk");
	if (IS_ERR(phy->ahb_clk)) {
		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
		ret = PTR_ERR(phy->ahb_clk);
		goto fail;
	}

	/* PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
	ret = dsi_phy_enable_resource(phy);
	if (ret)
		goto fail;

	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
	if (!phy->pll)
		dev_info(dev,
			"%s: pll init failed, need separate pll clk driver\n",
			__func__);

	dsi_phy_disable_resource(phy);

	platform_set_drvdata(pdev, phy);

	return 0;

fail:
	return ret;
}

static int dsi_phy_driver_remove(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);

	if (phy && phy->pll) {
		msm_dsi_pll_destroy(phy->pll);
		phy->pll = NULL;
	}

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver dsi_phy_platform_driver = {
	.probe  = dsi_phy_driver_probe,
	.remove = dsi_phy_driver_remove,
	.driver = {
		.name = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};

void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}

int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	struct device *dev;
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	/* only dereference phy after the NULL check above */
	dev = &phy->pdev->dev;

	ret = dsi_phy_regulator_enable(phy);
	if (ret) {
		dev_err(dev, "%s: regulator enable failed, %d\n",
			__func__, ret);
		return ret;
	}

	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
	if (ret) {
		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
		dsi_phy_regulator_disable(phy);
		return ret;
	}

	return 0;
}

void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
	if (!phy || !phy->cfg->ops.disable)
		return;

	phy->cfg->ops.disable(phy);

	dsi_phy_regulator_disable(phy);
}

void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
					u32 *clk_pre, u32 *clk_post)
{
	if (!phy)
		return;

	if (clk_pre)
		*clk_pre = phy->timing.clk_pre;
	if (clk_post)
		*clk_post = phy->timing.clk_post;
}

struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
{
	if (!phy)
		return NULL;

	return phy->pll;
}
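
/*
 * Usage sketch for the helpers above, written as a hypothetical caller
 * (in practice the DSI host/manager code drives this sequence):
 *
 *	u32 clk_pre, clk_post;
 *
 *	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
 *	if (ret)
 *		return ret;
 *	msm_dsi_phy_get_clk_pre_post(phy, &clk_pre, &clk_post);
 *	...
 *	msm_dsi_phy_disable(phy);
 */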