/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>

#include "dsi_phy.h"

/* Signed DIV_ROUND_UP: rounds away from zero for both signs of n. */
#define S_DIV_ROUND_UP(n, d) \
        (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

/*
 * Interpolate 'percent' of the way from tmin towards tmax, rounding up.
 * Optionally force the result to be even, and never return less than
 * min_result.
 */
static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
                                s32 min_result, bool even)
{
        s32 v;

        v = (tmax - tmin) * percent;
        v = S_DIV_ROUND_UP(v, 100) + tmin;
        if (even && (v & 0x1))
                return max_t(s32, min_result, v - 1);
        else
                return max_t(s32, min_result, v);
}

static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
                                s32 ui, s32 coeff, s32 pcnt)
{
        s32 tmax, tmin, clk_z;
        s32 temp;

        /* reset */
        temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
        tmin = S_DIV_ROUND_UP(temp, ui) - 2;
        if (tmin > 255) {
                tmax = 511;
                clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
        } else {
                tmax = 255;
                clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
        }

        /* adjust */
        temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
        timing->clk_zero = clk_z + 8 - temp;
}
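/*
 * Calculate the D-PHY HS timing counters from the DSI bit rate and escape
 * clock rate. Note (editorial, derived from the arithmetic below): ui and
 * lpx are the HS unit interval and the escape-clock period in nanoseconds,
 * scaled up by 'coeff' (1000) to preserve integer precision; e.g. a
 * 500 Mbps link gives ui = 2000, i.e. 2.0 ns. The tmin/tmax bounds appear
 * to follow the MIPI D-PHY global operation timing limits (e.g.
 * 38..95 ns for CLK-PREPARE, 40 ns + 4*UI .. 85 ns + 6*UI for HS-PREPARE),
 * with the pcntN values choosing a working point inside each window.
 */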
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
        const unsigned long bit_rate, const unsigned long esc_rate)
{
        s32 ui, lpx;
        s32 tmax, tmin;
        s32 pcnt0 = 10;
        s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
        s32 pcnt2 = 10;
        s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
        s32 coeff = 1000; /* Precision, should avoid overflow */
        s32 temp;

        if (!bit_rate || !esc_rate)
                return -EINVAL;

        ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
        lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

        tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
        tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
        timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

        temp = lpx / ui;
        if (temp & 0x1)
                timing->hs_rqst = temp;
        else
                timing->hs_rqst = max_t(s32, 0, temp - 2);

        /* Calculate clk_zero after clk_prepare and hs_rqst */
        dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

        temp = 105 * coeff + 12 * ui - 20 * coeff;
        tmax = S_DIV_ROUND_UP(temp, ui) - 2;
        tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
        timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

        temp = 85 * coeff + 6 * ui;
        tmax = S_DIV_ROUND_UP(temp, ui) - 2;
        temp = 40 * coeff + 4 * ui;
        tmin = S_DIV_ROUND_UP(temp, ui) - 2;
        timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

        tmax = 255;
        temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
        temp = 145 * coeff + 10 * ui - temp;
        tmin = S_DIV_ROUND_UP(temp, ui) - 2;
        timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

        temp = 105 * coeff + 12 * ui - 20 * coeff;
        tmax = S_DIV_ROUND_UP(temp, ui) - 2;
        temp = 60 * coeff + 4 * ui;
        tmin = DIV_ROUND_UP(temp, ui) - 2;
        timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

        tmax = 255;
        tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
        timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

        tmax = 63;
        temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
        temp = 60 * coeff + 52 * ui - 24 * ui - temp;
        tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
        timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);

        tmax = 63;
        temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
        temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
        temp += 8 * ui + lpx;
        tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
        if (tmin > tmax) {
                temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
                timing->clk_pre = temp >> 1;
        } else {
                timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
        }

        timing->ta_go = 3;
        timing->ta_sure = 0;
        timing->ta_get = 4;

        DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
                timing->clk_pre, timing->clk_post, timing->clk_zero,
                timing->clk_trail, timing->clk_prepare, timing->hs_exit,
                timing->hs_zero, timing->hs_prepare, timing->hs_trail,
                timing->hs_rqst);

        return 0;
}
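/*
 * Select which PLL feeds this PHY. The decision comes from the
 * per-configuration src_pll_truthtable, indexed by PHY id and PLL id,
 * and is applied by setting or clearing bit_mask in the given register.
 */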
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                                u32 bit_mask)
{
        int phy_id = phy->id;
        u32 val;

        if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
                return;

        val = dsi_phy_read(phy->base + reg);

        if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
                dsi_phy_write(phy->base + reg, val | bit_mask);
        else
                dsi_phy_write(phy->base + reg, val & (~bit_mask));
}

static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
        struct regulator_bulk_data *s = phy->supplies;
        const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
        struct device *dev = &phy->pdev->dev;
        int num = phy->cfg->reg_cfg.num;
        int i, ret;

        for (i = 0; i < num; i++)
                s[i].supply = regs[i].name;

        ret = devm_regulator_bulk_get(dev, num, s);
        if (ret < 0) {
                dev_err(dev, "%s: failed to init regulator, ret=%d\n",
                        __func__, ret);
                return ret;
        }

        return 0;
}

static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
        struct regulator_bulk_data *s = phy->supplies;
        const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
        int num = phy->cfg->reg_cfg.num;
        int i;

        DBG("");
        for (i = num - 1; i >= 0; i--)
                if (regs[i].disable_load >= 0)
                        regulator_set_load(s[i].consumer,
                                        regs[i].disable_load);

        regulator_bulk_disable(num, s);
}

static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
        struct regulator_bulk_data *s = phy->supplies;
        const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
        struct device *dev = &phy->pdev->dev;
        int num = phy->cfg->reg_cfg.num;
        int ret, i;

        DBG("");
        for (i = 0; i < num; i++) {
                if (regs[i].enable_load >= 0) {
                        ret = regulator_set_load(s[i].consumer,
                                        regs[i].enable_load);
                        if (ret < 0) {
                                dev_err(dev,
                                        "regulator %d set op mode failed, %d\n",
                                        i, ret);
                                goto fail;
                        }
                }
        }

        ret = regulator_bulk_enable(num, s);
        if (ret < 0) {
                dev_err(dev, "regulator enable failed, %d\n", ret);
                goto fail;
        }

        return 0;

fail:
        for (i--; i >= 0; i--)
                regulator_set_load(s[i].consumer, regs[i].disable_load);
        return ret;
}

static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
        struct device *dev = &phy->pdev->dev;
        int ret;

        pm_runtime_get_sync(dev);

        ret = clk_prepare_enable(phy->ahb_clk);
        if (ret) {
                dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
                pm_runtime_put_sync(dev);
        }

        return ret;
}

static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
        clk_disable_unprepare(phy->ahb_clk);
        pm_runtime_put_sync(&phy->pdev->dev);
}
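/*
 * Match table mapping each supported compatible string to its PHY
 * configuration. The dsi_phy_*_cfgs structures are presumably provided by
 * the per-generation PHY sources selected by the Kconfig options guarding
 * each entry.
 */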
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
        { .compatible = "qcom,dsi-phy-28nm-hpm",
          .data = &dsi_phy_28nm_hpm_cfgs },
        { .compatible = "qcom,dsi-phy-28nm-lp",
          .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
        { .compatible = "qcom,dsi-phy-20nm",
          .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
        { .compatible = "qcom,dsi-phy-28nm-8960",
          .data = &dsi_phy_28nm_8960_cfgs },
#endif
        {}
};

static int dsi_phy_driver_probe(struct platform_device *pdev)
{
        struct msm_dsi_phy *phy;
        struct device *dev = &pdev->dev;
        const struct of_device_id *match;
        int ret;

        phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
        if (!phy)
                return -ENOMEM;

        match = of_match_node(dsi_phy_dt_match, dev->of_node);
        if (!match)
                return -ENODEV;

        phy->cfg = match->data;
        phy->pdev = pdev;

        ret = of_property_read_u32(dev->of_node,
                                "qcom,dsi-phy-index", &phy->id);
        if (ret) {
                dev_err(dev, "%s: PHY index not specified, %d\n",
                        __func__, ret);
                goto fail;
        }

        phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
                                "qcom,dsi-phy-regulator-ldo-mode");

        phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
        if (IS_ERR(phy->base)) {
                dev_err(dev, "%s: failed to map phy base\n", __func__);
                ret = -ENOMEM;
                goto fail;
        }

        phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
                                "DSI_PHY_REG");
        if (IS_ERR(phy->reg_base)) {
                dev_err(dev, "%s: failed to map phy regulator base\n",
                        __func__);
                ret = -ENOMEM;
                goto fail;
        }

        ret = dsi_phy_regulator_init(phy);
        if (ret) {
                dev_err(dev, "%s: failed to init regulator\n", __func__);
                goto fail;
        }

        phy->ahb_clk = devm_clk_get(dev, "iface_clk");
        if (IS_ERR(phy->ahb_clk)) {
                dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
                ret = PTR_ERR(phy->ahb_clk);
                goto fail;
        }

        /* PLL init will call into clk_register which requires
         * register access, so we need to enable power and ahb clock.
         */
        ret = dsi_phy_enable_resource(phy);
        if (ret)
                goto fail;

        phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
        if (!phy->pll)
                dev_info(dev,
                        "%s: pll init failed, need separate pll clk driver\n",
                        __func__);

        dsi_phy_disable_resource(phy);

        platform_set_drvdata(pdev, phy);

        return 0;

fail:
        return ret;
}

static int dsi_phy_driver_remove(struct platform_device *pdev)
{
        struct msm_dsi_phy *phy = platform_get_drvdata(pdev);

        if (phy && phy->pll) {
                msm_dsi_pll_destroy(phy->pll);
                phy->pll = NULL;
        }

        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver dsi_phy_platform_driver = {
        .probe = dsi_phy_driver_probe,
        .remove = dsi_phy_driver_remove,
        .driver = {
                .name = "msm_dsi_phy",
                .of_match_table = dsi_phy_dt_match,
        },
};

void __init msm_dsi_phy_driver_register(void)
{
        platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
        platform_driver_unregister(&dsi_phy_platform_driver);
}

int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        const unsigned long bit_rate, const unsigned long esc_rate)
{
        struct device *dev;
        int ret;

        if (!phy || !phy->cfg->ops.enable)
                return -EINVAL;

        /* Only dereference phy once the NULL check above has passed. */
        dev = &phy->pdev->dev;

        ret = dsi_phy_regulator_enable(phy);
        if (ret) {
                dev_err(dev, "%s: regulator enable failed, %d\n",
                        __func__, ret);
                return ret;
        }

        ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
        if (ret) {
                dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
                dsi_phy_regulator_disable(phy);
                return ret;
        }

        return 0;
}

void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
        if (!phy || !phy->cfg->ops.disable)
                return;

        phy->cfg->ops.disable(phy);

        dsi_phy_regulator_disable(phy);
}

void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
                                u32 *clk_pre, u32 *clk_post)
{
        if (!phy)
                return;

        if (clk_pre)
                *clk_pre = phy->timing.clk_pre;
        if (clk_post)
                *clk_post = phy->timing.clk_post;
}

struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
{
        if (!phy)
                return NULL;

        return phy->pll;
}