/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <linux/soc/qcom/ubwc.h>

#include "msm_kms.h"

#include <generated/mdss.xml.h>

#define MIN_IB_BW	400000000UL /* Min ib vote 400 MB/s */

struct msm_mdss_data {
	u32 reg_bus_bw;
};

struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;
	struct clk_bulk_data *clocks;
	size_t num_clocks;
	bool is_mdp5;
	struct {
		unsigned long enabled_mask;
		struct irq_domain *domain;
	} irq_controller;
	const struct qcom_ubwc_cfg_data *mdss_data;
	u32 reg_bus_bw;
	struct icc_path *mdp_path[2];
	u32 num_mdp_paths;
	struct icc_path *reg_bus_path;
};

static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
					    struct msm_mdss *msm_mdss)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct icc_path *reg_bus_path;

	path0 = devm_of_icc_get(dev, "mdp0-mem");
	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	msm_mdss->mdp_path[0] = path0;
	msm_mdss->num_mdp_paths = 1;

	path1 = devm_of_icc_get(dev, "mdp1-mem");
	if (!IS_ERR_OR_NULL(path1)) {
		msm_mdss->mdp_path[1] = path1;
		msm_mdss->num_mdp_paths++;
	}

	reg_bus_path = of_icc_get(dev, "cpu-cfg");
	if (!IS_ERR_OR_NULL(reg_bus_path))
		msm_mdss->reg_bus_path = reg_bus_path;

	return 0;
}

static void msm_mdss_irq(struct irq_desc *desc)
{
	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS);

	while (interrupts) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;
		int rc;

		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
					       hwirq);
		if (rc < 0) {
			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
				hwirq, rc);
			break;
		}

		interrupts &= ~(1 << hwirq);
	}

	chained_irq_exit(chip, desc);
}

static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};

static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;

static int msm_mdss_irqdomain_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	return irq_set_chip_data(irq, msm_mdss);
}
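/*
 * Illustrative sketch (not part of this driver): a child device such as the
 * DPU normally reaches .map above through the generic platform_get_irq() /
 * irq_create_fwspec_mapping() path, but the equivalent manual lookup would
 * be roughly:
 *
 *	unsigned int virq;
 *
 *	virq = irq_create_mapping(msm_mdss->irq_controller.domain, hwirq);
 *	if (virq)
 *		ret = devm_request_irq(child_dev, virq, child_isr, 0,
 *				       "mdss-child", child_data);
 *
 * "hwirq", "child_dev", "child_isr" and "child_data" are hypothetical names
 * used only for illustration.
 */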
static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
	struct device *dev;
	struct irq_domain *domain;

	dev = msm_mdss->dev;

	domain = irq_domain_create_linear(dev_fwnode(dev), 32,
					  &msm_mdss_irqdomain_ops, msm_mdss);
	if (!domain) {
		dev_err(dev, "failed to add irq_domain\n");
		return -EINVAL;
	}

	msm_mdss->irq_controller.enabled_mask = 0;
	msm_mdss->irq_controller.domain = domain;

	return 0;
}

static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}

static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	if (data->ubwc_enc_version == UBWC_3_0)
		value |= MDSS_UBWC_STATIC_UBWC_AMSBC;

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}

static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);

	if (data->ubwc_enc_version == UBWC_3_0) {
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	} else {
		if (data->ubwc_dec_version == UBWC_4_3)
			writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		else
			writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	}
}

static void msm_mdss_setup_ubwc_dec_50(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);

	if (data->ubwc_dec_version == UBWC_6_0)
		writel_relaxed(5, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
	else
		writel_relaxed(4, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);

	writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
}
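/*
 * Summary of the version-dependent programming done by the helpers above,
 * derived from the code and kept here for quick reference:
 *
 *	decoder 2.0 / 3.0:  only UBWC_STATIC is written
 *	decoder 4.0 / 4.3:  CTRL_2 = 2 (4.0) or 3 (4.3), PREDICTION_MODE = 1;
 *	                    CTRL_2 = 1, PREDICTION_MODE = 0 when the encoder
 *	                    is UBWC 3.0
 *	decoder 5.0:        CTRL_2 = 4, PREDICTION_MODE = 1
 *	decoder 6.0:        CTRL_2 = 5, PREDICTION_MODE = 1
 *
 * Note that the HIGHEST_BANK_BIT field of UBWC_STATIC stores the bank bit
 * as an offset from bit 13, which is why every helper writes
 * data->highest_bank_bit - 13.
 */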
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret, i;

	/*
	 * Several components have AXI clocks that can only be turned on if
	 * the interconnect is enabled (non-zero bandwidth). Let's make sure
	 * that the interconnects are at least at a minimum amount.
	 */
	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));

	icc_set_bw(msm_mdss->reg_bus_path, 0, msm_mdss->reg_bus_bw);

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * Register access requires MDSS_MDP_CLK, which is not enabled by the
	 * mdss on mdp5 hardware. Skip it for now.
	 */
	if (msm_mdss->is_mdp5 || !msm_mdss->mdss_data)
		return 0;

	/*
	 * The UBWC config is part of the "mdss" region, which is not
	 * accessible from the rest of the driver, so hardcode the known
	 * configurations here.
	 *
	 * The decoder version can be read from the UBWC_DEC_HW_VERSION
	 * register; the UBWC_n encoder version and the remaining parameters
	 * come from the hardware data.
	 */
	switch (msm_mdss->mdss_data->ubwc_dec_version) {
	case 0: /* no UBWC */
	case UBWC_1_0:
		/* do nothing */
		break;
	case UBWC_2_0:
		msm_mdss_setup_ubwc_dec_20(msm_mdss);
		break;
	case UBWC_3_0:
		msm_mdss_setup_ubwc_dec_30(msm_mdss);
		break;
	case UBWC_4_0:
	case UBWC_4_3:
		msm_mdss_setup_ubwc_dec_40(msm_mdss);
		break;
	case UBWC_5_0:
	case UBWC_6_0:
		msm_mdss_setup_ubwc_dec_50(msm_mdss);
		break;
	default:
		dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
			msm_mdss->mdss_data->ubwc_dec_version);
		dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION));
		dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION));
		break;
	}

	return ret;
}

static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	int i;

	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);

	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, 0);

	if (msm_mdss->reg_bus_path)
		icc_set_bw(msm_mdss->reg_bus_path, 0, 0);

	return 0;
}

static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}

static int msm_mdss_reset(struct device *dev)
{
	struct reset_control *reset;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (!reset) {
		/* Optional reset not specified */
		return 0;
	} else if (IS_ERR(reset)) {
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to acquire mdss reset\n");
	}

	reset_control_assert(reset);
	/*
	 * Tests indicate that reset has to be held for some period of time,
	 * make it one frame in a typical system
	 */
	msleep(20);

	reset_control_deassert(reset);

	reset_control_put(reset);

	return 0;
}
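/*
 * Note on the enable/disable flow above: this device is the platform parent
 * of the MDP5/DPU, DSI, etc. child devices, so when a child runtime-resumes
 * its own device the PM core resumes this device first, and
 * mdss_runtime_resume() -> msm_mdss_enable() runs before the child touches
 * hardware; the interconnect votes and clocks are therefore already in
 * place. A minimal (hypothetical) sketch from a child driver's perspective:
 *
 *	ret = pm_runtime_resume_and_get(&child_pdev->dev);
 *	if (ret < 0)
 *		return ret;
 *	... access child registers; MDSS clocks/ICC are already up ...
 *	pm_runtime_put_sync(&child_pdev->dev);
 *
 * "child_pdev" is a hypothetical name used only for illustration.
 */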
/*
 * MDP5 MDSS uses at most three specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
	struct clk_bulk_data *bulk;
	int num_clocks = 0;
	int ret;

	if (!pdev)
		return -EINVAL;

	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS,
			    sizeof(struct clk_bulk_data), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	bulk[num_clocks++].id = "iface";
	bulk[num_clocks++].id = "bus";
	bulk[num_clocks++].id = "vsync";

	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
	if (ret)
		return ret;

	*clocks = bulk;

	return num_clocks;
}

static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	const struct msm_mdss_data *mdss_data;
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	ret = msm_mdss_reset(&pdev->dev);
	if (ret)
		return ERR_PTR(ret);

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	msm_mdss->mdss_data = qcom_ubwc_config_get_data();
	if (IS_ERR(msm_mdss->mdss_data))
		return ERR_CAST(msm_mdss->mdss_data);

	mdss_data = of_device_get_match_data(&pdev->dev);
	if (!mdss_data)
		return ERR_PTR(-EINVAL);

	msm_mdss->reg_bus_bw = mdss_data->reg_bus_bw;

	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%p\n", msm_mdss->mmio);

	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq, msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}

static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_disable(mdss);
}

static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_enable(mdss);
}

static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_suspend(dev);
}

static int __maybe_unused mdss_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_resume(dev);
}

static const struct dev_pm_ops mdss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
	SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};
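/*
 * The system sleep hooks above bail out early when the device is already
 * runtime suspended: msm_mdss_disable() has then already dropped the clocks
 * and interconnect votes, and running it a second time would unbalance
 * clk_bulk_disable_unprepare().
 */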
static int mdss_probe(struct platform_device *pdev)
{
	struct msm_mdss *mdss;
	bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
	struct device *dev = &pdev->dev;
	int ret;

	mdss = msm_mdss_init(pdev, is_mdp5);
	if (IS_ERR(mdss))
		return PTR_ERR(mdss);

	platform_set_drvdata(pdev, mdss);

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to populate children devices\n");
		msm_mdss_destroy(mdss);
		return ret;
	}

	return 0;
}

static void mdss_remove(struct platform_device *pdev)
{
	struct msm_mdss *mdss = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	msm_mdss_destroy(mdss);
}

static const struct msm_mdss_data data_57k = {
	.reg_bus_bw = 57000,
};

static const struct msm_mdss_data data_74k = {
	.reg_bus_bw = 74000,
};

static const struct msm_mdss_data data_76k8 = {
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data data_153k6 = {
	.reg_bus_bw = 153600,
};

static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,mdss", .data = &data_153k6 },
	{ .compatible = "qcom,glymur-mdss", .data = &data_57k },
	{ .compatible = "qcom,kaanapali-mdss", .data = &data_57k },
	{ .compatible = "qcom,msm8998-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,qcm2290-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,qcs8300-mdss", .data = &data_74k },
	{ .compatible = "qcom,sa8775p-mdss", .data = &data_74k },
	{ .compatible = "qcom,sar2130p-mdss", .data = &data_74k },
	{ .compatible = "qcom,sdm670-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sdm845-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sc7180-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sc7280-mdss", .data = &data_74k },
	{ .compatible = "qcom,sc8180x-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sc8280xp-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6115-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6125-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6150-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6350-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6375-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm7150-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm8150-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm8250-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm8350-mdss", .data = &data_74k },
	{ .compatible = "qcom,sm8450-mdss", .data = &data_74k },
	{ .compatible = "qcom,sm8550-mdss", .data = &data_57k },
	{ .compatible = "qcom,sm8650-mdss", .data = &data_57k },
	{ .compatible = "qcom,sm8750-mdss", .data = &data_57k },
	/* TODO: x1e8: Add reg_bus_bw with real value */
	{ .compatible = "qcom,x1e80100-mdss", .data = &data_153k6 },
	{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);

static struct platform_driver mdss_platform_driver = {
	.probe = mdss_probe,
	.remove = mdss_remove,
	.driver = {
		.name = "msm-mdss",
		.of_match_table = mdss_dt_match,
		.pm = &mdss_pm_ops,
	},
};
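/*
 * For reference, a minimal device tree node that would match the table
 * above; hypothetical, abbreviated values for illustration only (see the
 * qcom,sdm845-mdss.yaml binding for the authoritative property list):
 *
 *	display-subsystem@ae00000 {
 *		compatible = "qcom,sdm845-mdss";
 *		reg = <0x0ae00000 0x1000>;
 *		reg-names = "mdss";
 *		interrupts = <...>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interconnects = <...>, <...>;
 *		interconnect-names = "mdp0-mem", "mdp1-mem";
 *		...child MDP5/DPU, DSI, etc. nodes...
 *	};
 *
 * The register/unregister helpers below are called from the top-level msm
 * module init/exit rather than via module_platform_driver(), since the msm
 * module bundles several platform drivers.
 */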
void __init msm_mdss_register(void)
{
	platform_driver_register(&mdss_platform_driver);
}

void __exit msm_mdss_unregister(void)
{
	platform_driver_unregister(&mdss_platform_driver);
}