/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <linux/soc/qcom/ubwc.h>

#include "msm_kms.h"

#include <generated/mdss.xml.h>

#define MIN_IB_BW	400000000UL /* Minimum instantaneous-bandwidth (ib) vote: 400 MB/s */

struct msm_mdss_data {
	u32 reg_bus_bw;
};

struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;
	struct clk_bulk_data *clocks;
	size_t num_clocks;
	bool is_mdp5;
	struct {
		unsigned long enabled_mask;
		struct irq_domain *domain;
	} irq_controller;
	const struct qcom_ubwc_cfg_data *mdss_data;
	u32 reg_bus_bw;
	struct icc_path *mdp_path[2];
	u32 num_mdp_paths;
	struct icc_path *reg_bus_path;
};

static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
					    struct msm_mdss *msm_mdss)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct icc_path *reg_bus_path;

	path0 = devm_of_icc_get(dev, "mdp0-mem");
	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	msm_mdss->mdp_path[0] = path0;
	msm_mdss->num_mdp_paths = 1;

	path1 = devm_of_icc_get(dev, "mdp1-mem");
	if (!IS_ERR_OR_NULL(path1)) {
		msm_mdss->mdp_path[1] = path1;
		msm_mdss->num_mdp_paths++;
	}

	/*
	 * The reg bus path is optional; use devm so it is released with the
	 * device, like the mdp paths above, instead of leaking on remove.
	 */
	reg_bus_path = devm_of_icc_get(dev, "cpu-cfg");
	if (!IS_ERR_OR_NULL(reg_bus_path))
		msm_mdss->reg_bus_path = reg_bus_path;

	return 0;
}

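/*
 * Chained handler for the shared top-level MDSS interrupt: read the
 * pending bits from HW_INTR_STATUS and dispatch each one, highest bit
 * first, to the matching interrupt in our linear IRQ domain.
 */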
static void msm_mdss_irq(struct irq_desc *desc)
{
	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS);

	while (interrupts) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;
		int rc;

		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
					       hwirq);
		if (rc < 0) {
			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
				hwirq, rc);
			break;
		}

		interrupts &= ~BIT(hwirq);
	}

	chained_irq_exit(chip, desc);
}

static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* Order the mask update against surrounding accesses. */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	smp_mb__after_atomic();
}

static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* Order the unmask update against surrounding accesses. */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	smp_mb__after_atomic();
}

static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};

static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;

static int msm_mdss_irqdomain_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	return irq_set_chip_data(irq, msm_mdss);
}

static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
	struct device *dev;
	struct irq_domain *domain;

	dev = msm_mdss->dev;

	domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
					  &msm_mdss_irqdomain_ops, msm_mdss);
	if (!domain) {
		dev_err(dev, "failed to add irq_domain\n");
		return -EINVAL;
	}

	msm_mdss->irq_controller.enabled_mask = 0;
	msm_mdss->irq_controller.domain = domain;

	return 0;
}

static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}

static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	if (data->ubwc_enc_version == UBWC_3_0)
		value |= MDSS_UBWC_STATIC_UBWC_AMSBC;

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}

static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);

	if (data->ubwc_enc_version == UBWC_3_0) {
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	} else {
		if (data->ubwc_dec_version == UBWC_4_3)
			writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		else
			writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	}
}

static void msm_mdss_setup_ubwc_dec_50(struct msm_mdss *msm_mdss)
{
	const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);

	writel_relaxed(4, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
	writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
}

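/*
 * Runtime-resume path: vote a minimum interconnect bandwidth so that
 * the AXI clocks can be enabled, turn on the bulk clocks, then program
 * the UBWC decoder matching this hardware's decoder version.
 */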
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret, i;

	/*
	 * Several components have AXI clocks that can only be turned on if
	 * the interconnect is enabled (non-zero bandwidth). Make sure the
	 * interconnects get at least a minimum vote.
	 */
	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));

	icc_set_bw(msm_mdss->reg_bus_path, 0,
		   msm_mdss->reg_bus_bw);

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * Register access requires MDSS_MDP_CLK, which is not enabled by the
	 * mdss on mdp5 hardware. Skip it for now.
	 */
	if (msm_mdss->is_mdp5 || !msm_mdss->mdss_data)
		return 0;

	/*
	 * The UBWC config registers are part of the "mdss" region, which is
	 * not accessible from the rest of the driver, so hardcode the known
	 * configurations here.
	 *
	 * The decoder version can be read from the UBWC_DEC_HW_VERSION
	 * register; the encoder version (UBWC_n) and the remaining
	 * parameters come from the hw data.
	 */
	switch (msm_mdss->mdss_data->ubwc_dec_version) {
	case 0: /* no UBWC */
	case UBWC_1_0:
		/* do nothing */
		break;
	case UBWC_2_0:
		msm_mdss_setup_ubwc_dec_20(msm_mdss);
		break;
	case UBWC_3_0:
		msm_mdss_setup_ubwc_dec_30(msm_mdss);
		break;
	case UBWC_4_0:
	case UBWC_4_3:
		msm_mdss_setup_ubwc_dec_40(msm_mdss);
		break;
	case UBWC_5_0:
		msm_mdss_setup_ubwc_dec_50(msm_mdss);
		break;
	default:
		dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
			msm_mdss->mdss_data->ubwc_dec_version);
		dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION));
		dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION));
		break;
	}

	return ret;
}

static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	int i;

	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);

	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, 0);

	if (msm_mdss->reg_bus_path)
		icc_set_bw(msm_mdss->reg_bus_path, 0, 0);

	return 0;
}

static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}

static int msm_mdss_reset(struct device *dev)
{
	struct reset_control *reset;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (!reset) {
		/* Optional reset not specified */
		return 0;
	} else if (IS_ERR(reset)) {
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to acquire mdss reset\n");
	}

	reset_control_assert(reset);
	/*
	 * Tests indicate that the reset has to be held for some period of
	 * time; make it one frame in a typical system.
	 */
	msleep(20);
	reset_control_deassert(reset);

	reset_control_put(reset);

	return 0;
}

/*
 * MDP5 MDSS uses at most three specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3

static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
	struct clk_bulk_data *bulk;
	int num_clocks = 0;
	int ret;

	if (!pdev)
		return -EINVAL;

	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	bulk[num_clocks++].id = "iface";
	bulk[num_clocks++].id = "bus";
	bulk[num_clocks++].id = "vsync";

	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
	if (ret)
		return ret;

	*clocks = bulk;

	return num_clocks;
}

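/*
 * Common init for MDP5 and DPU devices: reset the block, map the
 * "mdss" register space, acquire interconnect paths and clocks, and
 * install the chained IRQ handler together with its IRQ domain.
 */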
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	const struct msm_mdss_data *mdss_data;
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	ret = msm_mdss_reset(&pdev->dev);
	if (ret)
		return ERR_PTR(ret);

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	msm_mdss->mdss_data = qcom_ubwc_config_get_data();
	if (IS_ERR(msm_mdss->mdss_data))
		return ERR_CAST(msm_mdss->mdss_data);

	mdss_data = of_device_get_match_data(&pdev->dev);
	if (!mdss_data)
		return ERR_PTR(-EINVAL);

	msm_mdss->reg_bus_bw = mdss_data->reg_bus_bw;

	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq,
					 msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}

static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_disable(mdss);
}

static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_enable(mdss);
}

static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_suspend(dev);
}

static int __maybe_unused mdss_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_resume(dev);
}

static const struct dev_pm_ops mdss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
	SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};

static int mdss_probe(struct platform_device *pdev)
{
	struct msm_mdss *mdss;
	bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
	struct device *dev = &pdev->dev;
	int ret;

	mdss = msm_mdss_init(pdev, is_mdp5);
	if (IS_ERR(mdss))
		return PTR_ERR(mdss);

	platform_set_drvdata(pdev, mdss);

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to populate children devices\n");
		msm_mdss_destroy(mdss);
		return ret;
	}

	return 0;
}

static void mdss_remove(struct platform_device *pdev)
{
	struct msm_mdss *mdss = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	msm_mdss_destroy(mdss);
}

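/* Per-SoC register bus bandwidth votes, in kBps (icc units). */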
static const struct msm_mdss_data data_57k = {
	.reg_bus_bw = 57000,
};

static const struct msm_mdss_data data_74k = {
	.reg_bus_bw = 74000,
};

static const struct msm_mdss_data data_76k8 = {
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data data_153k6 = {
	.reg_bus_bw = 153600,
};

static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,mdss", .data = &data_153k6 },
	{ .compatible = "qcom,msm8998-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,qcm2290-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sa8775p-mdss", .data = &data_74k },
	{ .compatible = "qcom,sar2130p-mdss", .data = &data_74k },
	{ .compatible = "qcom,sdm670-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sdm845-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sc7180-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sc7280-mdss", .data = &data_74k },
	{ .compatible = "qcom,sc8180x-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sc8280xp-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6115-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6125-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6150-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6350-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm6375-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm7150-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm8150-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm8250-mdss", .data = &data_76k8 },
	{ .compatible = "qcom,sm8350-mdss", .data = &data_74k },
	{ .compatible = "qcom,sm8450-mdss", .data = &data_74k },
	{ .compatible = "qcom,sm8550-mdss", .data = &data_57k },
	{ .compatible = "qcom,sm8650-mdss", .data = &data_57k },
	{ .compatible = "qcom,sm8750-mdss", .data = &data_57k },
	/* TODO: x1e8: Add reg_bus_bw with real value */
	{ .compatible = "qcom,x1e80100-mdss", .data = &data_153k6 },
	{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);

static struct platform_driver mdss_platform_driver = {
	.probe = mdss_probe,
	.remove = mdss_remove,
	.driver = {
		.name = "msm-mdss",
		.of_match_table = mdss_dt_match,
		.pm = &mdss_pm_ops,
	},
};

void __init msm_mdss_register(void)
{
	platform_driver_register(&mdss_platform_driver);
}

void __exit msm_mdss_unregister(void)
{
	platform_driver_unregister(&mdss_platform_driver);
}