// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>

#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	u32 dmap_cfg, vg_cfg;
	unsigned long clk;

	pm_runtime_get_sync(dev->dev);

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

	pm_runtime_put_sync(dev->dev);

	return 0;
}

static void mdp4_enable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_enable(mdp4_kms);
}

static void mdp4_disable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_disable(mdp4_kms);
}

static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		mdp4_crtc_wait_for_commit_done(crtc);
}

static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* if we had >1 encoder, we'd need something more clever: */
	switch (encoder->encoder_type) {
	case DRM_MODE_ENCODER_TMDS:
		return mdp4_dtv_round_pixclk(encoder, rate);
	case DRM_MODE_ENCODER_LVDS:
	case DRM_MODE_ENCODER_DSI:
	default:
		return rate;
	}
}

static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct device *dev = mdp4_kms->dev->dev;
	struct msm_gem_address_space *aspace = kms->aspace;

	if (mdp4_kms->blank_cursor_iova)
		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
	drm_gem_object_put(mdp4_kms->blank_cursor_bo);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	if (mdp4_kms->rpm_enabled)
		pm_runtime_disable(dev);

	mdp_kms_destroy(&mdp4_kms->base);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init = mdp4_hw_init,
		.irq_preinstall = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall = mdp4_irq_uninstall,
		.irq = mdp4_irq,
		.enable_vblank = mdp4_enable_vblank,
		.disable_vblank = mdp4_disable_vblank,
		.enable_commit = mdp4_enable_commit,
		.disable_commit = mdp4_disable_commit,
		.flush_commit = mdp4_flush_commit,
		.wait_flush = mdp4_wait_flush,
		.complete_commit = mdp4_complete_commit,
		.get_format = mdp_get_format,
		.round_pixclk = mdp4_round_pixclk,
		.destroy = mdp4_destroy,
	},
	.set_irqmask = mdp4_set_irqmask,
};

int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_disable_unprepare(mdp4_kms->clk);
	clk_disable_unprepare(mdp4_kms->pclk);
	clk_disable_unprepare(mdp4_kms->lut_clk);
	clk_disable_unprepare(mdp4_kms->axi_clk);

	return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_prepare_enable(mdp4_kms->clk);
	clk_prepare_enable(mdp4_kms->pclk);
	clk_prepare_enable(mdp4_kms->lut_clk);
	clk_prepare_enable(mdp4_kms->axi_clk);

	return 0;
}

static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
		int intf_type)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct device_node *panel_node;
	int dsi_id;
	int ret;

	switch (intf_type) {
	case DRM_MODE_ENCODER_LVDS:
		/*
		 * bail out early if there is no panel node (no need to
		 * initialize LCDC encoder and LVDS connector)
		 */
		panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
		if (!panel_node)
			return 0;

		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
			of_node_put(panel_node);
			return PTR_ERR(encoder);
		}

		/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
		encoder->possible_crtcs = 1 << DMA_P;

		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
		if (IS_ERR(connector)) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
			of_node_put(panel_node);
			return PTR_ERR(connector);
		}

		break;
	case DRM_MODE_ENCODER_TMDS:
		encoder = mdp4_dtv_encoder_init(dev);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
			return PTR_ERR(encoder);
		}

		/* DTV can be hooked to DMA_E: */
		encoder->possible_crtcs = 1 << 1;

		if (priv->hdmi) {
			/* Construct bridge/connector for HDMI: */
			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
			if (ret) {
				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
				return ret;
			}
		}

		break;
	case DRM_MODE_ENCODER_DSI:
		/* only DSI1 supported for now */
		dsi_id = 0;

		if (!priv->dsi[dsi_id])
			break;

		encoder = mdp4_dsi_encoder_init(dev);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			DRM_DEV_ERROR(dev->dev,
				"failed to construct DSI encoder: %d\n", ret);
			return ret;
		}

		/* TODO: Add DMA_S later? */
		encoder->possible_crtcs = 1 << DMA_P;

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
				ret);
			return ret;
		}

		break;
	default:
		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
		return -EINVAL;
	}

	return 0;
}

static int modeset_init(struct mdp4_kms *mdp4_kms)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;
	static const enum mdp4_pipe rgb_planes[] = {
		RGB1, RGB2,
	};
	static const enum mdp4_pipe vg_planes[] = {
		VG1, VG2,
	};
	static const enum mdp4_dma mdp4_crtcs[] = {
		DMA_P, DMA_E,
	};
	static const char * const mdp4_crtc_names[] = {
		"DMA_P", "DMA_E",
	};
	static const int mdp4_intfs[] = {
		DRM_MODE_ENCODER_LVDS,
		DRM_MODE_ENCODER_DSI,
		DRM_MODE_ENCODER_TMDS,
	};

	/* construct non-private planes: */
	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
		plane = mdp4_plane_init(dev, vg_planes[i], false);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for VG%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}
	}

	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
		plane = mdp4_plane_init(dev, rgb_planes[i], true);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for RGB%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}

		crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
				mdp4_crtcs[i]);
		if (IS_ERR(crtc)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
				mdp4_crtc_names[i]);
			ret = PTR_ERR(crtc);
			goto fail;
		}

		priv->num_crtcs++;
	}

	/*
	 * we currently set up two relatively fixed paths:
	 *
	 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
	 *                 or
	 * DSI path:       RGB1 -> DMA_P -> DSI1 -> DSI Panel
	 *
	 * DTV/HDMI path:  RGB2 -> DMA_E -> DTV -> HDMI
	 */

	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
				i, ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
		u32 *major, u32 *minor)
{
	struct drm_device *dev = mdp4_kms->dev;
	u32 version;

	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	*major = FIELD(version, MDP4_VERSION_MAJOR);
	*minor = FIELD(version, MDP4_VERSION_MINOR);

	DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
}

static int mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
	struct msm_kms *kms = NULL;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
	int ret;
	u32 major, minor;
	unsigned long max_clk;

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	max_clk = 266667000;

	ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
		goto fail;
	}

	kms = priv->kms;

	mdp4_kms->dev = dev;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	clk_set_rate(mdp4_kms->clk, max_clk);

	read_mdp_hw_revision(mdp4_kms, &major, &minor);

	if (major != 4) {
		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
			major, minor);
		ret = -ENXIO;
		goto fail;
	}

	mdp4_kms->rev = minor;

	if (mdp4_kms->rev >= 2) {
		if (!mdp4_kms->lut_clk) {
			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
			ret = -ENODEV;
			goto fail;
		}
		clk_set_rate(mdp4_kms->lut_clk, max_clk);
	}

	pm_runtime_enable(dev->dev);
	mdp4_kms->rpm_enabled = true;

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);

	/* give the disables roughly one frame (~16 ms at 60 Hz) to take effect: */
	mdelay(16);

	mmu = msm_iommu_new(&pdev->dev, 0);
	if (IS_ERR(mmu)) {
		ret = PTR_ERR(mmu);
		goto fail;
	} else if (!mmu) {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	} else {
		aspace = msm_gem_address_space_create(mmu,
			"mdp4", 0x1000, 0x100000000 - 0x1000);

		if (IS_ERR(aspace)) {
			if (!IS_ERR(mmu))
				mmu->funcs->destroy(mmu);
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	return 0;

fail:
	if (kms)
		mdp4_destroy(kms);

	return ret;
}

static const struct dev_pm_ops mdp4_pm_ops = {
	.prepare = msm_kms_pm_prepare,
	.complete = msm_kms_pm_complete,
};

static int mdp4_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mdp4_kms *mdp4_kms;
	int irq;

	mdp4_kms = devm_kzalloc(dev, sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms)
		return dev_err_probe(dev, -ENOMEM, "failed to allocate kms\n");

	mdp4_kms->mmio = msm_ioremap(pdev, NULL);
	if (IS_ERR(mdp4_kms->mmio))
		return PTR_ERR(mdp4_kms->mmio);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(dev, irq, "failed to get irq\n");

	mdp4_kms->base.base.irq = irq;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk))
		return dev_err_probe(dev, PTR_ERR(mdp4_kms->clk), "failed to get core_clk\n");

	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(mdp4_kms->axi_clk))
		return dev_err_probe(dev, PTR_ERR(mdp4_kms->axi_clk), "failed to get axi_clk\n");

	/*
	 * This is required for revn >= 2. Handle errors here and let the kms
	 * init bail out if the clock is not provided.
	 */
	mdp4_kms->lut_clk = devm_clk_get_optional(&pdev->dev, "lut_clk");
	if (IS_ERR(mdp4_kms->lut_clk))
		return dev_err_probe(dev, PTR_ERR(mdp4_kms->lut_clk), "failed to get lut_clk\n");

	return msm_drv_probe(&pdev->dev, mdp4_kms_init, &mdp4_kms->base.base);
}

static void mdp4_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static const struct of_device_id mdp4_dt_match[] = {
	{ .compatible = "qcom,mdp4" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mdp4_dt_match);

static struct platform_driver mdp4_platform_driver = {
	.probe = mdp4_probe,
	.remove_new = mdp4_remove,
	.shutdown = msm_kms_shutdown,
	.driver = {
		.name = "mdp4",
		.of_match_table = mdp4_dt_match,
		.pm = &mdp4_pm_ops,
	},
};

void __init msm_mdp4_register(void)
{
	platform_driver_register(&mdp4_platform_driver);
}

void __exit msm_mdp4_unregister(void)
{
	platform_driver_unregister(&mdp4_platform_driver);
}