// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/of_irq.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0    (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
	 *
	 * The downstream fbdev driver gets these register offsets/values
	 * from DT.  It isn't really clear what these registers are, or
	 * whether different boards/SoCs need different values; presumably
	 * they are the "golden" registers.
	 *
	 * Not setting these does not seem to cause any problem, but we
	 * may be getting lucky with the bootloader initializing them for
	 * us.  OTOH, if we can always count on the bootloader setting
	 * the golden registers, then perhaps we don't need to care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}

/* Global/shared object state funcs */

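/*
 * kms-wide shared state (e.g. the SMP allocations used in the commit
 * path below) lives in a drm_private_obj, so it follows the normal
 * atomic duplicate/swap/destroy lifecycle like any other object state.
 */
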
/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
	return to_mdp5_global_state(mdp5_kms->glob_state.state);
}

/*
 * This acquires the modeset lock set aside for global state and returns
 * a new duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_mdp5_global_state(priv_state);
}

static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}

static void mdp5_global_print_state(struct drm_printer *p,
				    const struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	if (mdp5_state->mdp5_kms->smp)
		mdp5_smp_dump(mdp5_state->mdp5_kms->smp, p, mdp5_state);
}

static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
	.atomic_print_state = mdp5_global_print_state,
};

static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
				    &state->base,
				    &mdp5_global_state_funcs);
	return 0;
}

static void mdp5_enable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);
}

static void mdp5_disable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);
}

static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}

static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
		mdp5_crtc_wait_for_commit_done(crtc);
}

static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
}

static void mdp5_destroy(struct mdp5_kms *mdp5_kms);

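/*
 * Tear down what mdp5_kms_init() set up.  Note that the MMU is
 * detached before the last reference on the address space is dropped.
 */
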
static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = kms->aspace;

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	mdp_kms_destroy(&mdp5_kms->base);
	mdp5_destroy(mdp5_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init = mdp5_hw_init,
		.irq_preinstall = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall = mdp5_irq_uninstall,
		.irq = mdp5_irq,
		.enable_vblank = mdp5_enable_vblank,
		.disable_vblank = mdp5_disable_vblank,
		.flush_commit = mdp5_flush_commit,
		.enable_commit = mdp5_enable_commit,
		.disable_commit = mdp5_disable_commit,
		.prepare_commit = mdp5_prepare_commit,
		.wait_flush = mdp5_wait_flush,
		.complete_commit = mdp5_complete_commit,
		.destroy = mdp5_kms_destroy,
	},
	.set_irqmask = mdp5_set_irqmask,
};

static int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
	clk_disable_unprepare(mdp5_kms->tbu_clk);
	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}

static int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);
	clk_prepare_enable(mdp5_kms->tbu_clk);
	clk_prepare_enable(mdp5_kms->tbu_rt_clk);

	return 0;
}

static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
					     struct mdp5_interface *intf,
					     struct mdp5_ctl *ctl)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct drm_encoder *encoder;

	encoder = mdp5_encoder_init(dev, intf, ctl);
	if (IS_ERR(encoder))
		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");

	return encoder;
}

static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
	int id = 0, i;

	for (i = 0; i < intf_cnt; i++) {
		if (intfs[i] == INTF_DSI) {
			if (intf_num == i)
				return id;

			id++;
		}
	}

	return -EINVAL;
}

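/*
 * Set up one hw interface: request a CTL, construct an encoder on it,
 * and hand the encoder to the matching connector driver (HDMI or DSI).
 * Interfaces whose display driver has not probed are skipped.
 */
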
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				      intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (!ret)
			mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));

		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

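/*
 * Top-level modeset construction: one encoder per external interface,
 * one plane per hw pipe, and a CRTC for each encoder (bounded by the
 * number of layer mixers).
 */
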
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int num_crtcs;
	int i, ret, pi = 0, ci = 0;
	struct drm_plane *primary[MAX_BASES] = { NULL };
	struct drm_plane *cursor[MAX_BASES] = { NULL };
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	/*
	 * Construct encoders and modeset-initialize the connector devices
	 * for each external display interface.
	 */
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	/*
	 * We should ideally have fewer encoders (set up by parsing the
	 * MDP5 interfaces) than layer mixers present in hw, but let's be
	 * safe here anyway
	 */
	num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs for
	 * the N encoders set up by the driver.  The first N planes become
	 * primary planes for the CRTCs, with the remainder as overlay
	 * planes:
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane;
		enum drm_plane_type type;

		if (i < num_crtcs)
			type = DRM_PLANE_TYPE_PRIMARY;
		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
			type = DRM_PLANE_TYPE_CURSOR;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}

		if (type == DRM_PLANE_TYPE_PRIMARY)
			primary[pi++] = plane;
		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor[ci++] = plane;
	}

	for (i = 0; i < num_crtcs; i++) {
		struct drm_crtc *crtc;

		crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->num_crtcs++;
	}

	/*
	 * Now that we know the number of crtcs we've created, set the
	 * possible crtcs for the encoders
	 */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	struct device *dev = &mdp5_kms->pdev->dev;
	u32 version;

	pm_runtime_get_sync(dev);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	pm_runtime_put_sync(dev);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}

static int get_clk(struct platform_device *pdev, struct clk **clkp,
		   const char *name, bool mandatory)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = msm_clk_get(pdev, name);

	if (IS_ERR(clk) && mandatory) {
		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	if (IS_ERR(clk))
		DBG("skipping %s", name);
	else
		*clkp = clk;

	return 0;
}

static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);

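/*
 * kms init callback, handed to msm_drv_probe() in mdp5_dev_probe()
 * below; it runs once the drm_device has been created and starts by
 * calling mdp5_init() to read out the hw configuration.
 */
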
static int mdp5_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	struct msm_kms *kms = priv->kms;
	struct msm_gem_address_space *aspace;
	int i, ret;

	ret = mdp5_init(to_platform_device(dev->dev), dev);
	if (ret)
		return ret;

	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	pdev = mdp5_kms->pdev;

	ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	pm_runtime_get_sync(&pdev->dev);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
		    !config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
	mdelay(16);

	aspace = msm_kms_init_aspace(mdp5_kms->dev);
	if (IS_ERR(aspace)) {
		ret = PTR_ERR(aspace);
		goto fail;
	}

	kms->aspace = aspace;

	pm_runtime_put_sync(&pdev->dev);

	ret = modeset_init(mdp5_kms);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 0xffff;
	dev->mode_config.max_height = 0xffff;

	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
	dev->vblank_disable_immediate = true;

	return 0;
fail:
	if (kms)
		mdp5_kms_destroy(kms);

	return ret;
}

static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
{
	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&mdp5_kms->pdev->dev);

	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
}

static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
			   const enum mdp5_pipe *pipes, const uint32_t *offsets,
			   uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(dev, pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
				      pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}

static int hwpipe_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe rgb_planes[] = {
		SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	static const enum mdp5_pipe vig_planes[] = {
		SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
	};
	static const enum mdp5_pipe dma_planes[] = {
		SSPP_DMA0, SSPP_DMA1,
	};
	static const enum mdp5_pipe cursor_planes[] = {
		SSPP_CURSOR0, SSPP_CURSOR1,
	};
	const struct mdp5_cfg_hw *hw_cfg;
	int ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/* Construct RGB pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
			      hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
	if (ret)
		return ret;

	/* Construct video (VIG) pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
			      hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
	if (ret)
		return ret;

	/* Construct DMA pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
			      hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
	if (ret)
		return ret;

	/* Construct cursor pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
			      cursor_planes, hw_cfg->pipe_cursor.base,
			      hw_cfg->pipe_cursor.caps);
	if (ret)
		return ret;

	return 0;
}

static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(dev, &hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
				      i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}

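/*
 * Allocate an mdp5_interface for each hw interface that is not marked
 * INTF_DISABLED in the config; the encoders themselves are constructed
 * later, in modeset_init().
 */
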
static int interface_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	const enum mdp5_intf_type *intf_types;
	int i;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	intf_types = hw_cfg->intf.connect;

	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		struct mdp5_interface *intf;

		if (intf_types[i] == INTF_DISABLED)
			continue;

		intf = devm_kzalloc(dev->dev, sizeof(*intf), GFP_KERNEL);
		if (!intf) {
			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
			return -ENOMEM;
		}

		intf->num = i;
		intf->type = intf_types[i];
		intf->mode = MDP5_INTF_MODE_NONE;
		intf->idx = mdp5_kms->num_intfs;
		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
	}

	return 0;
}

static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms->dev = dev;

	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;

	/* We need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out the hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	pm_runtime_enable(&pdev->dev);
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	return 0;
fail:
	mdp5_destroy(mdp5_kms);
	return ret;
}

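/*
 * Note that the bandwidth votes below are fixed values rather than
 * something computed from the current display configuration.
 */
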
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
	struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
	struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
	struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");

	if (IS_ERR(path0))
		return PTR_ERR(path0);

	if (!path0) {
		/* no interconnect support is not necessarily a fatal
		 * condition, the platform may simply not have an
		 * interconnect driver yet.  But warn about it in case
		 * the bootloader didn't set up bus clocks high enough
		 * for scanout.
		 */
		dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
		return 0;
	}

	icc_set_bw(path0, 0, MBps_to_icc(6400));

	if (!IS_ERR_OR_NULL(path1))
		icc_set_bw(path1, 0, MBps_to_icc(6400));
	if (!IS_ERR_OR_NULL(path_rot))
		icc_set_bw(path_rot, 0, MBps_to_icc(6400));

	return 0;
}

static int mdp5_dev_probe(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms;
	int ret, irq;

	DBG("");

	if (!msm_disp_drv_should_bind(&pdev->dev, false))
		return -ENODEV;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms)
		return -ENOMEM;

	ret = mdp5_setup_interconnect(pdev);
	if (ret)
		return ret;

	mdp5_kms->pdev = pdev;

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
	if (IS_ERR(mdp5_kms->mmio))
		return PTR_ERR(mdp5_kms->mmio);

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
	if (ret)
		return ret;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
	if (ret)
		return ret;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
	if (ret)
		return ret;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
	if (ret)
		return ret;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
	get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
	get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "failed to get irq\n");

	mdp5_kms->base.base.irq = irq;

	return msm_drv_probe(&pdev->dev, mdp5_kms_init, &mdp5_kms->base.base);
}

static void mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));

	DBG("");

	return mdp5_disable(mdp5_kms);
}

static __maybe_unused int mdp5_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));

	DBG("");

	return mdp5_enable(mdp5_kms);
}

static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
	.prepare = msm_kms_pm_prepare,
	.complete = msm_kms_pm_complete,
};

static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);

static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove_new = mdp5_dev_remove,
	.shutdown = msm_kms_shutdown,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};

void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}

void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}