// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/of_irq.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000   (mdss_mdp.c:839)
	 *
	 * The downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if the
	 * values differ between boards/SoCs, etc.  I guess they are the
	 * golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But we
	 * may be getting lucky with the bootloader initializing them for
	 * us.  OTOH, if we can always count on the bootloader setting
	 * the golden registers, then perhaps we don't need to care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
	return to_mdp5_global_state(mdp5_kms->glob_state.state);
}
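
/*
 * A minimal usage sketch for the two global-state helpers here (the
 * calling context is hypothetical; it would typically be an atomic
 * check function that has a drm_atomic_state to work against):
 *
 *	struct mdp5_global_state *global_state;
 *
 *	global_state = mdp5_get_global_state(state);
 *	if (IS_ERR(global_state))
 *		return PTR_ERR(global_state);
 *
 *	... stage changes in global_state (e.g. ->smp) and let the
 *	atomic core swap them in, or throw them away on failure ...
 */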

/*
 * This acquires the modeset lock set aside for global state and creates
 * a new duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_mdp5_global_state(priv_state);
}

static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}

static void mdp5_global_print_state(struct drm_printer *p,
				    const struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	if (mdp5_state->mdp5_kms->smp)
		mdp5_smp_dump(mdp5_state->mdp5_kms->smp, p, mdp5_state);
}

static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
	.atomic_print_state = mdp5_global_print_state,
};

static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
				    &state->base,
				    &mdp5_global_state_funcs);
	return 0;
}

static void mdp5_enable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);
}

static void mdp5_disable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);
}

static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}

static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
		mdp5_crtc_wait_for_commit_done(crtc);
}

static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
}
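
/*
 * Note: the msm_kms core invokes the commit hooks above roughly in the
 * order enable_commit -> prepare_commit -> flush_commit -> wait_flush ->
 * complete_commit -> disable_commit, so SMP state staged in
 * prepare_commit() is released in complete_commit() once the frame is
 * done, and the runtime-PM reference taken in enable_commit() is held
 * across the whole sequence.
 */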

static void mdp5_destroy(struct mdp5_kms *mdp5_kms);

static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = kms->aspace;

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	mdp_kms_destroy(&mdp5_kms->base);
	mdp5_destroy(mdp5_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init = mdp5_hw_init,
		.irq_preinstall = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall = mdp5_irq_uninstall,
		.irq = mdp5_irq,
		.enable_vblank = mdp5_enable_vblank,
		.disable_vblank = mdp5_disable_vblank,
		.flush_commit = mdp5_flush_commit,
		.enable_commit = mdp5_enable_commit,
		.disable_commit = mdp5_disable_commit,
		.prepare_commit = mdp5_prepare_commit,
		.wait_flush = mdp5_wait_flush,
		.complete_commit = mdp5_complete_commit,
		.get_format = mdp_get_format,
		.destroy = mdp5_kms_destroy,
	},
	.set_irqmask = mdp5_set_irqmask,
};

static int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
	clk_disable_unprepare(mdp5_kms->tbu_clk);
	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}

static int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);
	clk_prepare_enable(mdp5_kms->tbu_clk);
	clk_prepare_enable(mdp5_kms->tbu_rt_clk);

	return 0;
}

static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
					     struct mdp5_interface *intf,
					     struct mdp5_ctl *ctl)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct drm_encoder *encoder;

	encoder = mdp5_encoder_init(dev, intf, ctl);
	if (IS_ERR(encoder))
		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");

	return encoder;
}

static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
	int id = 0, i;

	for (i = 0; i < intf_cnt; i++) {
		if (intfs[i] == INTF_DSI) {
			if (intf_num == i)
				return id;

			id++;
		}
	}

	return -EINVAL;
}
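
/*
 * Worked example for get_dsi_id_from_intf() above: with a (hypothetical)
 * hw_cfg->intf.connect of { INTF_DISABLED, INTF_DSI, INTF_HDMI, INTF_DSI },
 * intf_num 1 maps to DSI id 0 and intf_num 3 maps to DSI id 1, i.e. the
 * DSI id counts DSI entries in connect order; any non-DSI intf_num
 * yields -EINVAL.
 */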

static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				      intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (!ret)
			mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));

		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
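
/*
 * Illustrative sizing for modeset_init() below (the numbers are made
 * up): on a config with 8 hw pipes, 4 layer mixers and 2 encoders,
 * num_crtcs = min(2, 4) = 2, so the first 2 hw pipes back primary
 * planes, cursor-capable pipes among the rest become cursor planes,
 * and the remainder are overlay planes.
 */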

static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int num_crtcs;
	int i, ret, pi = 0, ci = 0;
	struct drm_plane *primary[MAX_BASES] = { NULL };
	struct drm_plane *cursor[MAX_BASES] = { NULL };
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	/*
	 * Construct encoders and perform modeset initialization of the
	 * connector devices for each external display interface.
	 */
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	/*
	 * Ideally we should have fewer encoders (set up by parsing the
	 * MDP5 interfaces) than layer mixers present in HW, but let's
	 * be safe here anyway:
	 */
	num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs
	 * for the N encoders set up by the driver. The first N planes
	 * become primary planes for the CRTCs, with the remainder as
	 * overlay planes:
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane;
		enum drm_plane_type type;

		if (i < num_crtcs)
			type = DRM_PLANE_TYPE_PRIMARY;
		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
			type = DRM_PLANE_TYPE_CURSOR;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}

		if (type == DRM_PLANE_TYPE_PRIMARY)
			primary[pi++] = plane;
		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor[ci++] = plane;
	}

	for (i = 0; i < num_crtcs; i++) {
		struct drm_crtc *crtc;

		crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->num_crtcs++;
	}

	/*
	 * Now that we know the number of crtcs we've created, set the
	 * possible crtcs for the encoders:
	 */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	struct device *dev = &mdp5_kms->pdev->dev;
	u32 version;

	pm_runtime_get_sync(dev);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	pm_runtime_put_sync(dev);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DRM_DEV_INFO(dev, "MDP5 version v%d.%d\n", *major, *minor);
}

static int get_clk(struct platform_device *pdev, struct clk **clkp,
		   const char *name, bool mandatory)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = msm_clk_get(pdev, name);

	if (IS_ERR(clk) && mandatory) {
		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	if (IS_ERR(clk))
		DBG("skipping %s", name);
	else
		*clkp = clk;

	return 0;
}

static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);

static int mdp5_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	struct msm_kms *kms = priv->kms;
	struct msm_gem_address_space *aspace;
	int i, ret;

	ret = mdp5_init(to_platform_device(dev->dev), dev);
	if (ret)
		return ret;

	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	pdev = mdp5_kms->pdev;

	ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/*
	 * Make sure things are off before attaching the iommu (the
	 * bootloader could have left things on, in which case we'll
	 * start getting faults if we don't disable):
	 */
	pm_runtime_get_sync(&pdev->dev);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
		    !config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
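
	/*
	 * The ~16 ms delay below is roughly one frame at 60 Hz;
	 * presumably it gives the just-disabled timing engines time to
	 * finish the frame in flight before the iommu is attached.
	 */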
	mdelay(16);

	aspace = msm_kms_init_aspace(mdp5_kms->dev);
	if (IS_ERR(aspace)) {
		ret = PTR_ERR(aspace);
		goto fail;
	}

	kms->aspace = aspace;

	pm_runtime_put_sync(&pdev->dev);

	ret = modeset_init(mdp5_kms);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 0xffff;
	dev->mode_config.max_height = 0xffff;

	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
	dev->vblank_disable_immediate = true;

	return 0;
fail:
	if (kms)
		mdp5_kms_destroy(kms);

	return ret;
}

static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
{
	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&mdp5_kms->pdev->dev);

	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
}

static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
			   const enum mdp5_pipe *pipes, const uint32_t *offsets,
			   uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(dev, pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
				      pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}

static int hwpipe_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe rgb_planes[] = {
		SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	static const enum mdp5_pipe vig_planes[] = {
		SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
	};
	static const enum mdp5_pipe dma_planes[] = {
		SSPP_DMA0, SSPP_DMA1,
	};
	static const enum mdp5_pipe cursor_planes[] = {
		SSPP_CURSOR0, SSPP_CURSOR1,
	};
	const struct mdp5_cfg_hw *hw_cfg;
	int ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/* Construct RGB pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
			      hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
	if (ret)
		return ret;

	/* Construct video (VIG) pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
			      hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
	if (ret)
		return ret;

	/* Construct DMA pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
			      hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
	if (ret)
		return ret;

	/* Construct cursor pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
			      cursor_planes, hw_cfg->pipe_cursor.base,
			      hw_cfg->pipe_cursor.caps);
	if (ret)
		return ret;

	return 0;
}
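
/*
 * Note: construct_pipes() assigns hwpipe->idx in call order, so the RGB
 * pipes constructed first in hwpipe_init() above occupy the lowest
 * indices and therefore back the primary planes in modeset_init(); VIG,
 * DMA and cursor pipes follow in that order.
 */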

static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(dev, &hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
				      i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}

static int interface_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	const enum mdp5_intf_type *intf_types;
	int i;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	intf_types = hw_cfg->intf.connect;

	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		struct mdp5_interface *intf;

		if (intf_types[i] == INTF_DISABLED)
			continue;

		intf = devm_kzalloc(dev->dev, sizeof(*intf), GFP_KERNEL);
		if (!intf) {
			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
			return -ENOMEM;
		}

		intf->num = i;
		intf->type = intf_types[i];
		intf->mode = MDP5_INTF_MODE_NONE;
		intf->idx = mdp5_kms->num_intfs;
		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
	}

	return 0;
}

static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms->dev = dev;

	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;

	/*
	 * We need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out the hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	pm_runtime_enable(&pdev->dev);
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	return 0;
fail:
	mdp5_destroy(mdp5_kms);
	return ret;
}
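
/*
 * mdp5_setup_interconnect() below makes a single fixed bandwidth vote
 * per memory path (6400 MB/s) at probe time; there is no per-mode
 * bandwidth scaling here, so the value is effectively a generous
 * worst-case figure.
 */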

static int mdp5_setup_interconnect(struct platform_device *pdev)
{
	struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
	struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
	struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");

	if (IS_ERR(path0))
		return PTR_ERR(path0);

	if (!path0) {
		/*
		 * Missing interconnect support is not necessarily a
		 * fatal condition; the platform may simply not have an
		 * interconnect driver yet.  But warn about it in case
		 * the bootloader didn't set up bus clocks high enough
		 * for scanout.
		 */
		dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
		return 0;
	}

	icc_set_bw(path0, 0, MBps_to_icc(6400));

	if (!IS_ERR_OR_NULL(path1))
		icc_set_bw(path1, 0, MBps_to_icc(6400));
	if (!IS_ERR_OR_NULL(path_rot))
		icc_set_bw(path_rot, 0, MBps_to_icc(6400));

	return 0;
}

static int mdp5_dev_probe(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms;
	int ret, irq;

	DBG("");

	if (!msm_disp_drv_should_bind(&pdev->dev, false))
		return -ENODEV;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms)
		return -ENOMEM;

	ret = mdp5_setup_interconnect(pdev);
	if (ret)
		return ret;

	mdp5_kms->pdev = pdev;

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
	if (IS_ERR(mdp5_kms->mmio))
		return PTR_ERR(mdp5_kms->mmio);

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
	if (ret)
		return ret;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
	if (ret)
		return ret;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
	if (ret)
		return ret;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
	if (ret)
		return ret;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
	get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
	get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "failed to get irq\n");

	mdp5_kms->base.base.irq = irq;

	return msm_drv_probe(&pdev->dev, mdp5_kms_init, &mdp5_kms->base.base);
}

static void mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));

	DBG("");

	return mdp5_disable(mdp5_kms);
}

static __maybe_unused int mdp5_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));

	DBG("");

	return mdp5_enable(mdp5_kms);
}

static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
	.prepare = msm_kms_pm_prepare,
	.complete = msm_kms_pm_complete,
};

static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);

static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove_new = mdp5_dev_remove,
	.shutdown = msm_kms_shutdown,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};

void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}

void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}