/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/of_irq.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

/* IOMMU context/port names used when attaching/detaching the MDP's MMU */
static const char *iommu_ports[] = {
		"mdp_0",
};

/*
 * One-time hardware init for the MDP5 block: clear the display interface
 * mux and reset the CTL manager's hw state.  Holds a runtime-PM reference
 * across the register accesses so the block is clocked.
 */
static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000   (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if
	 * different values for different boards/SoC's, etc. I guess
	 * they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem. But
	 * we may be getting lucky with the bootloader initializing
	 * them for us. OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
	return to_mdp5_global_state(mdp5_kms->glob_state.state);
}

/*
 * This acquires the modeset lock set aside for global state, creates
 * a new duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_mdp5_global_state(priv_state);
}

/* .atomic_duplicate_state: shallow-copy the current global state object */
static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

/* .atomic_destroy_state: free a state made by mdp5_global_duplicate_state() */
static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}
*state) 124 { 125 struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state); 126 127 kfree(mdp5_state); 128 } 129 130 static const struct drm_private_state_funcs mdp5_global_state_funcs = { 131 .atomic_duplicate_state = mdp5_global_duplicate_state, 132 .atomic_destroy_state = mdp5_global_destroy_state, 133 }; 134 135 static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms) 136 { 137 struct mdp5_global_state *state; 138 139 drm_modeset_lock_init(&mdp5_kms->glob_state_lock); 140 141 state = kzalloc(sizeof(*state), GFP_KERNEL); 142 if (!state) 143 return -ENOMEM; 144 145 state->mdp5_kms = mdp5_kms; 146 147 drm_atomic_private_obj_init(&mdp5_kms->glob_state, 148 &state->base, 149 &mdp5_global_state_funcs); 150 return 0; 151 } 152 153 static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) 154 { 155 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 156 struct device *dev = &mdp5_kms->pdev->dev; 157 struct mdp5_global_state *global_state; 158 159 global_state = mdp5_get_existing_global_state(mdp5_kms); 160 161 pm_runtime_get_sync(dev); 162 163 if (mdp5_kms->smp) 164 mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp); 165 } 166 167 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 168 { 169 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 170 struct device *dev = &mdp5_kms->pdev->dev; 171 struct mdp5_global_state *global_state; 172 173 global_state = mdp5_get_existing_global_state(mdp5_kms); 174 175 if (mdp5_kms->smp) 176 mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp); 177 178 pm_runtime_put_sync(dev); 179 } 180 181 static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms, 182 struct drm_crtc *crtc) 183 { 184 mdp5_crtc_wait_for_commit_done(crtc); 185 } 186 187 static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, 188 struct drm_encoder *encoder) 189 { 190 return rate; 191 } 192 193 static int mdp5_set_split_display(struct msm_kms *kms, 
194 struct drm_encoder *encoder, 195 struct drm_encoder *slave_encoder, 196 bool is_cmd_mode) 197 { 198 if (is_cmd_mode) 199 return mdp5_cmd_encoder_set_split_display(encoder, 200 slave_encoder); 201 else 202 return mdp5_vid_encoder_set_split_display(encoder, 203 slave_encoder); 204 } 205 206 static void mdp5_set_encoder_mode(struct msm_kms *kms, 207 struct drm_encoder *encoder, 208 bool cmd_mode) 209 { 210 mdp5_encoder_set_intf_mode(encoder, cmd_mode); 211 } 212 213 static void mdp5_kms_destroy(struct msm_kms *kms) 214 { 215 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 216 struct msm_gem_address_space *aspace = kms->aspace; 217 int i; 218 219 for (i = 0; i < mdp5_kms->num_hwmixers; i++) 220 mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); 221 222 for (i = 0; i < mdp5_kms->num_hwpipes; i++) 223 mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); 224 225 if (aspace) { 226 aspace->mmu->funcs->detach(aspace->mmu, 227 iommu_ports, ARRAY_SIZE(iommu_ports)); 228 msm_gem_address_space_put(aspace); 229 } 230 } 231 232 #ifdef CONFIG_DEBUG_FS 233 static int smp_show(struct seq_file *m, void *arg) 234 { 235 struct drm_info_node *node = (struct drm_info_node *) m->private; 236 struct drm_device *dev = node->minor->dev; 237 struct msm_drm_private *priv = dev->dev_private; 238 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); 239 struct drm_printer p = drm_seq_file_printer(m); 240 241 if (!mdp5_kms->smp) { 242 drm_printf(&p, "no SMP pool\n"); 243 return 0; 244 } 245 246 mdp5_smp_dump(mdp5_kms->smp, &p); 247 248 return 0; 249 } 250 251 static struct drm_info_list mdp5_debugfs_list[] = { 252 {"smp", smp_show }, 253 }; 254 255 static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) 256 { 257 struct drm_device *dev = minor->dev; 258 int ret; 259 260 ret = drm_debugfs_create_files(mdp5_debugfs_list, 261 ARRAY_SIZE(mdp5_debugfs_list), 262 minor->debugfs_root, minor); 263 264 if (ret) { 265 dev_err(dev->dev, "could not install mdp5_debugfs_list\n"); 266 
return ret; 267 } 268 269 return 0; 270 } 271 #endif 272 273 static const struct mdp_kms_funcs kms_funcs = { 274 .base = { 275 .hw_init = mdp5_hw_init, 276 .irq_preinstall = mdp5_irq_preinstall, 277 .irq_postinstall = mdp5_irq_postinstall, 278 .irq_uninstall = mdp5_irq_uninstall, 279 .irq = mdp5_irq, 280 .enable_vblank = mdp5_enable_vblank, 281 .disable_vblank = mdp5_disable_vblank, 282 .prepare_commit = mdp5_prepare_commit, 283 .complete_commit = mdp5_complete_commit, 284 .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done, 285 .get_format = mdp_get_format, 286 .round_pixclk = mdp5_round_pixclk, 287 .set_split_display = mdp5_set_split_display, 288 .set_encoder_mode = mdp5_set_encoder_mode, 289 .destroy = mdp5_kms_destroy, 290 #ifdef CONFIG_DEBUG_FS 291 .debugfs_init = mdp5_kms_debugfs_init, 292 #endif 293 }, 294 .set_irqmask = mdp5_set_irqmask, 295 }; 296 297 int mdp5_disable(struct mdp5_kms *mdp5_kms) 298 { 299 DBG(""); 300 301 mdp5_kms->enable_count--; 302 WARN_ON(mdp5_kms->enable_count < 0); 303 304 clk_disable_unprepare(mdp5_kms->ahb_clk); 305 clk_disable_unprepare(mdp5_kms->axi_clk); 306 clk_disable_unprepare(mdp5_kms->core_clk); 307 if (mdp5_kms->lut_clk) 308 clk_disable_unprepare(mdp5_kms->lut_clk); 309 310 return 0; 311 } 312 313 int mdp5_enable(struct mdp5_kms *mdp5_kms) 314 { 315 DBG(""); 316 317 mdp5_kms->enable_count++; 318 319 clk_prepare_enable(mdp5_kms->ahb_clk); 320 clk_prepare_enable(mdp5_kms->axi_clk); 321 clk_prepare_enable(mdp5_kms->core_clk); 322 if (mdp5_kms->lut_clk) 323 clk_prepare_enable(mdp5_kms->lut_clk); 324 325 return 0; 326 } 327 328 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, 329 struct mdp5_interface *intf, 330 struct mdp5_ctl *ctl) 331 { 332 struct drm_device *dev = mdp5_kms->dev; 333 struct msm_drm_private *priv = dev->dev_private; 334 struct drm_encoder *encoder; 335 336 encoder = mdp5_encoder_init(dev, intf, ctl); 337 if (IS_ERR(encoder)) { 338 dev_err(dev->dev, "failed to construct 
encoder\n"); 339 return encoder; 340 } 341 342 priv->encoders[priv->num_encoders++] = encoder; 343 344 return encoder; 345 } 346 347 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) 348 { 349 const enum mdp5_intf_type *intfs = hw_cfg->intf.connect; 350 const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect); 351 int id = 0, i; 352 353 for (i = 0; i < intf_cnt; i++) { 354 if (intfs[i] == INTF_DSI) { 355 if (intf_num == i) 356 return id; 357 358 id++; 359 } 360 } 361 362 return -EINVAL; 363 } 364 365 static int modeset_init_intf(struct mdp5_kms *mdp5_kms, 366 struct mdp5_interface *intf) 367 { 368 struct drm_device *dev = mdp5_kms->dev; 369 struct msm_drm_private *priv = dev->dev_private; 370 struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; 371 struct mdp5_ctl *ctl; 372 struct drm_encoder *encoder; 373 int ret = 0; 374 375 switch (intf->type) { 376 case INTF_eDP: 377 if (!priv->edp) 378 break; 379 380 ctl = mdp5_ctlm_request(ctlm, intf->num); 381 if (!ctl) { 382 ret = -EINVAL; 383 break; 384 } 385 386 encoder = construct_encoder(mdp5_kms, intf, ctl); 387 if (IS_ERR(encoder)) { 388 ret = PTR_ERR(encoder); 389 break; 390 } 391 392 ret = msm_edp_modeset_init(priv->edp, dev, encoder); 393 break; 394 case INTF_HDMI: 395 if (!priv->hdmi) 396 break; 397 398 ctl = mdp5_ctlm_request(ctlm, intf->num); 399 if (!ctl) { 400 ret = -EINVAL; 401 break; 402 } 403 404 encoder = construct_encoder(mdp5_kms, intf, ctl); 405 if (IS_ERR(encoder)) { 406 ret = PTR_ERR(encoder); 407 break; 408 } 409 410 ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); 411 break; 412 case INTF_DSI: 413 { 414 const struct mdp5_cfg_hw *hw_cfg = 415 mdp5_cfg_get_hw_config(mdp5_kms->cfg); 416 int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num); 417 418 if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { 419 dev_err(dev->dev, "failed to find dsi from intf %d\n", 420 intf->num); 421 ret = -EINVAL; 422 break; 423 } 424 425 if (!priv->dsi[dsi_id]) 426 break; 427 428 ctl = 
mdp5_ctlm_request(ctlm, intf->num); 429 if (!ctl) { 430 ret = -EINVAL; 431 break; 432 } 433 434 encoder = construct_encoder(mdp5_kms, intf, ctl); 435 if (IS_ERR(encoder)) { 436 ret = PTR_ERR(encoder); 437 break; 438 } 439 440 ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); 441 break; 442 } 443 default: 444 dev_err(dev->dev, "unknown intf: %d\n", intf->type); 445 ret = -EINVAL; 446 break; 447 } 448 449 return ret; 450 } 451 452 static int modeset_init(struct mdp5_kms *mdp5_kms) 453 { 454 struct drm_device *dev = mdp5_kms->dev; 455 struct msm_drm_private *priv = dev->dev_private; 456 const struct mdp5_cfg_hw *hw_cfg; 457 unsigned int num_crtcs; 458 int i, ret, pi = 0, ci = 0; 459 struct drm_plane *primary[MAX_BASES] = { NULL }; 460 struct drm_plane *cursor[MAX_BASES] = { NULL }; 461 462 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 463 464 /* 465 * Construct encoders and modeset initialize connector devices 466 * for each external display interface. 467 */ 468 for (i = 0; i < mdp5_kms->num_intfs; i++) { 469 ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]); 470 if (ret) 471 goto fail; 472 } 473 474 /* 475 * We should ideally have less number of encoders (set up by parsing 476 * the MDP5 interfaces) than the number of layer mixers present in HW, 477 * but let's be safe here anyway 478 */ 479 num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers); 480 481 /* 482 * Construct planes equaling the number of hw pipes, and CRTCs for the 483 * N encoders set up by the driver. 
The first N planes become primary 484 * planes for the CRTCs, with the remainder as overlay planes: 485 */ 486 for (i = 0; i < mdp5_kms->num_hwpipes; i++) { 487 struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; 488 struct drm_plane *plane; 489 enum drm_plane_type type; 490 491 if (i < num_crtcs) 492 type = DRM_PLANE_TYPE_PRIMARY; 493 else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) 494 type = DRM_PLANE_TYPE_CURSOR; 495 else 496 type = DRM_PLANE_TYPE_OVERLAY; 497 498 plane = mdp5_plane_init(dev, type); 499 if (IS_ERR(plane)) { 500 ret = PTR_ERR(plane); 501 dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); 502 goto fail; 503 } 504 priv->planes[priv->num_planes++] = plane; 505 506 if (type == DRM_PLANE_TYPE_PRIMARY) 507 primary[pi++] = plane; 508 if (type == DRM_PLANE_TYPE_CURSOR) 509 cursor[ci++] = plane; 510 } 511 512 for (i = 0; i < num_crtcs; i++) { 513 struct drm_crtc *crtc; 514 515 crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); 516 if (IS_ERR(crtc)) { 517 ret = PTR_ERR(crtc); 518 dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); 519 goto fail; 520 } 521 priv->crtcs[priv->num_crtcs++] = crtc; 522 } 523 524 /* 525 * Now that we know the number of crtcs we've created, set the possible 526 * crtcs for the encoders 527 */ 528 for (i = 0; i < priv->num_encoders; i++) { 529 struct drm_encoder *encoder = priv->encoders[i]; 530 531 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; 532 } 533 534 return 0; 535 536 fail: 537 return ret; 538 } 539 540 static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms, 541 u32 *major, u32 *minor) 542 { 543 struct device *dev = &mdp5_kms->pdev->dev; 544 u32 version; 545 546 pm_runtime_get_sync(dev); 547 version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION); 548 pm_runtime_put_sync(dev); 549 550 *major = FIELD(version, MDP5_HW_VERSION_MAJOR); 551 *minor = FIELD(version, MDP5_HW_VERSION_MINOR); 552 553 dev_info(dev, "MDP5 version v%d.%d", *major, *minor); 554 } 555 556 static int get_clk(struct 
platform_device *pdev, struct clk **clkp, 557 const char *name, bool mandatory) 558 { 559 struct device *dev = &pdev->dev; 560 struct clk *clk = msm_clk_get(pdev, name); 561 if (IS_ERR(clk) && mandatory) { 562 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); 563 return PTR_ERR(clk); 564 } 565 if (IS_ERR(clk)) 566 DBG("skipping %s", name); 567 else 568 *clkp = clk; 569 570 return 0; 571 } 572 573 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) 574 { 575 struct drm_device *dev = crtc->dev; 576 struct drm_encoder *encoder; 577 578 drm_for_each_encoder(encoder, dev) 579 if (encoder->crtc == crtc) 580 return encoder; 581 582 return NULL; 583 } 584 585 static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe, 586 bool in_vblank_irq, int *vpos, int *hpos, 587 ktime_t *stime, ktime_t *etime, 588 const struct drm_display_mode *mode) 589 { 590 struct msm_drm_private *priv = dev->dev_private; 591 struct drm_crtc *crtc; 592 struct drm_encoder *encoder; 593 int line, vsw, vbp, vactive_start, vactive_end, vfp_end; 594 595 crtc = priv->crtcs[pipe]; 596 if (!crtc) { 597 DRM_ERROR("Invalid crtc %d\n", pipe); 598 return false; 599 } 600 601 encoder = get_encoder_from_crtc(crtc); 602 if (!encoder) { 603 DRM_ERROR("no encoder found for crtc %d\n", pipe); 604 return false; 605 } 606 607 vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; 608 vbp = mode->crtc_vtotal - mode->crtc_vsync_end; 609 610 /* 611 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at 612 * the end of VFP. Translate the porch values relative to the line 613 * counter positions. 
614 */ 615 616 vactive_start = vsw + vbp + 1; 617 618 vactive_end = vactive_start + mode->crtc_vdisplay; 619 620 /* last scan line before VSYNC */ 621 vfp_end = mode->crtc_vtotal; 622 623 if (stime) 624 *stime = ktime_get(); 625 626 line = mdp5_encoder_get_linecount(encoder); 627 628 if (line < vactive_start) { 629 line -= vactive_start; 630 } else if (line > vactive_end) { 631 line = line - vfp_end - vactive_start; 632 } else { 633 line -= vactive_start; 634 } 635 636 *vpos = line; 637 *hpos = 0; 638 639 if (etime) 640 *etime = ktime_get(); 641 642 return true; 643 } 644 645 static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 646 { 647 struct msm_drm_private *priv = dev->dev_private; 648 struct drm_crtc *crtc; 649 struct drm_encoder *encoder; 650 651 if (pipe >= priv->num_crtcs) 652 return 0; 653 654 crtc = priv->crtcs[pipe]; 655 if (!crtc) 656 return 0; 657 658 encoder = get_encoder_from_crtc(crtc); 659 if (!encoder) 660 return 0; 661 662 return mdp5_encoder_get_framecount(encoder); 663 } 664 665 struct msm_kms *mdp5_kms_init(struct drm_device *dev) 666 { 667 struct msm_drm_private *priv = dev->dev_private; 668 struct platform_device *pdev; 669 struct mdp5_kms *mdp5_kms; 670 struct mdp5_cfg *config; 671 struct msm_kms *kms; 672 struct msm_gem_address_space *aspace; 673 int irq, i, ret; 674 675 /* priv->kms would have been populated by the MDP5 driver */ 676 kms = priv->kms; 677 if (!kms) 678 return NULL; 679 680 mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 681 682 mdp_kms_init(&mdp5_kms->base, &kms_funcs); 683 684 pdev = mdp5_kms->pdev; 685 686 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 687 if (irq < 0) { 688 ret = irq; 689 dev_err(&pdev->dev, "failed to get irq: %d\n", ret); 690 goto fail; 691 } 692 693 kms->irq = irq; 694 695 config = mdp5_cfg_get_config(mdp5_kms->cfg); 696 697 /* make sure things are off before attaching iommu (bootloader could 698 * have left things on, in which case we'll start getting faults if 699 * we don't 
disable): 700 */ 701 pm_runtime_get_sync(&pdev->dev); 702 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { 703 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || 704 !config->hw->intf.base[i]) 705 continue; 706 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 707 708 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); 709 } 710 mdelay(16); 711 712 if (config->platform.iommu) { 713 aspace = msm_gem_address_space_create(&pdev->dev, 714 config->platform.iommu, "mdp5"); 715 if (IS_ERR(aspace)) { 716 ret = PTR_ERR(aspace); 717 goto fail; 718 } 719 720 kms->aspace = aspace; 721 722 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 723 ARRAY_SIZE(iommu_ports)); 724 if (ret) { 725 dev_err(&pdev->dev, "failed to attach iommu: %d\n", 726 ret); 727 goto fail; 728 } 729 } else { 730 dev_info(&pdev->dev, 731 "no iommu, fallback to phys contig buffers for scanout\n"); 732 aspace = NULL; 733 } 734 735 pm_runtime_put_sync(&pdev->dev); 736 737 ret = modeset_init(mdp5_kms); 738 if (ret) { 739 dev_err(&pdev->dev, "modeset_init failed: %d\n", ret); 740 goto fail; 741 } 742 743 dev->mode_config.min_width = 0; 744 dev->mode_config.min_height = 0; 745 dev->mode_config.max_width = 0xffff; 746 dev->mode_config.max_height = 0xffff; 747 748 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 749 dev->driver->get_scanout_position = mdp5_get_scanoutpos; 750 dev->driver->get_vblank_counter = mdp5_get_vblank_counter; 751 dev->max_vblank_count = 0xffffffff; 752 dev->vblank_disable_immediate = true; 753 754 return kms; 755 fail: 756 if (kms) 757 mdp5_kms_destroy(kms); 758 return ERR_PTR(ret); 759 } 760 761 static void mdp5_destroy(struct platform_device *pdev) 762 { 763 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); 764 int i; 765 766 if (mdp5_kms->ctlm) 767 mdp5_ctlm_destroy(mdp5_kms->ctlm); 768 if (mdp5_kms->smp) 769 mdp5_smp_destroy(mdp5_kms->smp); 770 if (mdp5_kms->cfg) 771 mdp5_cfg_destroy(mdp5_kms->cfg); 772 773 for (i = 0; 
i < mdp5_kms->num_intfs; i++) 774 kfree(mdp5_kms->intfs[i]); 775 776 if (mdp5_kms->rpm_enabled) 777 pm_runtime_disable(&pdev->dev); 778 779 drm_atomic_private_obj_fini(&mdp5_kms->glob_state); 780 drm_modeset_lock_fini(&mdp5_kms->glob_state_lock); 781 } 782 783 static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, 784 const enum mdp5_pipe *pipes, const uint32_t *offsets, 785 uint32_t caps) 786 { 787 struct drm_device *dev = mdp5_kms->dev; 788 int i, ret; 789 790 for (i = 0; i < cnt; i++) { 791 struct mdp5_hw_pipe *hwpipe; 792 793 hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); 794 if (IS_ERR(hwpipe)) { 795 ret = PTR_ERR(hwpipe); 796 dev_err(dev->dev, "failed to construct pipe for %s (%d)\n", 797 pipe2name(pipes[i]), ret); 798 return ret; 799 } 800 hwpipe->idx = mdp5_kms->num_hwpipes; 801 mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe; 802 } 803 804 return 0; 805 } 806 807 static int hwpipe_init(struct mdp5_kms *mdp5_kms) 808 { 809 static const enum mdp5_pipe rgb_planes[] = { 810 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, 811 }; 812 static const enum mdp5_pipe vig_planes[] = { 813 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, 814 }; 815 static const enum mdp5_pipe dma_planes[] = { 816 SSPP_DMA0, SSPP_DMA1, 817 }; 818 static const enum mdp5_pipe cursor_planes[] = { 819 SSPP_CURSOR0, SSPP_CURSOR1, 820 }; 821 const struct mdp5_cfg_hw *hw_cfg; 822 int ret; 823 824 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 825 826 /* Construct RGB pipes: */ 827 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes, 828 hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps); 829 if (ret) 830 return ret; 831 832 /* Construct video (VIG) pipes: */ 833 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes, 834 hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps); 835 if (ret) 836 return ret; 837 838 /* Construct DMA pipes: */ 839 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes, 840 hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps); 841 if (ret) 
842 return ret; 843 844 /* Construct cursor pipes: */ 845 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, 846 cursor_planes, hw_cfg->pipe_cursor.base, 847 hw_cfg->pipe_cursor.caps); 848 if (ret) 849 return ret; 850 851 return 0; 852 } 853 854 static int hwmixer_init(struct mdp5_kms *mdp5_kms) 855 { 856 struct drm_device *dev = mdp5_kms->dev; 857 const struct mdp5_cfg_hw *hw_cfg; 858 int i, ret; 859 860 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 861 862 for (i = 0; i < hw_cfg->lm.count; i++) { 863 struct mdp5_hw_mixer *mixer; 864 865 mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); 866 if (IS_ERR(mixer)) { 867 ret = PTR_ERR(mixer); 868 dev_err(dev->dev, "failed to construct LM%d (%d)\n", 869 i, ret); 870 return ret; 871 } 872 873 mixer->idx = mdp5_kms->num_hwmixers; 874 mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer; 875 } 876 877 return 0; 878 } 879 880 static int interface_init(struct mdp5_kms *mdp5_kms) 881 { 882 struct drm_device *dev = mdp5_kms->dev; 883 const struct mdp5_cfg_hw *hw_cfg; 884 const enum mdp5_intf_type *intf_types; 885 int i; 886 887 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 888 intf_types = hw_cfg->intf.connect; 889 890 for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { 891 struct mdp5_interface *intf; 892 893 if (intf_types[i] == INTF_DISABLED) 894 continue; 895 896 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 897 if (!intf) { 898 dev_err(dev->dev, "failed to construct INTF%d\n", i); 899 return -ENOMEM; 900 } 901 902 intf->num = i; 903 intf->type = intf_types[i]; 904 intf->mode = MDP5_INTF_MODE_NONE; 905 intf->idx = mdp5_kms->num_intfs; 906 mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf; 907 } 908 909 return 0; 910 } 911 912 static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) 913 { 914 struct msm_drm_private *priv = dev->dev_private; 915 struct mdp5_kms *mdp5_kms; 916 struct mdp5_cfg *config; 917 u32 major, minor; 918 int ret; 919 920 mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), 
GFP_KERNEL); 921 if (!mdp5_kms) { 922 ret = -ENOMEM; 923 goto fail; 924 } 925 926 platform_set_drvdata(pdev, mdp5_kms); 927 928 spin_lock_init(&mdp5_kms->resource_lock); 929 930 mdp5_kms->dev = dev; 931 mdp5_kms->pdev = pdev; 932 933 ret = mdp5_global_obj_init(mdp5_kms); 934 if (ret) 935 goto fail; 936 937 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 938 if (IS_ERR(mdp5_kms->mmio)) { 939 ret = PTR_ERR(mdp5_kms->mmio); 940 goto fail; 941 } 942 943 /* mandatory clocks: */ 944 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); 945 if (ret) 946 goto fail; 947 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); 948 if (ret) 949 goto fail; 950 ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); 951 if (ret) 952 goto fail; 953 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); 954 if (ret) 955 goto fail; 956 957 /* optional clocks: */ 958 get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); 959 960 /* we need to set a default rate before enabling. Set a safe 961 * rate first, then figure out hw revision, and then set a 962 * more optimal rate: 963 */ 964 clk_set_rate(mdp5_kms->core_clk, 200000000); 965 966 pm_runtime_enable(&pdev->dev); 967 mdp5_kms->rpm_enabled = true; 968 969 read_mdp_hw_revision(mdp5_kms, &major, &minor); 970 971 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); 972 if (IS_ERR(mdp5_kms->cfg)) { 973 ret = PTR_ERR(mdp5_kms->cfg); 974 mdp5_kms->cfg = NULL; 975 goto fail; 976 } 977 978 config = mdp5_cfg_get_config(mdp5_kms->cfg); 979 mdp5_kms->caps = config->hw->mdp.caps; 980 981 /* TODO: compute core clock rate at runtime */ 982 clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk); 983 984 /* 985 * Some chipsets have a Shared Memory Pool (SMP), while others 986 * have dedicated latency buffering per source pipe instead; 987 * this section initializes the SMP: 988 */ 989 if (mdp5_kms->caps & MDP_CAP_SMP) { 990 mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp); 991 if (IS_ERR(mdp5_kms->smp)) { 992 ret = 
PTR_ERR(mdp5_kms->smp); 993 mdp5_kms->smp = NULL; 994 goto fail; 995 } 996 } 997 998 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); 999 if (IS_ERR(mdp5_kms->ctlm)) { 1000 ret = PTR_ERR(mdp5_kms->ctlm); 1001 mdp5_kms->ctlm = NULL; 1002 goto fail; 1003 } 1004 1005 ret = hwpipe_init(mdp5_kms); 1006 if (ret) 1007 goto fail; 1008 1009 ret = hwmixer_init(mdp5_kms); 1010 if (ret) 1011 goto fail; 1012 1013 ret = interface_init(mdp5_kms); 1014 if (ret) 1015 goto fail; 1016 1017 /* set uninit-ed kms */ 1018 priv->kms = &mdp5_kms->base.base; 1019 1020 return 0; 1021 fail: 1022 mdp5_destroy(pdev); 1023 return ret; 1024 } 1025 1026 static int mdp5_bind(struct device *dev, struct device *master, void *data) 1027 { 1028 struct drm_device *ddev = dev_get_drvdata(master); 1029 struct platform_device *pdev = to_platform_device(dev); 1030 1031 DBG(""); 1032 1033 return mdp5_init(pdev, ddev); 1034 } 1035 1036 static void mdp5_unbind(struct device *dev, struct device *master, 1037 void *data) 1038 { 1039 struct platform_device *pdev = to_platform_device(dev); 1040 1041 mdp5_destroy(pdev); 1042 } 1043 1044 static const struct component_ops mdp5_ops = { 1045 .bind = mdp5_bind, 1046 .unbind = mdp5_unbind, 1047 }; 1048 1049 static int mdp5_dev_probe(struct platform_device *pdev) 1050 { 1051 DBG(""); 1052 return component_add(&pdev->dev, &mdp5_ops); 1053 } 1054 1055 static int mdp5_dev_remove(struct platform_device *pdev) 1056 { 1057 DBG(""); 1058 component_del(&pdev->dev, &mdp5_ops); 1059 return 0; 1060 } 1061 1062 static __maybe_unused int mdp5_runtime_suspend(struct device *dev) 1063 { 1064 struct platform_device *pdev = to_platform_device(dev); 1065 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); 1066 1067 DBG(""); 1068 1069 return mdp5_disable(mdp5_kms); 1070 } 1071 1072 static __maybe_unused int mdp5_runtime_resume(struct device *dev) 1073 { 1074 struct platform_device *pdev = to_platform_device(dev); 1075 struct mdp5_kms *mdp5_kms = 
platform_get_drvdata(pdev); 1076 1077 DBG(""); 1078 1079 return mdp5_enable(mdp5_kms); 1080 } 1081 1082 static const struct dev_pm_ops mdp5_pm_ops = { 1083 SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL) 1084 }; 1085 1086 static const struct of_device_id mdp5_dt_match[] = { 1087 { .compatible = "qcom,mdp5", }, 1088 /* to support downstream DT files */ 1089 { .compatible = "qcom,mdss_mdp", }, 1090 {} 1091 }; 1092 MODULE_DEVICE_TABLE(of, mdp5_dt_match); 1093 1094 static struct platform_driver mdp5_driver = { 1095 .probe = mdp5_dev_probe, 1096 .remove = mdp5_dev_remove, 1097 .driver = { 1098 .name = "msm_mdp", 1099 .of_match_table = mdp5_dt_match, 1100 .pm = &mdp5_pm_ops, 1101 }, 1102 }; 1103 1104 void __init msm_mdp_register(void) 1105 { 1106 DBG(""); 1107 platform_driver_register(&mdp5_driver); 1108 } 1109 1110 void __exit msm_mdp_unregister(void) 1111 { 1112 DBG(""); 1113 platform_driver_unregister(&mdp5_driver); 1114 } 1115