// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_mdss.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"

#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_danger_safe_status status;
	struct dpu_kms *kms = s->private;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP : 0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);

static ssize_t _dpu_plane_danger_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int len;
	char buf[40];

	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}

static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, kms->dev) {
		if (plane->fb && plane->state) {
			dpu_plane_danger_signal_ctrl(plane, enable);
			DPU_DEBUG("plane:%d img:%dx%d ",
					plane->base.id, plane->fb->width,
					plane->fb->height);
			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
					plane->state->src_x >> 16,
					plane->state->src_y >> 16,
					plane->state->src_w >> 16,
					plane->state->src_h >> 16,
					plane->state->crtc_x, plane->state->crtc_y,
					plane->state->crtc_w, plane->state->crtc_h);
		} else {
			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
		}
	}
}

static ssize_t _dpu_plane_danger_write(struct file *file,
		    const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int disable_panic;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
	if (ret)
		return ret;

	if (disable_panic) {
		/* Disable panic signal for all active pipes */
		DPU_DEBUG("Disabling danger:\n");
		_dpu_plane_set_danger_state(kms, false);
		kms->has_danger_ctrl = false;
	} else {
		/* Enable panic signal for all active pipes */
		DPU_DEBUG("Enabling danger:\n");
		kms->has_danger_ctrl = true;
		_dpu_plane_set_danger_state(kms, true);
	}

	return count;
}

static const struct file_operations dpu_plane_danger_enable = {
	.open = simple_open,
	.read = _dpu_plane_danger_read,
	.write = _dpu_plane_danger_write,
};

static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
	debugfs_create_file("disable_danger", 0600, entry,
			dpu_kms, &dpu_plane_danger_enable);
}

/*
 * Companion structure for dpu_debugfs_create_regset32.
 */
struct dpu_debugfs_regset32 {
	uint32_t offset;
	uint32_t blk_len;
	struct dpu_kms *dpu_kms;
};

static int dpu_regset32_show(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, " ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_regset32);

void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	struct dpu_debugfs_regset32 *regset;

	if (WARN_ON(!name || !dpu_kms || !length))
		return;

	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(offset, 4);
	regset->blk_len = length;
	regset->dpu_kms = dpu_kms;

	debugfs_create_file(name, mode, parent, regset, &dpu_regset32_fops);
}

static void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
	struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
	int i;

	if (IS_ERR(entry))
		return;

	for (i = SSPP_NONE; i < SSPP_MAX; i++) {
		struct dpu_hw_sspp *hw = dpu_rm_get_sspp(&dpu_kms->rm, i);

		if (!hw)
			continue;

		_dpu_hw_sspp_init_debugfs(hw, dpu_kms, entry);
	}
}

static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;

	if (!p)
		return -EINVAL;

	/* Only create a set of debugfs for the primary node, ignore render nodes */
	if (minor->type != DRM_MINOR_PRIMARY)
		return 0;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);
	dpu_debugfs_sspp_init(dpu_kms, entry);

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}

/*
 * This acquires the modeset lock set aside for global state, creates
 * a new duplicated private object state.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s,
					&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}

static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
		struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}

static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
};

static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	drm_modeset_lock_init(&dpu_kms->global_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);
	return 0;
}

static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct device *dpu_dev = &dpu_kms->pdev->dev;

	path0 = msm_icc_get(dpu_dev, "mdp0-mem");
	path1 = msm_icc_get(dpu_dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}
	return 0;
}

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}

static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}

static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!drm_atomic_crtc_effectively_active(crtc->state)) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}

static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/*
	 * We support the following configurations:
	 * - Single DSI host (dsi0 or dsi1)
	 * - Two independent DSI hosts
	 * - Bonded DSI0 and DSI1 hosts
	 *
	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
	 */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		int other = (i + 1) % 2;

		if (!priv->dsi[i])
			continue;

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
		    !msm_dsi_is_master_dsi(priv->dsi[i]))
			continue;

		memset(&info, 0, sizeof(info));
		info.intf_type = INTF_DSI;

		info.h_tile_instance[info.num_of_h_tiles++] = i;
		if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
			info.h_tile_instance[info.num_of_h_tiles++] = other;

		info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dsi display\n");
			return PTR_ERR(encoder);
		}

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
			if (rc) {
				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
				break;
			}
		}
	}

	return rc;
}

static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					   struct msm_drm_private *priv,
					   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		memset(&info, 0, sizeof(info));
		info.num_of_h_tiles = 1;
		info.h_tile_instance[0] = i;
		info.intf_type = INTF_DP;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for DP display\n");
			return PTR_ERR(encoder);
		}

		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
			drm_encoder_cleanup(encoder);
			return rc;
		}
	}

	return 0;
}

static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	if (!priv->hdmi)
		return 0;

	memset(&info, 0, sizeof(info));
	info.num_of_h_tiles = 1;
	info.h_tile_instance[0] = 0;
	info.intf_type = INTF_HDMI;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for HDMI display\n");
		return PTR_ERR(encoder);
	}

	rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
	if (rc) {
		DPU_ERROR("modeset_init failed for HDMI, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	return 0;
}

static int _dpu_kms_initialize_writeback(struct drm_device *dev,
		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
		const u32 *wb_formats, int n_formats)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	memset(&info, 0, sizeof(info));

	info.num_of_h_tiles = 1;
	/* use only WB idx 2 instance for DPU */
	info.h_tile_instance[0] = WB_2;
	info.intf_type = INTF_WB;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for writeback display\n");
		return PTR_ERR(encoder);
	}

	rc = dpu_writeback_init(dev, encoder, wb_formats,
			n_formats);
	if (rc) {
		DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	return 0;
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @dpu_kms: Pointer to dpu kms structure
 * Returns: Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	int rc = 0;
	int i;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_hdmi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize HDMI failed, rc = %d\n", rc);
		return rc;
	}

	/* Since WB isn't a driver, check the catalog before initializing */
	if (dpu_kms->catalog->wb_count) {
		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
			if (dpu_kms->catalog->wb[i].id == WB_2) {
				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
						dpu_kms->catalog->wb[i].format_list,
						dpu_kms->catalog->wb[i].num_formats);
				if (rc) {
					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
					return rc;
				}
			}
		}
	}

	return rc;
}

#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	struct msm_drm_private *priv;
	const struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;
	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		return ret;

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	max_crtc_count = min(catalog->mixer_count, num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			return ret;
		}

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			return ret;
		}
		priv->num_crtcs++;
	}

	/* All CRTCs are compatible with all encoders */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
			if (dpu_kms->hw_vbif[i]) {
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
				dpu_kms->hw_vbif[i] = NULL;
			}
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	dpu_kms->catalog = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&dpu_kms->pdev->dev);
}

static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	return 0;
}

static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
	int i;
	struct dpu_kms *dpu_kms;
	const struct dpu_mdss_cfg *cat;
	void __iomem *base;

	dpu_kms = to_dpu_kms(kms);

	cat = dpu_kms->catalog;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* dump CTL sub-blocks HW regs info */
	for (i = 0; i < cat->ctl_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
				dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);

	/* dump DSPP sub-blocks HW regs info */
	for (i = 0; i < cat->dspp_count; i++) {
		base = dpu_kms->mmio + cat->dspp[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);

		if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
						    base + cat->dspp[i].sblk->pcc.base, "%s_%s",
						    cat->dspp[i].name,
						    cat->dspp[i].sblk->pcc.name);
	}

	/* dump INTF sub-blocks HW regs info */
	for (i = 0; i < cat->intf_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
				dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);

	/* dump PP sub-blocks HW regs info */
	for (i = 0; i < cat->pingpong_count; i++) {
		base = dpu_kms->mmio + cat->pingpong[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
					    cat->pingpong[i].name);

		/* TE2 sub-block has length of 0, so will not print it */

		if (cat->pingpong[i].sblk && cat->pingpong[i].sblk->dither.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].sblk->dither.len,
						    base + cat->pingpong[i].sblk->dither.base,
						    "%s_%s", cat->pingpong[i].name,
						    cat->pingpong[i].sblk->dither.name);
	}

	/* dump SSPP sub-blocks HW regs info */
	for (i = 0; i < cat->sspp_count; i++) {
		base = dpu_kms->mmio + cat->sspp[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);

		if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
						    base + cat->sspp[i].sblk->scaler_blk.base,
						    "%s_%s", cat->sspp[i].name,
						    cat->sspp[i].sblk->scaler_blk.name);

		if (cat->sspp[i].sblk && cat->sspp[i].sblk->csc_blk.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->csc_blk.len,
						    base + cat->sspp[i].sblk->csc_blk.base,
						    "%s_%s", cat->sspp[i].name,
						    cat->sspp[i].sblk->csc_blk.name);
	}

	/* dump LM sub-blocks HW regs info */
	for (i = 0; i < cat->mixer_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
				dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);

	/* dump WB sub-blocks HW regs info */
	for (i = 0; i < cat->wb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
				dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);

	if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
		msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
				dpu_kms->mmio + cat->mdp[0].base, "top");
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
				dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END, "top_2");
	} else {
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
				dpu_kms->mmio + cat->mdp[0].base, "top");
	}

	/* dump DSC sub-blocks HW regs info */
	for (i = 0; i < cat->dsc_count; i++) {
		base = dpu_kms->mmio + cat->dsc[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);

		if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
			struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
			struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;

			msm_disp_snapshot_add_block(disp_state, enc.len, base + enc.base, "%s_%s",
						    cat->dsc[i].name, enc.name);
			msm_disp_snapshot_add_block(disp_state, ctl.len, base + ctl.base, "%s_%s",
						    cat->dsc[i].name, ctl.name);
		}
	}

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init = dpu_kms_hw_init,
	.irq_preinstall = dpu_core_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall = dpu_core_irq_uninstall,
	.irq = dpu_core_irq,
	.enable_commit = dpu_kms_enable_commit,
	.disable_commit = dpu_kms_disable_commit,
	.flush_commit = dpu_kms_flush_commit,
	.wait_flush = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank = dpu_kms_enable_vblank,
	.disable_vblank = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format = dpu_get_msm_format,
	.destroy = dpu_kms_destroy,
	.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = dpu_kms_debugfs_init,
#endif
};

static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}

static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct msm_gem_address_space *aspace;

	aspace = msm_kms_init_aspace(dpu_kms->dev);
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	dpu_kms->base.aspace = aspace;

	return 0;
}

unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct clk *clk;

	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
	if (!clk)
		return 0;

	return clk_get_rate(clk);
}

#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000

static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;
	unsigned long max_core_clk_rate;
	u32 core_rev;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	dev->mode_config.cursor_width = 512;
	dev->mode_config.cursor_height = 512;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		goto error;

	core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", core_rev);

	dpu_kms->catalog = of_device_get_match_data(dev->dev);
	if (!dpu_kms->catalog) {
		DPU_ERROR("device config not known!\n");
		rc = -EINVAL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
	if (IS_ERR(dpu_kms->mdss)) {
		rc = PTR_ERR(dpu_kms->mdss);
		DPU_ERROR("failed to get MDSS data: %d\n", rc);
		goto power_error;
	}

	if (!dpu_kms->mdss) {
		rc = -EINVAL;
		DPU_ERROR("NULL MDSS data\n");
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp,
					     dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		struct dpu_hw_vbif *hw;
		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];

		hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
			goto power_error;
		}

		dpu_kms->hw_vbif[vbif->id] = hw;
	}

	/* TODO: use the same max_freq as in dpu_kms_hw_init */
	max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core");
	if (!max_core_clk_rate) {
		DPU_DEBUG("max core clk rate not determined, using default\n");
		max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height
	 * is 4K
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}

static int dpu_kms_init(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	struct device *dev = ddev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct dev_pm_opp *opp;
	int ret = 0;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	dev_pm_opp_set_rate(dev, max_freq);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->dev = ddev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	return 0;
}

static int dpu_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dpu_kms *dpu_kms;
	int irq;
	int ret = 0;

	dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	dpu_kms->pdev = pdev;

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "invalid OPP table in device tree\n");

	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to parse clocks\n");

	dpu_kms->num_clocks = ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(dev, irq, "failed to get irq\n");

	dpu_kms->base.irq = irq;

	dpu_kms->mmio = msm_ioremap(pdev, "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		ret = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", ret);
		dpu_kms->mmio = NULL;
		return ret;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", ret);
		dpu_kms->vbif[VBIF_RT] = NULL;
		return ret;
	}

	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined");
	}

	ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
	if (ret)
		return ret;

	return msm_drv_probe(&pdev->dev, dpu_kms_init, &dpu_kms->base);
}

static void dpu_dev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return 0;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *encoder;
	struct drm_device *ddev;

	ddev = dpu_kms->dev;

	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	.prepare = msm_kms_pm_prepare,
	.complete = msm_kms_pm_complete,
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
	{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
	{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
	{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
	{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
	{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
	{ .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
	{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
	{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
	{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
	{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
	{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
	{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
	{ .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
	{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
	{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
	{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
	{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove_new = dpu_dev_remove,
	.shutdown = msm_kms_shutdown,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};

void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}