1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. 5 * Copyright (C) 2013 Red Hat 6 * Author: Rob Clark <robdclark@gmail.com> 7 */ 8 9 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ 10 #include <linux/sort.h> 11 #include <linux/debugfs.h> 12 #include <linux/ktime.h> 13 #include <linux/bits.h> 14 15 #include <drm/drm_atomic.h> 16 #include <drm/drm_blend.h> 17 #include <drm/drm_crtc.h> 18 #include <drm/drm_flip_work.h> 19 #include <drm/drm_framebuffer.h> 20 #include <drm/drm_mode.h> 21 #include <drm/drm_probe_helper.h> 22 #include <drm/drm_rect.h> 23 #include <drm/drm_vblank.h> 24 #include <drm/drm_self_refresh_helper.h> 25 26 #include "dpu_kms.h" 27 #include "dpu_hw_lm.h" 28 #include "dpu_hw_ctl.h" 29 #include "dpu_hw_dspp.h" 30 #include "dpu_crtc.h" 31 #include "dpu_plane.h" 32 #include "dpu_encoder.h" 33 #include "dpu_vbif.h" 34 #include "dpu_core_perf.h" 35 #include "dpu_trace.h" 36 37 /* layer mixer index on dpu_crtc */ 38 #define LEFT_MIXER 0 39 #define RIGHT_MIXER 1 40 41 /* timeout in ms waiting for frame done */ 42 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60 43 44 #define CONVERT_S3_15(val) \ 45 (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0)) 46 47 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) 48 { 49 struct msm_drm_private *priv = crtc->dev->dev_private; 50 51 return to_dpu_kms(priv->kms); 52 } 53 54 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) 55 { 56 struct drm_device *dev = crtc->dev; 57 struct drm_encoder *encoder; 58 59 drm_for_each_encoder(encoder, dev) 60 if (encoder->crtc == crtc) 61 return encoder; 62 63 return NULL; 64 } 65 66 static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name) 67 { 68 if (!src_name || 69 !strcmp(src_name, "none")) 70 return DPU_CRTC_CRC_SOURCE_NONE; 71 if (!strcmp(src_name, "auto") || 72 !strcmp(src_name, "lm")) 73 return DPU_CRTC_CRC_SOURCE_LAYER_MIXER; 74 if (!strcmp(src_name, "encoder")) 75 return DPU_CRTC_CRC_SOURCE_ENCODER; 76 77 return DPU_CRTC_CRC_SOURCE_INVALID; 78 } 79 80 static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc, 81 const char *src_name, size_t *values_cnt) 82 { 83 enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); 84 struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state); 85 86 if (source < 0) { 87 DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index); 88 return -EINVAL; 89 } 90 91 if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) { 92 *values_cnt = crtc_state->num_mixers; 93 } else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) { 94 struct drm_encoder *drm_enc; 95 96 *values_cnt = 0; 97 98 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) 99 *values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc); 100 } 101 102 return 0; 103 } 104 105 static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state) 106 { 107 struct dpu_crtc_mixer *m; 108 int i; 109 110 for (i = 0; i < crtc_state->num_mixers; ++i) { 111 m = &crtc_state->mixers[i]; 112 113 if (!m->hw_lm || !m->hw_lm->ops.setup_misr) 114 continue; 115 116 /* Calculate MISR over 1 frame */ 117 m->hw_lm->ops.setup_misr(m->hw_lm); 118 } 119 } 120 121 static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc) 122 { 123 struct drm_encoder *drm_enc; 124 125 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) 126 dpu_encoder_setup_misr(drm_enc); 127 } 128 129 static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) 130 { 131 enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); 132 enum dpu_crtc_crc_source current_source; 133 struct dpu_crtc_state *crtc_state; 134 struct drm_device *drm_dev = crtc->dev; 135 136 bool was_enabled; 137 bool enable = false; 138 int ret = 0; 139 140 if (source < 0) { 141 DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index); 142 return -EINVAL; 143 } 144 145 ret = drm_modeset_lock(&crtc->mutex, NULL); 146 147 if (ret) 148 return ret; 149 150 enable = (source != DPU_CRTC_CRC_SOURCE_NONE); 151 crtc_state = to_dpu_crtc_state(crtc->state); 152 153 spin_lock_irq(&drm_dev->event_lock); 154 current_source = crtc_state->crc_source; 155 spin_unlock_irq(&drm_dev->event_lock); 156 157 was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE); 158 159 if (!was_enabled && enable) { 160 ret = drm_crtc_vblank_get(crtc); 161 162 if (ret) 163 goto cleanup; 164 165 } else if (was_enabled && !enable) { 166 drm_crtc_vblank_put(crtc); 167 } 168 169 spin_lock_irq(&drm_dev->event_lock); 170 crtc_state->crc_source = source; 171 spin_unlock_irq(&drm_dev->event_lock); 172 173 crtc_state->crc_frame_skip_count = 0; 174 175 if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) 176 dpu_crtc_setup_lm_misr(crtc_state); 177 else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) 178 dpu_crtc_setup_encoder_misr(crtc); 179 else 180 ret = -EINVAL; 181 182 cleanup: 183 drm_modeset_unlock(&crtc->mutex); 184 185 return ret; 186 } 187 188 static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc) 189 { 190 struct drm_encoder *encoder = get_encoder_from_crtc(crtc); 191 if (!encoder) { 192 DRM_ERROR("no encoder found for crtc %d\n", crtc->index); 193 return 0; 194 } 195 196 return dpu_encoder_get_vsync_count(encoder); 197 } 198 199 static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc, 200 struct dpu_crtc_state *crtc_state) 201 { 202 struct dpu_crtc_mixer *m; 203 u32 crcs[CRTC_DUAL_MIXERS]; 204 205 int rc = 0; 206 int i; 207 208 BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers)); 209 210 for (i = 0; i < crtc_state->num_mixers; ++i) { 211 212 m = &crtc_state->mixers[i]; 213 214 if (!m->hw_lm || !m->hw_lm->ops.collect_misr) 215 continue; 216 217 rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]); 218 219 if (rc) { 220 if (rc != -ENODATA) 221 DRM_DEBUG_DRIVER("MISR read failed\n"); 222 return rc; 223 } 224 } 225 226 return drm_crtc_add_crc_entry(crtc, true, 227 drm_crtc_accurate_vblank_count(crtc), crcs); 228 } 229 230 static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc) 231 { 232 struct drm_encoder *drm_enc; 233 int rc, pos = 0; 234 u32 crcs[INTF_MAX]; 235 236 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) { 237 rc = dpu_encoder_get_crc(drm_enc, crcs, pos); 238 if (rc < 0) { 239 if (rc != -ENODATA) 240 DRM_DEBUG_DRIVER("MISR read failed\n"); 241 242 return rc; 243 } 244 245 pos += rc; 246 } 247 248 return drm_crtc_add_crc_entry(crtc, true, 249 drm_crtc_accurate_vblank_count(crtc), crcs); 250 } 251 252 static int dpu_crtc_get_crc(struct drm_crtc *crtc) 253 { 254 struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state); 255 256 /* Skip first 2 frames in case of "uncooked" CRCs */ 257 if (crtc_state->crc_frame_skip_count < 2) { 258 crtc_state->crc_frame_skip_count++; 259 return 0; 260 } 261 262 if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) 263 return dpu_crtc_get_lm_crc(crtc, crtc_state); 264 else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER) 265 return dpu_crtc_get_encoder_crc(crtc); 266 267 return -EINVAL; 268 } 269 270 static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc, 271 bool in_vblank_irq, 272 int *vpos, int *hpos, 273 ktime_t *stime, ktime_t *etime, 274 const struct drm_display_mode *mode) 275 { 276 unsigned int pipe = crtc->index; 277 struct drm_encoder *encoder; 278 int line, vsw, vbp, vactive_start, vactive_end, vfp_end; 279 280 encoder = get_encoder_from_crtc(crtc); 281 if (!encoder) { 282 DRM_ERROR("no encoder found for crtc %d\n", pipe); 283 return false; 284 } 285 286 vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; 287 vbp = mode->crtc_vtotal - mode->crtc_vsync_end; 288 289 /* 290 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at 291 * the end of VFP. Translate the porch values relative to the line 292 * counter positions. 293 */ 294 295 vactive_start = vsw + vbp + 1; 296 vactive_end = vactive_start + mode->crtc_vdisplay; 297 298 /* last scan line before VSYNC */ 299 vfp_end = mode->crtc_vtotal; 300 301 if (stime) 302 *stime = ktime_get(); 303 304 line = dpu_encoder_get_linecount(encoder); 305 306 if (line < vactive_start) 307 line -= vactive_start; 308 else if (line > vactive_end) 309 line = line - vfp_end - vactive_start; 310 else 311 line -= vactive_start; 312 313 *vpos = line; 314 *hpos = 0; 315 316 if (etime) 317 *etime = ktime_get(); 318 319 return true; 320 } 321 322 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, 323 struct dpu_plane_state *pstate, 324 const struct msm_format *format, 325 const struct dpu_mdss_version *mdss_ver) 326 { 327 struct dpu_hw_mixer *lm = mixer->hw_lm; 328 u32 blend_op; 329 u32 fg_alpha, bg_alpha, max_alpha; 330 331 if (mdss_ver->core_major_ver < 12) { 332 max_alpha = 0xff; 333 fg_alpha = pstate->base.alpha >> 8; 334 } else { 335 max_alpha = 0x3ff; 336 fg_alpha = pstate->base.alpha >> 6; 337 } 338 bg_alpha = max_alpha - fg_alpha; 339 340 /* default to opaque blending */ 341 if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE || 342 !format->alpha_enable) { 343 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 344 DPU_BLEND_BG_ALPHA_BG_CONST; 345 } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { 346 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 347 DPU_BLEND_BG_ALPHA_FG_PIXEL; 348 if (fg_alpha != max_alpha) { 349 bg_alpha = fg_alpha; 350 blend_op |= DPU_BLEND_BG_MOD_ALPHA | 351 DPU_BLEND_BG_INV_MOD_ALPHA; 352 } else { 353 blend_op |= DPU_BLEND_BG_INV_ALPHA; 354 } 355 } else { 356 /* coverage blending */ 357 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | 358 DPU_BLEND_BG_ALPHA_FG_PIXEL; 359 if (fg_alpha != max_alpha) { 360 bg_alpha = fg_alpha; 361 blend_op |= DPU_BLEND_FG_MOD_ALPHA | 362 DPU_BLEND_FG_INV_MOD_ALPHA | 363 DPU_BLEND_BG_MOD_ALPHA | 364 DPU_BLEND_BG_INV_MOD_ALPHA; 365 } else { 366 blend_op |= DPU_BLEND_BG_INV_ALPHA; 367 } 368 } 369 370 lm->ops.setup_blend_config(lm, pstate->stage, 371 fg_alpha, bg_alpha, blend_op); 372 373 DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n", 374 &format->pixel_format, format->alpha_enable, blend_op); 375 } 376 377 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) 378 { 379 struct dpu_crtc_state *crtc_state; 380 int lm_idx; 381 382 crtc_state = to_dpu_crtc_state(crtc->state); 383 384 for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) { 385 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx]; 386 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm; 387 struct dpu_hw_mixer_cfg cfg; 388 389 if (!lm_roi || !drm_rect_visible(lm_roi)) 390 continue; 391 392 cfg.out_width = drm_rect_width(lm_roi); 393 cfg.out_height = drm_rect_height(lm_roi); 394 cfg.right_mixer = lm_idx & 0x1; 395 cfg.flags = 0; 396 hw_lm->ops.setup_mixer_out(hw_lm, &cfg); 397 } 398 } 399 400 static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, 401 struct drm_plane *plane, 402 struct dpu_crtc_mixer *mixer, 403 u32 lms_in_pair, 404 enum dpu_stage stage, 405 const struct msm_format *format, 406 uint64_t modifier, 407 struct dpu_sw_pipe *pipe, 408 unsigned int stage_idx, 409 struct dpu_hw_stage_cfg *stage_cfg 410 ) 411 { 412 u32 lm_idx; 413 enum dpu_sspp sspp_idx; 414 struct drm_plane_state *state; 415 416 sspp_idx = pipe->sspp->idx; 417 418 state = plane->state; 419 420 trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), 421 state, to_dpu_plane_state(state), stage_idx, 422 format->pixel_format, pipe, 423 modifier); 424 425 DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n", 426 crtc->base.id, 427 stage, 428 plane->base.id, 429 sspp_idx - SSPP_NONE, 430 state->fb ? state->fb->base.id : -1, 431 pipe->multirect_index); 432 433 stage_cfg->stage[stage][stage_idx] = sspp_idx; 434 stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index; 435 436 /* blend config update */ 437 for (lm_idx = 0; lm_idx < lms_in_pair; lm_idx++) 438 mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx); 439 } 440 441 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, 442 struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer, 443 struct dpu_hw_stage_cfg *stage_cfg) 444 { 445 struct drm_plane *plane; 446 struct drm_framebuffer *fb; 447 struct drm_plane_state *state; 448 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 449 struct dpu_plane_state *pstate = NULL; 450 const struct msm_format *format; 451 struct dpu_hw_ctl *ctl = mixer->lm_ctl; 452 u32 lm_idx, stage, i, pipe_idx, head_pipe_in_stage, lms_in_pair; 453 bool bg_alpha_enable = false; 454 DECLARE_BITMAP(active_fetch, SSPP_MAX); 455 DECLARE_BITMAP(active_pipes, SSPP_MAX); 456 457 memset(active_fetch, 0, sizeof(active_fetch)); 458 memset(active_pipes, 0, sizeof(active_pipes)); 459 drm_atomic_crtc_for_each_plane(plane, crtc) { 460 state = plane->state; 461 if (!state) 462 continue; 463 464 if (!state->visible) 465 continue; 466 467 pstate = to_dpu_plane_state(state); 468 fb = state->fb; 469 470 format = msm_framebuffer_format(pstate->base.fb); 471 472 if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) 473 bg_alpha_enable = true; 474 475 /* loop pipe per mixer pair with config in stage structure */ 476 for (stage = 0; stage < STAGES_PER_PLANE; stage++) { 477 head_pipe_in_stage = stage * PIPES_PER_STAGE; 478 for (i = 0; i < PIPES_PER_STAGE; i++) { 479 pipe_idx = i + head_pipe_in_stage; 480 if (!pstate->pipe[pipe_idx].sspp) 481 continue; 482 lms_in_pair = min(cstate->num_mixers - (stage * PIPES_PER_STAGE), 483 PIPES_PER_STAGE); 484 set_bit(pstate->pipe[pipe_idx].sspp->idx, active_fetch); 485 set_bit(pstate->pipe[pipe_idx].sspp->idx, active_pipes); 486 _dpu_crtc_blend_setup_pipe(crtc, plane, 487 &mixer[head_pipe_in_stage], 488 lms_in_pair, 489 pstate->stage, 490 format, fb ? fb->modifier : 0, 491 &pstate->pipe[pipe_idx], i, 492 &stage_cfg[stage]); 493 } 494 } 495 496 /* blend config update */ 497 for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) { 498 _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format, 499 ctl->mdss_ver); 500 501 if (bg_alpha_enable && !format->alpha_enable) 502 mixer[lm_idx].mixer_op_mode = 0; 503 else 504 mixer[lm_idx].mixer_op_mode |= 505 1 << pstate->stage; 506 } 507 } 508 509 if (ctl->ops.set_active_fetch_pipes) 510 ctl->ops.set_active_fetch_pipes(ctl, active_fetch); 511 512 if (ctl->ops.set_active_pipes) 513 ctl->ops.set_active_pipes(ctl, active_pipes); 514 515 _dpu_crtc_program_lm_output_roi(crtc); 516 } 517 518 /** 519 * _dpu_crtc_blend_setup - configure crtc mixers 520 * @crtc: Pointer to drm crtc structure 521 */ 522 static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) 523 { 524 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 525 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 526 struct dpu_crtc_mixer *mixer = cstate->mixers; 527 struct dpu_hw_ctl *ctl; 528 struct dpu_hw_mixer *lm; 529 struct dpu_hw_stage_cfg stage_cfg[STAGES_PER_PLANE]; 530 DECLARE_BITMAP(active_lms, LM_MAX); 531 int i; 532 533 DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name); 534 535 for (i = 0; i < cstate->num_mixers; i++) { 536 mixer[i].mixer_op_mode = 0; 537 if (mixer[i].lm_ctl->ops.clear_all_blendstages) 538 mixer[i].lm_ctl->ops.clear_all_blendstages( 539 mixer[i].lm_ctl); 540 if (mixer[i].lm_ctl->ops.set_active_fetch_pipes) 541 mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL); 542 if (mixer[i].lm_ctl->ops.set_active_pipes) 543 mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL); 544 545 if (mixer[i].hw_lm->ops.clear_all_blendstages) 546 mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm); 547 } 548 549 /* initialize stage cfg */ 550 memset(&stage_cfg, 0, sizeof(stage_cfg)); 551 memset(active_lms, 0, sizeof(active_lms)); 552 553 _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, stage_cfg); 554 555 for (i = 0; i < cstate->num_mixers; i++) { 556 ctl = mixer[i].lm_ctl; 557 lm = mixer[i].hw_lm; 558 559 lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode); 560 561 /* stage config flush mask */ 562 ctl->ops.update_pending_flush_mixer(ctl, 563 mixer[i].hw_lm->idx); 564 565 set_bit(lm->idx, active_lms); 566 if (ctl->ops.set_active_lms) 567 ctl->ops.set_active_lms(ctl, active_lms); 568 569 DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n", 570 mixer[i].hw_lm->idx - LM_0, 571 mixer[i].mixer_op_mode, 572 ctl->idx - CTL_0); 573 574 /* 575 * call dpu_hw_ctl_setup_blendstage() to blend layers per stage cfg. 576 * stage data is shared between PIPES_PER_STAGE pipes. 577 */ 578 if (ctl->ops.setup_blendstage) 579 ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, 580 &stage_cfg[i / PIPES_PER_STAGE]); 581 582 if (lm->ops.setup_blendstage) 583 lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx, 584 &stage_cfg[i / PIPES_PER_STAGE]); 585 } 586 } 587 588 /** 589 * _dpu_crtc_complete_flip - signal pending page_flip events 590 * Any pending vblank events are added to the vblank_event_list 591 * so that the next vblank interrupt shall signal them. 592 * However PAGE_FLIP events are not handled through the vblank_event_list. 593 * This API signals any pending PAGE_FLIP events requested through 594 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event. 595 * @crtc: Pointer to drm crtc structure 596 */ 597 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) 598 { 599 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 600 struct drm_device *dev = crtc->dev; 601 unsigned long flags; 602 603 spin_lock_irqsave(&dev->event_lock, flags); 604 if (dpu_crtc->event) { 605 DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name, 606 dpu_crtc->event); 607 trace_dpu_crtc_complete_flip(DRMID(crtc)); 608 drm_crtc_send_vblank_event(crtc, dpu_crtc->event); 609 dpu_crtc->event = NULL; 610 } 611 spin_unlock_irqrestore(&dev->event_lock, flags); 612 } 613 614 /** 615 * dpu_crtc_get_intf_mode - get interface mode of the given crtc 616 * @crtc: Pointert to crtc 617 */ 618 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) 619 { 620 struct drm_encoder *encoder; 621 622 /* 623 * TODO: This function is called from dpu debugfs and as part of atomic 624 * check. When called from debugfs, the crtc->mutex must be held to 625 * read crtc->state. However reading crtc->state from atomic check isn't 626 * allowed (unless you have a good reason, a big comment, and a deep 627 * understanding of how the atomic/modeset locks work (<- and this is 628 * probably not possible)). So we'll keep the WARN_ON here for now, but 629 * really we need to figure out a better way to track our operating mode 630 */ 631 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 632 633 /* TODO: Returns the first INTF_MODE, could there be multiple values? */ 634 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 635 return dpu_encoder_get_intf_mode(encoder); 636 637 return INTF_MODE_NONE; 638 } 639 640 /** 641 * dpu_crtc_vblank_callback - called on vblank irq, issues completion events 642 * @crtc: Pointer to drm crtc object 643 */ 644 void dpu_crtc_vblank_callback(struct drm_crtc *crtc) 645 { 646 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 647 648 /* keep statistics on vblank callback - with auto reset via debugfs */ 649 if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0) 650 dpu_crtc->vblank_cb_time = ktime_get(); 651 else 652 dpu_crtc->vblank_cb_count++; 653 654 dpu_crtc_get_crc(crtc); 655 656 drm_crtc_handle_vblank(crtc); 657 trace_dpu_crtc_vblank_cb(DRMID(crtc)); 658 } 659 660 static void dpu_crtc_frame_event_work(struct kthread_work *work) 661 { 662 struct dpu_crtc_frame_event *fevent = container_of(work, 663 struct dpu_crtc_frame_event, work); 664 struct drm_crtc *crtc = fevent->crtc; 665 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 666 unsigned long flags; 667 bool frame_done = false; 668 669 DPU_ATRACE_BEGIN("crtc_frame_event"); 670 671 DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, 672 ktime_to_ns(fevent->ts)); 673 674 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE 675 | DPU_ENCODER_FRAME_EVENT_ERROR 676 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { 677 678 if (atomic_read(&dpu_crtc->frame_pending) < 1) { 679 /* ignore vblank when not pending */ 680 } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) { 681 /* release bandwidth and other resources */ 682 trace_dpu_crtc_frame_event_done(DRMID(crtc), 683 fevent->event); 684 dpu_core_perf_crtc_release_bw(crtc); 685 } else { 686 trace_dpu_crtc_frame_event_more_pending(DRMID(crtc), 687 fevent->event); 688 } 689 690 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE 691 | DPU_ENCODER_FRAME_EVENT_ERROR)) 692 frame_done = true; 693 } 694 695 if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD) 696 DPU_ERROR("crtc%d ts:%lld received panel dead event\n", 697 crtc->base.id, ktime_to_ns(fevent->ts)); 698 699 if (frame_done) 700 complete_all(&dpu_crtc->frame_done_comp); 701 702 spin_lock_irqsave(&dpu_crtc->spin_lock, flags); 703 list_add_tail(&fevent->list, &dpu_crtc->frame_event_list); 704 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); 705 DPU_ATRACE_END("crtc_frame_event"); 706 } 707 708 /** 709 * dpu_crtc_frame_event_cb - crtc frame event callback API 710 * @crtc: Pointer to crtc 711 * @event: Event to process 712 * 713 * Encoder may call this for different events from different context - IRQ, 714 * user thread, commit_thread, etc. Each event should be carefully reviewed and 715 * should be processed in proper task context to avoid schedulin delay or 716 * properly manage the irq context's bottom half processing. 717 */ 718 void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event) 719 { 720 struct dpu_crtc *dpu_crtc; 721 struct msm_drm_private *priv; 722 struct dpu_crtc_frame_event *fevent; 723 unsigned long flags; 724 u32 crtc_id; 725 726 /* Nothing to do on idle event */ 727 if (event & DPU_ENCODER_FRAME_EVENT_IDLE) 728 return; 729 730 dpu_crtc = to_dpu_crtc(crtc); 731 priv = crtc->dev->dev_private; 732 crtc_id = drm_crtc_index(crtc); 733 734 trace_dpu_crtc_frame_event_cb(DRMID(crtc), event); 735 736 spin_lock_irqsave(&dpu_crtc->spin_lock, flags); 737 fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list, 738 struct dpu_crtc_frame_event, list); 739 if (fevent) 740 list_del_init(&fevent->list); 741 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); 742 743 if (!fevent) { 744 DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event); 745 return; 746 } 747 748 fevent->event = event; 749 fevent->crtc = crtc; 750 fevent->ts = ktime_get(); 751 kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work); 752 } 753 754 /** 755 * dpu_crtc_complete_commit - callback signalling completion of current commit 756 * @crtc: Pointer to drm crtc object 757 */ 758 void dpu_crtc_complete_commit(struct drm_crtc *crtc) 759 { 760 trace_dpu_crtc_complete_commit(DRMID(crtc)); 761 dpu_core_perf_crtc_update(crtc, 0); 762 _dpu_crtc_complete_flip(crtc); 763 } 764 765 static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc, 766 struct drm_crtc_state *state) 767 { 768 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 769 struct drm_display_mode *adj_mode = &state->adjusted_mode; 770 u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers; 771 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); 772 int i; 773 774 /* if we cannot merge 2 LMs (no 3d mux) better to fail earlier 775 * before even checking the width after the split 776 */ 777 if (!dpu_kms->catalog->caps->has_3d_merge && 778 adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width) 779 return -E2BIG; 780 781 for (i = 0; i < cstate->num_mixers; i++) { 782 struct drm_rect *r = &cstate->lm_bounds[i]; 783 r->x1 = crtc_split_width * i; 784 r->y1 = 0; 785 r->x2 = r->x1 + crtc_split_width; 786 r->y2 = adj_mode->vdisplay; 787 788 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); 789 790 if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width) 791 return -E2BIG; 792 } 793 794 return 0; 795 } 796 797 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, 798 struct dpu_hw_pcc_cfg *cfg) 799 { 800 struct drm_color_ctm *ctm; 801 802 memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg)); 803 804 ctm = (struct drm_color_ctm *)state->ctm->data; 805 806 if (!ctm) 807 return; 808 809 cfg->r.r = CONVERT_S3_15(ctm->matrix[0]); 810 cfg->g.r = CONVERT_S3_15(ctm->matrix[1]); 811 cfg->b.r = CONVERT_S3_15(ctm->matrix[2]); 812 813 cfg->r.g = CONVERT_S3_15(ctm->matrix[3]); 814 cfg->g.g = CONVERT_S3_15(ctm->matrix[4]); 815 cfg->b.g = CONVERT_S3_15(ctm->matrix[5]); 816 817 cfg->r.b = CONVERT_S3_15(ctm->matrix[6]); 818 cfg->g.b = CONVERT_S3_15(ctm->matrix[7]); 819 cfg->b.b = CONVERT_S3_15(ctm->matrix[8]); 820 } 821 822 static void _dpu_crtc_get_gc_lut(struct drm_crtc_state *state, 823 struct dpu_hw_gc_lut *gc_lut) 824 { 825 struct drm_color_lut *lut; 826 int i; 827 u32 val_even, val_odd; 828 829 lut = (struct drm_color_lut *)state->gamma_lut->data; 830 831 if (!lut) 832 return; 833 834 /* Pack 1024 10-bit entries in 512 32-bit registers */ 835 for (i = 0; i < PGC_TBL_LEN; i++) { 836 val_even = drm_color_lut_extract(lut[i * 2].green, 10); 837 val_odd = drm_color_lut_extract(lut[i * 2 + 1].green, 10); 838 gc_lut->c0[i] = val_even | (val_odd << 16); 839 val_even = drm_color_lut_extract(lut[i * 2].blue, 10); 840 val_odd = drm_color_lut_extract(lut[i * 2 + 1].blue, 10); 841 gc_lut->c1[i] = val_even | (val_odd << 16); 842 val_even = drm_color_lut_extract(lut[i * 2].red, 10); 843 val_odd = drm_color_lut_extract(lut[i * 2 + 1].red, 10); 844 gc_lut->c2[i] = val_even | (val_odd << 16); 845 } 846 847 /* Disable 8-bit rounding mode */ 848 gc_lut->flags = 0; 849 } 850 851 static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) 852 { 853 struct drm_crtc_state *state = crtc->state; 854 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 855 struct dpu_crtc_mixer *mixer = cstate->mixers; 856 struct dpu_hw_pcc_cfg cfg; 857 struct dpu_hw_gc_lut *gc_lut; 858 struct dpu_hw_ctl *ctl; 859 struct dpu_hw_dspp *dspp; 860 int i; 861 862 863 if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state)) 864 return; 865 866 for (i = 0; i < cstate->num_mixers; i++) { 867 ctl = mixer[i].lm_ctl; 868 dspp = mixer[i].hw_dspp; 869 870 if (!dspp) 871 continue; 872 873 if (dspp->ops.setup_pcc) { 874 if (!state->ctm) { 875 dspp->ops.setup_pcc(dspp, NULL); 876 } else { 877 _dpu_crtc_get_pcc_coeff(state, &cfg); 878 dspp->ops.setup_pcc(dspp, &cfg); 879 } 880 881 /* stage config flush mask */ 882 ctl->ops.update_pending_flush_dspp(ctl, 883 mixer[i].hw_dspp->idx, DPU_DSPP_PCC); 884 } 885 886 if (dspp->ops.setup_gc) { 887 if (!state->gamma_lut) { 888 dspp->ops.setup_gc(dspp, NULL); 889 } else { 890 gc_lut = kzalloc(sizeof(*gc_lut), GFP_KERNEL); 891 if (!gc_lut) 892 continue; 893 _dpu_crtc_get_gc_lut(state, gc_lut); 894 dspp->ops.setup_gc(dspp, gc_lut); 895 kfree(gc_lut); 896 } 897 898 /* stage config flush mask */ 899 ctl->ops.update_pending_flush_dspp(ctl, 900 mixer[i].hw_dspp->idx, DPU_DSPP_GC); 901 } 902 } 903 } 904 905 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, 906 struct drm_atomic_state *state) 907 { 908 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 909 struct drm_encoder *encoder; 910 911 if (!crtc->state->enable) { 912 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n", 913 crtc->base.id, crtc->state->enable); 914 return; 915 } 916 917 DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); 918 919 _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state); 920 921 /* encoder will trigger pending mask now */ 922 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 923 dpu_encoder_trigger_kickoff_pending(encoder); 924 925 /* 926 * If no mixers have been allocated in dpu_crtc_atomic_check(), 927 * it means we are trying to flush a CRTC whose state is disabled: 928 * nothing else needs to be done. 929 */ 930 if (unlikely(!cstate->num_mixers)) 931 return; 932 933 _dpu_crtc_blend_setup(crtc); 934 935 _dpu_crtc_setup_cp_blocks(crtc); 936 937 /* 938 * PP_DONE irq is only used by command mode for now. 939 * It is better to request pending before FLUSH and START trigger 940 * to make sure no pp_done irq missed. 941 * This is safe because no pp_done will happen before SW trigger 942 * in command mode. 943 */ 944 } 945 946 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, 947 struct drm_atomic_state *state) 948 { 949 struct dpu_crtc *dpu_crtc; 950 struct drm_device *dev; 951 struct drm_plane *plane; 952 struct msm_drm_private *priv; 953 unsigned long flags; 954 struct dpu_crtc_state *cstate; 955 956 if (!crtc->state->enable) { 957 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n", 958 crtc->base.id, crtc->state->enable); 959 return; 960 } 961 962 DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); 963 964 dpu_crtc = to_dpu_crtc(crtc); 965 cstate = to_dpu_crtc_state(crtc->state); 966 dev = crtc->dev; 967 priv = dev->dev_private; 968 969 if (crtc->index >= ARRAY_SIZE(priv->kms->event_thread)) { 970 DPU_ERROR("invalid crtc index[%d]\n", crtc->index); 971 return; 972 } 973 974 WARN_ON(dpu_crtc->event); 975 spin_lock_irqsave(&dev->event_lock, flags); 976 dpu_crtc->event = crtc->state->event; 977 crtc->state->event = NULL; 978 spin_unlock_irqrestore(&dev->event_lock, flags); 979 980 /* 981 * If no mixers has been allocated in dpu_crtc_atomic_check(), 982 * it means we are trying to flush a CRTC whose state is disabled: 983 * nothing else needs to be done. 984 */ 985 if (unlikely(!cstate->num_mixers)) 986 return; 987 988 /* update performance setting before crtc kickoff */ 989 dpu_core_perf_crtc_update(crtc, 1); 990 991 /* 992 * Final plane updates: Give each plane a chance to complete all 993 * required writes/flushing before crtc's "flush 994 * everything" call below. 995 */ 996 drm_atomic_crtc_for_each_plane(plane, crtc) { 997 if (dpu_crtc->smmu_state.transition_error) 998 dpu_plane_set_error(plane, true); 999 dpu_plane_flush(plane); 1000 } 1001 1002 /* Kickoff will be scheduled by outer layer */ 1003 } 1004 1005 /** 1006 * dpu_crtc_destroy_state - state destroy hook 1007 * @crtc: drm CRTC 1008 * @state: CRTC state object to release 1009 */ 1010 static void dpu_crtc_destroy_state(struct drm_crtc *crtc, 1011 struct drm_crtc_state *state) 1012 { 1013 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 1014 1015 DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); 1016 1017 __drm_atomic_helper_crtc_destroy_state(state); 1018 1019 kfree(cstate); 1020 } 1021 1022 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) 1023 { 1024 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1025 int ret, rc = 0; 1026 1027 if (!atomic_read(&dpu_crtc->frame_pending)) { 1028 DRM_DEBUG_ATOMIC("no frames pending\n"); 1029 return 0; 1030 } 1031 1032 DPU_ATRACE_BEGIN("frame done completion wait"); 1033 ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp, 1034 msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS)); 1035 if (!ret) { 1036 DRM_ERROR("frame done wait timed out, ret:%d\n", ret); 1037 rc = -ETIMEDOUT; 1038 } 1039 DPU_ATRACE_END("frame done completion wait"); 1040 1041 return rc; 1042 } 1043 1044 static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc) 1045 { 1046 struct drm_encoder *encoder; 1047 struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL; 1048 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); 1049 1050 /* Find encoder for real time display */ 1051 drm_for_each_encoder_mask(encoder, crtc->dev, 1052 crtc->state->encoder_mask) { 1053 if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) 1054 wb_encoder = encoder; 1055 else 1056 rt_encoder = encoder; 1057 } 1058 1059 if (!rt_encoder || !wb_encoder) { 1060 DRM_DEBUG_ATOMIC("real time or wb encoder not found\n"); 1061 return -EINVAL; 1062 } 1063 1064 dpu_encoder_prepare_for_kickoff(wb_encoder); 1065 dpu_encoder_prepare_for_kickoff(rt_encoder); 1066 1067 dpu_vbif_clear_errors(dpu_kms); 1068 1069 /* 1070 * Kickoff real time encoder last as it's the encoder that 1071 * will do the flush 1072 */ 1073 dpu_encoder_kickoff(wb_encoder); 1074 dpu_encoder_kickoff(rt_encoder); 1075 1076 /* Don't start frame done timers until the kickoffs have finished */ 1077 dpu_encoder_start_frame_done_timer(wb_encoder); 1078 dpu_encoder_start_frame_done_timer(rt_encoder); 1079 1080 return 0; 1081 } 1082 1083 /** 1084 * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc 1085 * @crtc: Pointer to drm crtc object 1086 */ 1087 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) 1088 { 1089 struct drm_encoder *encoder; 1090 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1091 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); 1092 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 1093 1094 /* 1095 * If no mixers has been allocated in dpu_crtc_atomic_check(), 1096 * it means we are trying to start a CRTC whose state is disabled: 1097 * nothing else needs to be done. 1098 */ 1099 if (unlikely(!cstate->num_mixers)) 1100 return; 1101 1102 DPU_ATRACE_BEGIN("crtc_commit"); 1103 1104 drm_for_each_encoder_mask(encoder, crtc->dev, 1105 crtc->state->encoder_mask) { 1106 if (!dpu_encoder_is_valid_for_commit(encoder)) { 1107 DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n"); 1108 goto end; 1109 } 1110 } 1111 1112 if (drm_crtc_in_clone_mode(crtc->state)) { 1113 if (dpu_crtc_kickoff_clone_mode(crtc)) 1114 goto end; 1115 } else { 1116 /* 1117 * Encoder will flush/start now, unless it has a tx pending. 1118 * If so, it may delay and flush at an irq event (e.g. ppdone) 1119 */ 1120 drm_for_each_encoder_mask(encoder, crtc->dev, 1121 crtc->state->encoder_mask) 1122 dpu_encoder_prepare_for_kickoff(encoder); 1123 1124 dpu_vbif_clear_errors(dpu_kms); 1125 1126 drm_for_each_encoder_mask(encoder, crtc->dev, 1127 crtc->state->encoder_mask) { 1128 dpu_encoder_kickoff(encoder); 1129 dpu_encoder_start_frame_done_timer(encoder); 1130 } 1131 } 1132 1133 if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) { 1134 /* acquire bandwidth and other resources */ 1135 DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id); 1136 } else 1137 DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id); 1138 1139 dpu_crtc->play_count++; 1140 1141 reinit_completion(&dpu_crtc->frame_done_comp); 1142 1143 end: 1144 DPU_ATRACE_END("crtc_commit"); 1145 } 1146 1147 static void dpu_crtc_reset(struct drm_crtc *crtc) 1148 { 1149 struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL); 1150 1151 if (crtc->state) 1152 dpu_crtc_destroy_state(crtc, crtc->state); 1153 1154 if (cstate) 1155 __drm_atomic_helper_crtc_reset(crtc, &cstate->base); 1156 else 1157 __drm_atomic_helper_crtc_reset(crtc, NULL); 1158 } 1159 1160 /** 1161 * dpu_crtc_duplicate_state - state duplicate hook 1162 * @crtc: Pointer to drm crtc structure 1163 */ 1164 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) 1165 { 1166 struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state); 1167 1168 cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL); 1169 if (!cstate) { 1170 DPU_ERROR("failed to allocate state\n"); 1171 return NULL; 1172 } 1173 1174 /* duplicate base helper */ 1175 __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base); 1176 1177 return &cstate->base; 1178 } 1179 1180 static void dpu_crtc_atomic_print_state(struct drm_printer *p, 1181 const struct drm_crtc_state *state) 1182 { 1183 const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 1184 int i; 1185 1186 for (i = 0; i < cstate->num_mixers; i++) { 1187 drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0); 1188 drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0); 1189 if (cstate->mixers[i].hw_dspp) 1190 drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0); 1191 } 1192 } 1193 1194 static void dpu_crtc_disable(struct drm_crtc *crtc, 1195 struct drm_atomic_state *state) 1196 { 1197 struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, 1198 crtc); 1199 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1200 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 1201 struct drm_encoder *encoder; 1202 unsigned long flags; 1203 bool release_bandwidth = false; 1204 1205 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); 1206 1207 /* If disable is triggered while in self refresh mode, 1208 * reset the encoder software state so that in enable 1209 * it won't trigger a warn while assigning crtc. 1210 */ 1211 if (old_crtc_state->self_refresh_active) { 1212 drm_for_each_encoder_mask(encoder, crtc->dev, 1213 old_crtc_state->encoder_mask) { 1214 dpu_encoder_assign_crtc(encoder, NULL); 1215 } 1216 return; 1217 } 1218 1219 /* Disable/save vblank irq handling */ 1220 drm_crtc_vblank_off(crtc); 1221 1222 drm_for_each_encoder_mask(encoder, crtc->dev, 1223 old_crtc_state->encoder_mask) { 1224 /* in video mode, we hold an extra bandwidth reference 1225 * as we cannot drop bandwidth at frame-done if any 1226 * crtc is being used in video mode. 1227 */ 1228 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) 1229 release_bandwidth = true; 1230 1231 /* 1232 * If disable is triggered during psr active(e.g: screen dim in PSR), 1233 * we will need encoder->crtc connection to process the device sleep & 1234 * preserve it during psr sequence. 1235 */ 1236 if (!crtc->state->self_refresh_active) 1237 dpu_encoder_assign_crtc(encoder, NULL); 1238 } 1239 1240 /* wait for frame_event_done completion */ 1241 if (_dpu_crtc_wait_for_frame_done(crtc)) 1242 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", 1243 crtc->base.id, 1244 atomic_read(&dpu_crtc->frame_pending)); 1245 1246 trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc); 1247 dpu_crtc->enabled = false; 1248 1249 if (atomic_read(&dpu_crtc->frame_pending)) { 1250 trace_dpu_crtc_disable_frame_pending(DRMID(crtc), 1251 atomic_read(&dpu_crtc->frame_pending)); 1252 if (release_bandwidth) 1253 dpu_core_perf_crtc_release_bw(crtc); 1254 atomic_set(&dpu_crtc->frame_pending, 0); 1255 } 1256 1257 dpu_core_perf_crtc_update(crtc, 0); 1258 1259 /* disable clk & bw control until clk & bw properties are set */ 1260 cstate->bw_control = false; 1261 cstate->bw_split_vote = false; 1262 1263 if (crtc->state->event && !crtc->state->active) { 1264 spin_lock_irqsave(&crtc->dev->event_lock, flags); 1265 drm_crtc_send_vblank_event(crtc, crtc->state->event); 1266 crtc->state->event = NULL; 1267 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 1268 } 1269 1270 pm_runtime_put_sync(crtc->dev->dev); 1271 } 1272 1273 static void dpu_crtc_enable(struct drm_crtc *crtc, 1274 struct drm_atomic_state *state) 1275 { 1276 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1277 struct drm_encoder *encoder; 1278 bool request_bandwidth = false; 1279 struct drm_crtc_state *old_crtc_state; 1280 1281 old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); 1282 1283 pm_runtime_get_sync(crtc->dev->dev); 1284 1285 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); 1286 1287 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { 1288 /* in video mode, we hold an extra bandwidth reference 1289 * as we cannot drop bandwidth at frame-done if any 1290 * crtc is being used in video mode. 1291 */ 1292 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) 1293 request_bandwidth = true; 1294 } 1295 1296 if (request_bandwidth) 1297 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); 1298 1299 trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc); 1300 dpu_crtc->enabled = true; 1301 1302 if (!old_crtc_state->self_refresh_active) { 1303 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 1304 dpu_encoder_assign_crtc(encoder, crtc); 1305 } 1306 1307 /* Enable/restore vblank irq handling */ 1308 drm_crtc_vblank_on(crtc); 1309 } 1310 1311 static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate) 1312 { 1313 struct drm_crtc *crtc = cstate->crtc; 1314 struct drm_encoder *encoder; 1315 1316 if (cstate->self_refresh_active) 1317 return true; 1318 1319 drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) { 1320 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) { 1321 return true; 1322 } 1323 } 1324 1325 return false; 1326 } 1327 1328 static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) 1329 { 1330 int total_planes = crtc->dev->mode_config.num_total_plane; 1331 struct drm_atomic_state *state = crtc_state->state; 1332 struct dpu_global_state *global_state; 1333 struct drm_plane_state **states; 1334 struct drm_plane *plane; 1335 int ret; 1336 1337 global_state = dpu_kms_get_global_state(crtc_state->state); 1338 if (IS_ERR(global_state)) 1339 return PTR_ERR(global_state); 1340 1341 dpu_rm_release_all_sspp(global_state, crtc); 1342 1343 if (!crtc_state->enable) 1344 return 0; 1345 1346 states = kcalloc(total_planes, sizeof(*states), GFP_KERNEL); 1347 if (!states) 1348 return -ENOMEM; 1349 1350 drm_atomic_crtc_state_for_each_plane(plane, crtc_state) { 1351 struct drm_plane_state *plane_state = 1352 drm_atomic_get_plane_state(state, plane); 1353 1354 if (IS_ERR(plane_state)) { 1355 ret = PTR_ERR(plane_state); 1356 goto done; 1357 } 1358 1359 states[plane_state->normalized_zpos] = plane_state; 1360 } 1361 1362 ret = dpu_assign_plane_resources(global_state, state, crtc, states, total_planes); 1363 1364 done: 1365 kfree(states); 1366 return ret; 1367 } 1368 1369 #define MAX_CHANNELS_PER_CRTC PIPES_PER_PLANE 1370 #define MAX_HDISPLAY_SPLIT 1080 1371 1372 static struct msm_display_topology dpu_crtc_get_topology( 1373 struct drm_crtc *crtc, 1374 struct dpu_kms *dpu_kms, 1375 struct drm_crtc_state *crtc_state) 1376 { 1377 struct drm_display_mode *mode = &crtc_state->adjusted_mode; 1378 struct msm_display_topology topology = {0}; 1379 struct drm_encoder *drm_enc; 1380 1381 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) 1382 dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state, 1383 &crtc_state->adjusted_mode); 1384 1385 topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state); 1386 1387 /* 1388 * Datapath topology selection 1389 * 1390 * Dual display 1391 * 2 LM, 2 INTF ( Split display using 2 interfaces) 1392 * 1393 * Single display 1394 * 1 LM, 1 INTF 1395 * 2 LM, 1 INTF (stream merge to support high resolution interfaces) 1396 * 1397 * If DSC is enabled, use 2 LMs for 2:2:1 topology 1398 * 1399 * Add dspps to the reservation requirements if ctm or gamma_lut are requested 1400 * 1401 * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not 1402 * enabled. This is because in cases where CWB is enabled, num_intf will 1403 * count both the WB and real-time phys encoders. 1404 * 1405 * For non-DSC CWB usecases, have the num_lm be decided by the 1406 * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check. 1407 */ 1408 1409 if (topology.num_intf == 2 && !topology.cwb_enabled) 1410 topology.num_lm = 2; 1411 else if (topology.num_dsc == 2) 1412 topology.num_lm = 2; 1413 else if (dpu_kms->catalog->caps->has_3d_merge) 1414 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1; 1415 else 1416 topology.num_lm = 1; 1417 1418 if (crtc_state->ctm || crtc_state->gamma_lut) 1419 topology.num_dspp = topology.num_lm; 1420 1421 return topology; 1422 } 1423 1424 static int dpu_crtc_assign_resources(struct drm_crtc *crtc, 1425 struct drm_crtc_state *crtc_state) 1426 { 1427 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC]; 1428 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC]; 1429 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC]; 1430 int i, num_lm, num_ctl, num_dspp; 1431 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); 1432 struct dpu_global_state *global_state; 1433 struct dpu_crtc_state *cstate; 1434 struct msm_display_topology topology; 1435 int ret; 1436 1437 /* 1438 * Release and Allocate resources on every modeset 1439 */ 1440 global_state = dpu_kms_get_global_state(crtc_state->state); 1441 if (IS_ERR(global_state)) 1442 return PTR_ERR(global_state); 1443 1444 dpu_rm_release(global_state, crtc); 1445 1446 if (!crtc_state->enable) 1447 return 0; 1448 1449 topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state); 1450 ret = dpu_rm_reserve(&dpu_kms->rm, global_state, 1451 crtc_state->crtc, &topology); 1452 if (ret) 1453 return ret; 1454 1455 cstate = to_dpu_crtc_state(crtc_state); 1456 1457 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1458 crtc_state->crtc, 1459 DPU_HW_BLK_CTL, hw_ctl, 1460 ARRAY_SIZE(hw_ctl)); 1461 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1462 crtc_state->crtc, 1463 DPU_HW_BLK_LM, hw_lm, 1464 ARRAY_SIZE(hw_lm)); 1465 num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1466 crtc_state->crtc, 1467 DPU_HW_BLK_DSPP, hw_dspp, 1468 ARRAY_SIZE(hw_dspp)); 1469 1470 for (i = 0; i < num_lm; i++) { 1471 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); 1472 1473 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); 1474 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); 1475 if (i < num_dspp) 1476 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); 1477 } 1478 1479 cstate->num_mixers = num_lm; 1480 1481 return 0; 1482 } 1483 1484 /** 1485 * dpu_crtc_check_mode_changed: check if full modeset is required 1486 * @old_crtc_state: Previous CRTC state 1487 * @new_crtc_state: Corresponding CRTC state to be checked 1488 * 1489 * Check if the changes in the object properties demand full mode set. 1490 */ 1491 int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state, 1492 struct drm_crtc_state *new_crtc_state) 1493 { 1494 struct drm_encoder *drm_enc; 1495 struct drm_crtc *crtc = new_crtc_state->crtc; 1496 bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state); 1497 bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state); 1498 1499 DRM_DEBUG_ATOMIC("%d\n", crtc->base.id); 1500 1501 /* there might be cases where encoder needs a modeset too */ 1502 drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) { 1503 if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state)) 1504 new_crtc_state->mode_changed = true; 1505 } 1506 1507 if ((clone_mode_requested && !clone_mode_enabled) || 1508 (!clone_mode_requested && clone_mode_enabled)) 1509 new_crtc_state->mode_changed = true; 1510 1511 return 0; 1512 } 1513 1514 static int dpu_crtc_atomic_check(struct drm_crtc *crtc, 1515 struct drm_atomic_state *state) 1516 { 1517 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 1518 crtc); 1519 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1520 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state); 1521 1522 const struct drm_plane_state *pstate; 1523 struct drm_plane *plane; 1524 1525 int rc = 0; 1526 1527 bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state); 1528 1529 /* don't reallocate resources if only ACTIVE has beeen changed */ 1530 if (crtc_state->mode_changed || crtc_state->connectors_changed || 1531 crtc_state->color_mgmt_changed) { 1532 rc = dpu_crtc_assign_resources(crtc, crtc_state); 1533 if (rc < 0) 1534 return rc; 1535 } 1536 1537 if (dpu_use_virtual_planes && 1538 (crtc_state->planes_changed || crtc_state->zpos_changed)) { 1539 rc = dpu_crtc_reassign_planes(crtc, crtc_state); 1540 if (rc < 0) 1541 return rc; 1542 } 1543 1544 if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) { 1545 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n", 1546 crtc->base.id, crtc_state->enable, 1547 crtc_state->active); 1548 memset(&cstate->new_perf, 0, sizeof(cstate->new_perf)); 1549 return 0; 1550 } 1551 1552 DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name); 1553 1554 if (cstate->num_mixers) { 1555 rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); 1556 if (rc) 1557 return rc; 1558 } 1559 1560 /* FIXME: move this to dpu_plane_atomic_check? */ 1561 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { 1562 struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate); 1563 1564 if (IS_ERR_OR_NULL(pstate)) { 1565 rc = PTR_ERR(pstate); 1566 DPU_ERROR("%s: failed to get plane%d state, %d\n", 1567 dpu_crtc->name, plane->base.id, rc); 1568 return rc; 1569 } 1570 1571 if (!pstate->visible) 1572 continue; 1573 1574 dpu_pstate->needs_dirtyfb = needs_dirtyfb; 1575 } 1576 1577 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); 1578 1579 rc = dpu_core_perf_crtc_check(crtc, crtc_state); 1580 if (rc) { 1581 DPU_ERROR("crtc%d failed performance check %d\n", 1582 crtc->base.id, rc); 1583 return rc; 1584 } 1585 1586 return 0; 1587 } 1588 1589 static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc, 1590 const struct drm_display_mode *mode) 1591 { 1592 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); 1593 u64 adjusted_mode_clk; 1594 1595 /* if there is no 3d_mux block we cannot merge LMs so we cannot 1596 * split the large layer into 2 LMs, filter out such modes 1597 */ 1598 if (!dpu_kms->catalog->caps->has_3d_merge && 1599 mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width) 1600 return MODE_BAD_HVALUE; 1601 1602 adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock, 1603 dpu_kms->perf.perf_cfg); 1604 1605 if (dpu_kms->catalog->caps->has_3d_merge) 1606 adjusted_mode_clk /= 2; 1607 1608 /* 1609 * The given mode, adjusted for the perf clock factor, should not exceed 1610 * the max core clock rate 1611 */ 1612 if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000) 1613 return MODE_CLOCK_HIGH; 1614 1615 /* 1616 * max crtc width is equal to the max mixer width * 2 and max height is 4K 1617 */ 1618 return drm_mode_validate_size(mode, 1619 2 * dpu_kms->catalog->caps->max_mixer_width, 1620 4096); 1621 } 1622 1623 /** 1624 * dpu_crtc_vblank - enable or disable vblanks for this crtc 1625 * @crtc: Pointer to drm crtc object 1626 * @en: true to enable vblanks, false to disable 1627 */ 1628 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) 1629 { 1630 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1631 struct drm_encoder *enc; 1632 1633 trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc); 1634 1635 /* 1636 * Normally we would iterate through encoder_mask in crtc state to find 1637 * attached encoders. In this case, we might be disabling vblank _after_ 1638 * encoder_mask has been cleared. 1639 * 1640 * Instead, we "assign" a crtc to the encoder in enable and clear it in 1641 * disable (which is also after encoder_mask is cleared). So instead of 1642 * using encoder mask, we'll ask the encoder to toggle itself iff it's 1643 * currently assigned to our crtc. 1644 * 1645 * Note also that this function cannot be called while crtc is disabled 1646 * since we use drm_crtc_vblank_on/off. So we don't need to worry 1647 * about the assigned crtcs being inconsistent with the current state 1648 * (which means no need to worry about modeset locks). 1649 */ 1650 list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { 1651 trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en, 1652 dpu_crtc); 1653 1654 dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en); 1655 } 1656 1657 return 0; 1658 } 1659 1660 #ifdef CONFIG_DEBUG_FS 1661 static int _dpu_debugfs_status_show(struct seq_file *s, void *data) 1662 { 1663 struct dpu_crtc *dpu_crtc; 1664 struct dpu_plane_state *pstate = NULL; 1665 struct dpu_crtc_mixer *m; 1666 1667 struct drm_crtc *crtc; 1668 struct drm_plane *plane; 1669 struct drm_display_mode *mode; 1670 struct drm_framebuffer *fb; 1671 struct drm_plane_state *state; 1672 struct dpu_crtc_state *cstate; 1673 1674 int i, out_width; 1675 1676 dpu_crtc = s->private; 1677 crtc = &dpu_crtc->base; 1678 1679 drm_modeset_lock_all(crtc->dev); 1680 cstate = to_dpu_crtc_state(crtc->state); 1681 1682 mode = &crtc->state->adjusted_mode; 1683 out_width = mode->hdisplay / cstate->num_mixers; 1684 1685 seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id, 1686 mode->hdisplay, mode->vdisplay); 1687 1688 seq_puts(s, "\n"); 1689 1690 for (i = 0; i < cstate->num_mixers; ++i) { 1691 m = &cstate->mixers[i]; 1692 seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", 1693 m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, 1694 out_width, mode->vdisplay); 1695 } 1696 1697 seq_puts(s, "\n"); 1698 1699 drm_atomic_crtc_for_each_plane(plane, crtc) { 1700 pstate = to_dpu_plane_state(plane->state); 1701 state = plane->state; 1702 1703 if (!pstate || !state) 1704 continue; 1705 1706 seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id, 1707 pstate->stage); 1708 1709 if (plane->state->fb) { 1710 fb = plane->state->fb; 1711 1712 seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ", 1713 fb->base.id, (char *) &fb->format->format, 1714 fb->width, fb->height); 1715 for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i) 1716 seq_printf(s, "cpp[%d]:%u ", 1717 i, fb->format->cpp[i]); 1718 seq_puts(s, "\n\t"); 1719 1720 seq_printf(s, "modifier:%8llu ", fb->modifier); 1721 seq_puts(s, "\n"); 1722 1723 seq_puts(s, "\t"); 1724 for (i = 0; i < ARRAY_SIZE(fb->pitches); i++) 1725 seq_printf(s, "pitches[%d]:%8u ", i, 1726 fb->pitches[i]); 1727 seq_puts(s, "\n"); 1728 1729 seq_puts(s, "\t"); 1730 for (i = 0; i < ARRAY_SIZE(fb->offsets); i++) 1731 seq_printf(s, "offsets[%d]:%8u ", i, 1732 fb->offsets[i]); 1733 seq_puts(s, "\n"); 1734 } 1735 1736 seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n", 1737 state->src_x, state->src_y, state->src_w, state->src_h); 1738 1739 seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n", 1740 state->crtc_x, state->crtc_y, state->crtc_w, 1741 state->crtc_h); 1742 1743 for (i = 0; i < PIPES_PER_PLANE; i++) { 1744 if (!pstate->pipe[i].sspp) 1745 continue; 1746 seq_printf(s, "\tsspp[%d]:%s\n", 1747 i, pstate->pipe[i].sspp->cap->name); 1748 seq_printf(s, "\tmultirect[%d]: mode: %d index: %d\n", 1749 i, pstate->pipe[i].multirect_mode, 1750 pstate->pipe[i].multirect_index); 1751 } 1752 1753 seq_puts(s, "\n"); 1754 } 1755 if (dpu_crtc->vblank_cb_count) { 1756 ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time); 1757 s64 diff_ms = ktime_to_ms(diff); 1758 s64 fps = diff_ms ? div_s64( 1759 dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0; 1760 1761 seq_printf(s, 1762 "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n", 1763 fps, dpu_crtc->vblank_cb_count, 1764 ktime_to_ms(diff), dpu_crtc->play_count); 1765 1766 /* reset time & count for next measurement */ 1767 dpu_crtc->vblank_cb_count = 0; 1768 dpu_crtc->vblank_cb_time = ktime_set(0, 0); 1769 } 1770 1771 drm_modeset_unlock_all(crtc->dev); 1772 1773 return 0; 1774 } 1775 1776 DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status); 1777 1778 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) 1779 { 1780 struct drm_crtc *crtc = s->private; 1781 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1782 1783 seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc)); 1784 seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc)); 1785 seq_printf(s, "core_clk_rate: %llu\n", 1786 dpu_crtc->cur_perf.core_clk_rate); 1787 seq_printf(s, "bw_ctl: %uk\n", 1788 (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000)); 1789 seq_printf(s, "max_per_pipe_ib: %u\n", 1790 dpu_crtc->cur_perf.max_per_pipe_ib); 1791 1792 return 0; 1793 } 1794 DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state); 1795 1796 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) 1797 { 1798 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1799 1800 debugfs_create_file("status", 0400, 1801 crtc->debugfs_entry, 1802 dpu_crtc, &_dpu_debugfs_status_fops); 1803 debugfs_create_file("state", 0600, 1804 crtc->debugfs_entry, 1805 &dpu_crtc->base, 1806 &dpu_crtc_debugfs_state_fops); 1807 1808 return 0; 1809 } 1810 #else 1811 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) 1812 { 1813 return 0; 1814 } 1815 #endif /* CONFIG_DEBUG_FS */ 1816 1817 static int dpu_crtc_late_register(struct drm_crtc *crtc) 1818 { 1819 return _dpu_crtc_init_debugfs(crtc); 1820 } 1821 1822 static const struct drm_crtc_funcs dpu_crtc_funcs = { 1823 .set_config = drm_atomic_helper_set_config, 1824 .page_flip = drm_atomic_helper_page_flip, 1825 .reset = dpu_crtc_reset, 1826 .atomic_duplicate_state = dpu_crtc_duplicate_state, 1827 .atomic_destroy_state = dpu_crtc_destroy_state, 1828 .atomic_print_state = dpu_crtc_atomic_print_state, 1829 .late_register = dpu_crtc_late_register, 1830 .verify_crc_source = dpu_crtc_verify_crc_source, 1831 .set_crc_source = dpu_crtc_set_crc_source, 1832 .enable_vblank = msm_crtc_enable_vblank, 1833 .disable_vblank = msm_crtc_disable_vblank, 1834 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 1835 .get_vblank_counter = dpu_crtc_get_vblank_counter, 1836 }; 1837 1838 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { 1839 .atomic_disable = dpu_crtc_disable, 1840 .atomic_enable = dpu_crtc_enable, 1841 .atomic_check = dpu_crtc_atomic_check, 1842 .atomic_begin = dpu_crtc_atomic_begin, 1843 .atomic_flush = dpu_crtc_atomic_flush, 1844 .mode_valid = dpu_crtc_mode_valid, 1845 .get_scanout_position = dpu_crtc_get_scanout_position, 1846 }; 1847 1848 /** 1849 * dpu_crtc_init - create a new crtc object 1850 * @dev: dpu device 1851 * @plane: base plane 1852 * @cursor: cursor plane 1853 * @return: new crtc object or error 1854 * 1855 * initialize CRTC 1856 */ 1857 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, 1858 struct drm_plane *cursor) 1859 { 1860 struct msm_drm_private *priv = dev->dev_private; 1861 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 1862 struct drm_crtc *crtc = NULL; 1863 struct dpu_crtc *dpu_crtc; 1864 int i, ret; 1865 1866 dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base, 1867 plane, cursor, 1868 &dpu_crtc_funcs, 1869 NULL); 1870 1871 if (IS_ERR(dpu_crtc)) 1872 return ERR_CAST(dpu_crtc); 1873 1874 crtc = &dpu_crtc->base; 1875 crtc->dev = dev; 1876 1877 spin_lock_init(&dpu_crtc->spin_lock); 1878 atomic_set(&dpu_crtc->frame_pending, 0); 1879 1880 init_completion(&dpu_crtc->frame_done_comp); 1881 1882 INIT_LIST_HEAD(&dpu_crtc->frame_event_list); 1883 1884 for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) { 1885 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list); 1886 list_add(&dpu_crtc->frame_events[i].list, 1887 &dpu_crtc->frame_event_list); 1888 kthread_init_work(&dpu_crtc->frame_events[i].work, 1889 dpu_crtc_frame_event_work); 1890 } 1891 1892 drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); 1893 1894 if (dpu_kms->catalog->dspp_count) { 1895 const struct dpu_dspp_cfg *dspp = &dpu_kms->catalog->dspp[0]; 1896 1897 if (dspp->sblk->gc.base) { 1898 drm_mode_crtc_set_gamma_size(crtc, DPU_GAMMA_LUT_SIZE); 1899 drm_crtc_enable_color_mgmt(crtc, 0, true, DPU_GAMMA_LUT_SIZE); 1900 } else { 1901 drm_crtc_enable_color_mgmt(crtc, 0, true, 0); 1902 } 1903 } 1904 1905 /* save user friendly CRTC name for later */ 1906 snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); 1907 1908 /* initialize event handling */ 1909 spin_lock_init(&dpu_crtc->event_lock); 1910 1911 ret = drm_self_refresh_helper_init(crtc); 1912 if (ret) { 1913 DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n", 1914 crtc->name, ret); 1915 return ERR_PTR(ret); 1916 } 1917 1918 DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name); 1919 return crtc; 1920 } 1921