// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_self_refresh_helper.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
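
/*
 * CONVERT_S3_15() truncates a CTM coefficient from DRM's S31.32
 * sign-magnitude fixed-point format down to an 18-bit magnitude with 3
 * integer and 15 fractional bits: the sign bit (bit 63) is masked off and
 * the fraction is truncated to its 15 most significant bits. For example,
 * 1.0 in S31.32 (0x1_0000_0000) becomes 0x8000 in 3.15.
 */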

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
	if (!src_name ||
	    !strcmp(src_name, "none"))
		return DPU_CRTC_CRC_SOURCE_NONE;
	if (!strcmp(src_name, "auto") ||
	    !strcmp(src_name, "lm"))
		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
	if (!strcmp(src_name, "encoder"))
		return DPU_CRTC_CRC_SOURCE_ENCODER;

	return DPU_CRTC_CRC_SOURCE_INVALID;
}

static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
		const char *src_name, size_t *values_cnt)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
		*values_cnt = crtc_state->num_mixers;
	} else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
		struct drm_encoder *drm_enc;

		*values_cnt = 0;

		drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
			*values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
	}

	return 0;
}

static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	int i;

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		/* Calculate MISR over 1 frame */
		m->hw_lm->ops.setup_misr(m->hw_lm);
	}
}

static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_setup_misr(drm_enc);
}

static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;

	bool was_enabled;
	bool enable = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);

	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);

		if (ret)
			goto cleanup;

	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	crtc_state->crc_frame_skip_count = 0;

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		dpu_crtc_setup_lm_misr(crtc_state);
	else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
		dpu_crtc_setup_encoder_misr(crtc);
	else
		ret = -EINVAL;

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}
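
/*
 * dpu_crtc_verify_crc_source() and dpu_crtc_set_crc_source() above implement
 * the generic DRM CRC debugfs ABI: userspace selects a source by writing
 * "auto", "lm", "encoder" or "none" to
 * /sys/kernel/debug/dri/<minor>/crtc-<n>/crc/control and then reads one CRC
 * entry per frame from crc/data in the same directory.
 */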

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);

	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_vsync_count(encoder);
}

static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
		struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	u32 crcs[CRTC_DUAL_MIXERS];

	int rc = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));

	for (i = 0; i < crtc_state->num_mixers; ++i) {

		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);

		if (rc) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;
	int rc, pos = 0;
	u32 crcs[INTF_MAX];

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
		rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
		if (rc < 0) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");

			return rc;
		}

		pos += rc;
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	/* Skip first 2 frames in case of "uncooked" CRCs */
	if (crtc_state->crc_frame_skip_count < 2) {
		crtc_state->crc_frame_skip_count++;
		return 0;
	}

	if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		return dpu_crtc_get_lm_crc(crtc, crtc_state);
	else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
		return dpu_crtc_get_encoder_crc(crtc);

	return -EINVAL;
}
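
/*
 * The scanout position below is derived from the encoder's hardware line
 * counter. In line-counter terms (the counter is 1 at the start of the
 * VSYNC pulse) the vertical timing looks roughly like:
 *
 *   1 .. vsw                   VSYNC pulse
 *   vsw+1 .. vsw+vbp           vertical back porch
 *   vsw+vbp+1 .. +vdisplay     active lines  -> vpos in [0, vdisplay)
 *   .. vtotal                  vertical front porch -> negative vpos
 */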

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					  bool in_vblank_irq,
					  int *vpos, int *hpos,
					  ktime_t *stime, ktime_t *etime,
					  const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate,
		const struct msm_format *format,
		const struct dpu_mdss_version *mdss_ver)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	u32 blend_op;
	u32 fg_alpha, bg_alpha, max_alpha;

	if (mdss_ver->core_major_ver < 12) {
		max_alpha = 0xff;
		fg_alpha = pstate->base.alpha >> 8;
	} else {
		max_alpha = 0x3ff;
		fg_alpha = pstate->base.alpha >> 6;
	}
	bg_alpha = max_alpha - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != max_alpha) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != max_alpha) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				DPU_BLEND_FG_INV_MOD_ALPHA |
				DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
			fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
			 &format->pixel_format, format->alpha_enable, blend_op);
}
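
/*
 * For reference, the DRM blend modes that the three cases above implement
 * are defined as (alpha = constant plane alpha, Af = per-pixel fg alpha):
 *
 *   "None" (opaque):  out = alpha * fg + (1 - alpha) * bg
 *   "Pre-multiplied": out = alpha * fg + (1 - alpha * Af) * bg
 *   "Coverage":       out = alpha * Af * fg + (1 - alpha * Af) * bg
 *
 * The flag combinations chosen above are this driver's mapping of those
 * equations onto the LM blend unit.
 */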

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx;

	crtc_state = to_dpu_crtc_state(crtc->state);

	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_idx & 0x1;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
				       struct drm_plane *plane,
				       struct dpu_crtc_mixer *mixer,
				       u32 num_mixers,
				       enum dpu_stage stage,
				       const struct msm_format *format,
				       uint64_t modifier,
				       struct dpu_sw_pipe *pipe,
				       unsigned int stage_idx,
				       struct dpu_hw_stage_cfg *stage_cfg)
{
	u32 lm_idx;
	enum dpu_sspp sspp_idx;
	struct drm_plane_state *state;

	sspp_idx = pipe->sspp->idx;

	state = plane->state;

	trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
				   state, to_dpu_plane_state(state), stage_idx,
				   format->pixel_format,
				   modifier);

	DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
			 crtc->base.id,
			 stage,
			 plane->base.id,
			 sspp_idx - SSPP_NONE,
			 state->fb ? state->fb->base.id : -1,
			 pipe->multirect_index);

	stage_cfg->stage[stage][stage_idx] = sspp_idx;
	stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;

	/* blend config update */
	for (lm_idx = 0; lm_idx < num_mixers; lm_idx++)
		mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
	struct dpu_hw_stage_cfg *stage_cfg)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	const struct msm_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	u32 lm_idx;
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(active_fetch, SSPP_MAX);
	DECLARE_BITMAP(active_pipes, SSPP_MAX);

	memset(active_fetch, 0, sizeof(active_fetch));
	memset(active_pipes, 0, sizeof(active_pipes));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		if (!state->visible)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		format = msm_framebuffer_format(pstate->base.fb);

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		set_bit(pstate->pipe.sspp->idx, active_fetch);
		set_bit(pstate->pipe.sspp->idx, active_pipes);
		_dpu_crtc_blend_setup_pipe(crtc, plane,
					   mixer, cstate->num_mixers,
					   pstate->stage,
					   format, fb ? fb->modifier : 0,
					   &pstate->pipe, 0, stage_cfg);

		if (pstate->r_pipe.sspp) {
			set_bit(pstate->r_pipe.sspp->idx, active_fetch);
			set_bit(pstate->r_pipe.sspp->idx, active_pipes);
			_dpu_crtc_blend_setup_pipe(crtc, plane,
						   mixer, cstate->num_mixers,
						   pstate->stage,
						   format, fb ? fb->modifier : 0,
						   &pstate->r_pipe, 1, stage_cfg);
		}

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format,
						  ctl->mdss_ver);

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_fetch_pipes)
		ctl->ops.set_active_fetch_pipes(ctl, active_fetch);

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, active_pipes);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg stage_cfg;
	DECLARE_BITMAP(active_lms, LM_MAX);
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
		if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
			mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
		if (mixer[i].lm_ctl->ops.set_active_pipes)
			mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL);

		if (mixer[i].hw_lm->ops.clear_all_blendstages)
			mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm);
	}

	/* initialize stage cfg */
	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
	memset(active_lms, 0, sizeof(active_lms));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		/* stage config flush mask */
		ctl->ops.update_pending_flush_mixer(ctl,
						    mixer[i].hw_lm->idx);

		set_bit(lm->idx, active_lms);
		if (ctl->ops.set_active_lms)
			ctl->ops.set_active_lms(ctl, active_lms);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
				 mixer[i].hw_lm->idx - LM_0,
				 mixer[i].mixer_op_mode,
				 ctl->idx - CTL_0);

		if (ctl->ops.setup_blendstage)
			ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
						  &stage_cfg);

		if (lm->ops.setup_blendstage)
			lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx,
						 &stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However, PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * dpu_crtc_get_intf_mode - get interface mode of the given crtc
 * @crtc: Pointer to crtc
 */
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However, reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

/**
 * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
 * @crtc: Pointer to drm crtc object
 */
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			 ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
			  crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}
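
/*
 * Frame events are drawn from the fixed pool embedded in struct dpu_crtc and
 * queued on frame_event_list by dpu_crtc_init(); the worker above returns
 * each entry to that list once it has been processed, so a burst of more
 * events than pool entries shows up as the "overflow" error below.
 */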

/**
 * dpu_crtc_frame_event_cb - crtc frame event callback API
 * @crtc: Pointer to crtc
 * @event: Event to process
 *
 * Encoder may call this for different events from different contexts - IRQ,
 * user thread, commit_thread, etc. Each event should be carefully reviewed
 * and should be processed in the proper task context to avoid scheduling
 * delay or to properly manage the IRQ context's bottom half processing.
 */
void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
{
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work);
}

/**
 * dpu_crtc_complete_commit - callback signalling completion of current commit
 * @crtc: Pointer to drm crtc object
 */
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0);
	_dpu_crtc_complete_flip(crtc);
}

static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
					       struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	int i;

	/* if we cannot merge 2 LMs (no 3d mux) better to fail earlier
	 * before even checking the width after the split
	 */
	if (!dpu_kms->catalog->caps->has_3d_merge &&
	    adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
		return -E2BIG;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];

		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);

		if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
			return -E2BIG;
	}

	return 0;
}
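
/*
 * Example of the split above: a 2560x1440 mode on two mixers produces
 * lm_bounds[0] = (0, 0)..(1280, 1440) and lm_bounds[1] = (1280, 0)..(2560, 1440),
 * and each half still has to fit within the catalog's max_mixer_width.
 */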

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}
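
/*
 * drm_color_ctm stores the 3x3 matrix row by row (matrix[0..2] weight the
 * R, G and B inputs contributing to the red output, and so on), so the
 * mapping above reads as cfg-><input channel>.<output channel>.
 */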

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		/* stage config flush mask */
		ctl->ops.update_pending_flush_dspp(ctl,
						   mixer[i].hw_dspp->idx, DPU_DSPP_PCC);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				 crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				 crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->kms->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush
	 * everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}
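
/*
 * frame_pending counts kicked-off but not yet completed frames: it is
 * incremented in dpu_crtc_commit_kickoff() and decremented from the frame
 * event worker. frame_done_comp is reinitialised on each kickoff, so the
 * wait above only completes for the most recently started frame.
 */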

static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL;
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);

	/* Find encoder for real time display */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
			wb_encoder = encoder;
		else
			rt_encoder = encoder;
	}

	if (!rt_encoder || !wb_encoder) {
		DRM_DEBUG_ATOMIC("real time or wb encoder not found\n");
		return -EINVAL;
	}

	dpu_encoder_prepare_for_kickoff(wb_encoder);
	dpu_encoder_prepare_for_kickoff(rt_encoder);

	dpu_vbif_clear_errors(dpu_kms);

	/*
	 * Kickoff real time encoder last as it's the encoder that
	 * will do the flush
	 */
	dpu_encoder_kickoff(wb_encoder);
	dpu_encoder_kickoff(rt_encoder);

	/* Don't start frame done timers until the kickoffs have finished */
	dpu_encoder_start_frame_done_timer(wb_encoder);
	dpu_encoder_start_frame_done_timer(rt_encoder);

	return 0;
}

/**
 * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
 * @crtc: Pointer to drm crtc object
 */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask) {
		if (!dpu_encoder_is_valid_for_commit(encoder)) {
			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
			goto end;
		}
	}

	if (drm_crtc_in_clone_mode(crtc->state)) {
		if (dpu_crtc_kickoff_clone_mode(crtc))
			goto end;
	} else {
		/*
		 * Encoder will flush/start now, unless it has a tx pending.
		 * If so, it may delay and flush at an irq event (e.g. ppdone)
		 */
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc->state->encoder_mask)
			dpu_encoder_prepare_for_kickoff(encoder);

		dpu_vbif_clear_errors(dpu_kms);

		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc->state->encoder_mask) {
			dpu_encoder_kickoff(encoder);
			dpu_encoder_start_frame_done_timer(encoder);
		}
	}

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	reinit_completion(&dpu_crtc->frame_done_comp);

end:
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_atomic_print_state(struct drm_printer *p,
					const struct drm_crtc_state *state)
{
	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
		if (cstate->mixers[i].hw_dspp)
			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
	}
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/*
	 * If disable is triggered while in self refresh mode,
	 * reset the encoder software state so that in enable
	 * it won't trigger a warn while assigning crtc.
	 */
	if (old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  old_crtc_state->encoder_mask) {
			dpu_encoder_assign_crtc(encoder, NULL);
		}
		return;
	}

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/*
		 * In video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;

		/*
		 * If disable is triggered while PSR is active (e.g. screen dim
		 * in PSR), we will need the encoder->crtc connection to process
		 * the device sleep, so preserve it during the PSR sequence.
		 */
		if (!crtc->state->self_refresh_active)
			dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
			  crtc->base.id,
			  atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
						     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0);

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;
	struct drm_crtc_state *old_crtc_state;

	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/*
		 * In video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	if (!old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
			dpu_encoder_assign_crtc(encoder, crtc);
	}

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_encoder *encoder;

	if (cstate->self_refresh_active)
		return true;

	drm_for_each_encoder_mask(encoder, crtc->dev, cstate->encoder_mask) {
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
			return true;
	}

	return false;
}

static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
{
	int total_planes = crtc->dev->mode_config.num_total_plane;
	struct drm_atomic_state *state = crtc_state->state;
	struct dpu_global_state *global_state;
	struct drm_plane_state **states;
	struct drm_plane *plane;
	int ret;

	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	dpu_rm_release_all_sspp(global_state, crtc);

	if (!crtc_state->enable)
		return 0;

	states = kcalloc(total_planes, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			goto done;
		}

		states[plane_state->normalized_zpos] = plane_state;
	}

	ret = dpu_assign_plane_resources(global_state, state, crtc, states, total_planes);

done:
	kfree(states);
	return ret;
}
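
/*
 * The states[] array above is indexed by normalized_zpos, so
 * dpu_assign_plane_resources() sees the planes in bottom-to-top stacking
 * order (with NULL holes for unused zpos slots).
 */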

#define MAX_CHANNELS_PER_CRTC 2
#define MAX_HDISPLAY_SPLIT 1080

static struct msm_display_topology dpu_crtc_get_topology(
		struct drm_crtc *crtc,
		struct dpu_kms *dpu_kms,
		struct drm_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	struct msm_display_topology topology = {0};
	struct drm_encoder *drm_enc;

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
		dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
					    &crtc_state->adjusted_mode);

	topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state);

	/*
	 * Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * If DSC is enabled, use 2 LMs for 2:2:1 topology
	 *
	 * Add dspps to the reservation requirements if ctm is requested
	 *
	 * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is
	 * not enabled. This is because in cases where CWB is enabled, num_intf
	 * will count both the WB and real-time phys encoders.
	 *
	 * For non-DSC CWB usecases, have the num_lm be decided by the
	 * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
	 */

	if (topology.num_intf == 2 && !topology.cwb_enabled)
		topology.num_lm = 2;
	else if (topology.num_dsc == 2)
		topology.num_lm = 2;
	else if (dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
	else
		topology.num_lm = 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	return topology;
}
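
/*
 * Example: assuming the encoder reported a single interface, a 1920x1080
 * mode with no DSC or CWB on a catalog with 3D merge takes the has_3d_merge
 * branch; since hdisplay (1920) exceeds MAX_HDISPLAY_SPLIT (1080) it gets
 * num_lm = 2, and num_dspp = 2 as well if a CTM is attached to the state.
 */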

static int dpu_crtc_assign_resources(struct drm_crtc *crtc,
				     struct drm_crtc_state *crtc_state)
{
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC];
	int i, num_lm, num_ctl, num_dspp;
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_global_state *global_state;
	struct dpu_crtc_state *cstate;
	struct msm_display_topology topology;
	int ret;

	/*
	 * Release and Allocate resources on every modeset
	 */
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	dpu_rm_release(global_state, crtc);

	if (!crtc_state->enable)
		return 0;

	topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
	ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
			     crtc_state->crtc, &topology);
	if (ret)
		return ret;

	cstate = to_dpu_crtc_state(crtc_state);

	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						crtc_state->crtc,
						DPU_HW_BLK_CTL, hw_ctl,
						ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					       crtc_state->crtc,
					       DPU_HW_BLK_LM, hw_lm,
					       ARRAY_SIZE(hw_lm));
	num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						 crtc_state->crtc,
						 DPU_HW_BLK_DSPP, hw_dspp,
						 ARRAY_SIZE(hw_dspp));

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl - 1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		if (i < num_dspp)
			cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	return 0;
}
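
/*
 * Note the ctl_idx fallback above: when fewer CTLs than LMs were reserved
 * (e.g. a two-mixer topology driven by a single CTL), the last CTL is shared
 * by the remaining mixers.
 */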

/**
 * dpu_crtc_check_mode_changed - check if a full modeset is required
 * @old_crtc_state: Previous CRTC state
 * @new_crtc_state: Corresponding CRTC state to be checked
 *
 * Check if the changes in the object properties demand a full mode set.
 */
int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_encoder *drm_enc;
	struct drm_crtc *crtc = new_crtc_state->crtc;
	bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state);
	bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state);

	DRM_DEBUG_ATOMIC("%d\n", crtc->base.id);

	/* there might be cases where the encoder needs a modeset too */
	drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) {
		if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state))
			new_crtc_state->mode_changed = true;
	}

	if ((clone_mode_requested && !clone_mode_enabled) ||
	    (!clone_mode_requested && clone_mode_enabled))
		new_crtc_state->mode_changed = true;

	return 0;
}

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	int rc = 0;

	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	/* don't reallocate resources if only ACTIVE has been changed */
	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
		rc = dpu_crtc_assign_resources(crtc, crtc_state);
		if (rc < 0)
			return rc;
	}

	if (dpu_use_virtual_planes &&
	    (crtc_state->planes_changed || crtc_state->zpos_changed)) {
		rc = dpu_crtc_reassign_planes(crtc, crtc_state);
		if (rc < 0)
			return rc;
	}

	if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				 crtc->base.id, crtc_state->enable,
				 crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		return 0;
	}

	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	if (cstate->num_mixers) {
		rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
		if (rc)
			return rc;
	}

	/* FIXME: move this to dpu_plane_atomic_check? */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
				  dpu_crtc->name, plane->base.id, rc);
			return rc;
		}

		if (!pstate->visible)
			continue;

		dpu_pstate->needs_dirtyfb = needs_dirtyfb;
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
			  crtc->base.id, rc);
		return rc;
	}

	return 0;
}

static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
						const struct drm_display_mode *mode)
{
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	u64 adjusted_mode_clk;

	/*
	 * If there is no 3d_mux block we cannot merge LMs, so we cannot split
	 * a large layer into 2 LMs; filter out such modes.
	 */
	if (!dpu_kms->catalog->caps->has_3d_merge &&
	    mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
		return MODE_BAD_HVALUE;

	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
							    dpu_kms->perf.perf_cfg);

	/*
	 * The given mode, adjusted for the perf clock factor, should not exceed
	 * the max core clock rate (mode->clock is in kHz while
	 * max_core_clk_rate is in Hz, hence the multiplication by 1000).
	 */
	if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000)
		return MODE_CLOCK_HIGH;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height is 4K
	 */
	return drm_mode_validate_size(mode,
				      2 * dpu_kms->catalog->caps->max_mixer_width,
				      4096);
}

/**
 * dpu_crtc_vblank - enable or disable vblanks for this crtc
 * @crtc: Pointer to drm crtc object
 * @en: true to enable vblanks, false to disable
 */
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
		   mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			   m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			   out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			   pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				   fb->base.id, (char *) &fb->format->format,
				   fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
					   i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
					   fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
					   fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			   state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			   state->crtc_x, state->crtc_y, state->crtc_w,
			   state->crtc_h);
		seq_printf(s, "\tsspp[0]:%s\n",
			   pstate->pipe.sspp->cap->name);
		seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n",
			   pstate->pipe.multirect_mode, pstate->pipe.multirect_index);
		if (pstate->r_pipe.sspp) {
			seq_printf(s, "\tsspp[1]:%s\n",
				   pstate->r_pipe.sspp->cap->name);
			seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n",
				   pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index);
		}

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			   "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
			   fps, dpu_crtc->vblank_cb_count,
			   ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
		   dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %uk\n",
		   (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000));
	seq_printf(s, "max_per_pipe_ib: %u\n",
		   dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_create_file("status", 0400,
			    crtc->debugfs_entry,
			    dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			    crtc->debugfs_entry,
			    &dpu_crtc->base,
			    &dpu_crtc_debugfs_state_fops);

	return 0;
}
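
/*
 * Both files land in the per-CRTC debugfs directory created by the DRM core,
 * e.g. /sys/kernel/debug/dri/<minor>/crtc-<n>/status and .../state.
 */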
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.atomic_print_state = dpu_crtc_atomic_print_state,
	.late_register = dpu_crtc_late_register,
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.mode_valid = dpu_crtc_mode_valid,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/**
 * dpu_crtc_init - create a new crtc object
 * @dev: dpu device
 * @plane: base plane
 * @cursor: cursor plane
 * @return: new crtc object or error
 *
 * initialize CRTC
 */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
			       struct drm_plane *cursor)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc;
	int i, ret;

	dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
					       plane, cursor,
					       &dpu_crtc_funcs,
					       NULL);

	if (IS_ERR(dpu_crtc))
		return ERR_CAST(dpu_crtc);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
			 &dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				  dpu_crtc_frame_event_work);
	}

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	if (dpu_kms->catalog->dspp_count)
		drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	ret = drm_self_refresh_helper_init(crtc);
	if (ret) {
		DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
			  crtc->name, ret);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}