// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching; the plan
 * is to create all possible physical encoder types and switch between them
 * at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC STATE
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};
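/*
 * A rough summary of the intended event/state flow, derived from the
 * descriptions above (illustrative only, not an exhaustive state machine):
 *
 *   OFF or IDLE  --KICKOFF-->    ON       clocks and IRQs enabled for a frame
 *   ON           --FRAME_DONE--> ON       delayed work queued for idle timeout
 *   ON           --ENTER_IDLE--> IDLE     no updates for IDLE_TIMEOUT, idle PC
 *   ON           --PRE_STOP-->   PRE_OFF  first half of encoder disable
 *   PRE_OFF/IDLE --STOP-->       OFF      clocks and IRQs fully disabled
 */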
/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @commit_done_timedout: True if there has been a timeout on commit after
 *			enabling the encoder.
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization:
 *			only valid after enable. Cleared at disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @enc_lock:		Lock around physical encoder
 *			create/destroy/enable/disable
 * @frame_busy_mask:	Bitmask tracking which phys_encs are still busy
 *			processing the current command.
 *			Bit0 = phys_encs[0] etc.
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timeout_cnt:	atomic counter tracking the number of frame
 *				done timeouts
 * @frame_done_timer:	watchdog timer for frame done event
 * @disp_info:		local copy of msm_display_info struct
 * @idle_pc_supported:	indicates if idle power collapse is supported
 * @rc_lock:		resource control mutex lock to protect
 *			virt encoder over various state changes
 * @rc_state:		resource controller state
 * @delayed_off_work:	delayed worker to schedule disabling of
 *			clks and resources after IDLE_TIMEOUT time.
165 * @topology: topology of the display 166 * @idle_timeout: idle timeout duration in milliseconds 167 * @wide_bus_en: wide bus is enabled on this interface 168 * @dsc: drm_dsc_config pointer, for DSC-enabled encoders 169 */ 170 struct dpu_encoder_virt { 171 struct drm_encoder base; 172 spinlock_t enc_spinlock; 173 174 bool enabled; 175 bool commit_done_timedout; 176 177 unsigned int num_phys_encs; 178 struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; 179 struct dpu_encoder_phys *cur_master; 180 struct dpu_encoder_phys *cur_slave; 181 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 182 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 183 184 unsigned int dsc_mask; 185 186 bool intfs_swapped; 187 188 struct drm_crtc *crtc; 189 struct drm_connector *connector; 190 191 struct mutex enc_lock; 192 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); 193 194 atomic_t frame_done_timeout_ms; 195 atomic_t frame_done_timeout_cnt; 196 struct timer_list frame_done_timer; 197 198 struct msm_display_info disp_info; 199 200 bool idle_pc_supported; 201 struct mutex rc_lock; 202 enum dpu_enc_rc_states rc_state; 203 struct delayed_work delayed_off_work; 204 struct msm_display_topology topology; 205 206 u32 idle_timeout; 207 208 bool wide_bus_en; 209 210 /* DSC configuration */ 211 struct drm_dsc_config *dsc; 212 }; 213 214 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base) 215 216 static u32 dither_matrix[DITHER_MATRIX_SZ] = { 217 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10 218 }; 219 220 /** 221 * dpu_encoder_get_drm_fmt - return DRM fourcc format 222 * @phys_enc: Pointer to physical encoder structure 223 */ 224 u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc) 225 { 226 struct drm_encoder *drm_enc; 227 struct dpu_encoder_virt *dpu_enc; 228 struct drm_display_info *info; 229 struct drm_display_mode *mode; 230 231 drm_enc = phys_enc->parent; 232 dpu_enc = to_dpu_encoder_virt(drm_enc); 233 info = &dpu_enc->connector->display_info; 234 mode = &phys_enc->cached_mode; 235 236 if (drm_mode_is_420_only(info, mode)) 237 return DRM_FORMAT_YUV420; 238 239 return DRM_FORMAT_RGB888; 240 } 241 242 /** 243 * dpu_encoder_needs_periph_flush - return true if physical encoder requires 244 * peripheral flush 245 * @phys_enc: Pointer to physical encoder structure 246 */ 247 bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc) 248 { 249 struct drm_encoder *drm_enc; 250 struct dpu_encoder_virt *dpu_enc; 251 struct msm_display_info *disp_info; 252 struct msm_drm_private *priv; 253 struct drm_display_mode *mode; 254 255 drm_enc = phys_enc->parent; 256 dpu_enc = to_dpu_encoder_virt(drm_enc); 257 disp_info = &dpu_enc->disp_info; 258 priv = drm_enc->dev->dev_private; 259 mode = &phys_enc->cached_mode; 260 261 return phys_enc->hw_intf->cap->type == INTF_DP && 262 msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode); 263 } 264 265 /** 266 * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled 267 * @drm_enc: Pointer to previously created drm encoder structure 268 */ 269 bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) 270 { 271 const struct dpu_encoder_virt *dpu_enc; 272 struct msm_drm_private *priv = drm_enc->dev->dev_private; 273 const struct msm_display_info *disp_info; 274 int index; 275 276 dpu_enc = to_dpu_encoder_virt(drm_enc); 277 disp_info = &dpu_enc->disp_info; 278 index = disp_info->h_tile_instance[0]; 279 280 if (disp_info->intf_type == INTF_DP) 281 return 
msm_dp_wide_bus_available(priv->dp[index]); 282 else if (disp_info->intf_type == INTF_DSI) 283 return msm_dsi_wide_bus_enabled(priv->dsi[index]); 284 285 return false; 286 } 287 288 /** 289 * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled 290 * for the encoder. 291 * @drm_enc: Pointer to previously created drm encoder structure 292 */ 293 bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) 294 { 295 const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 296 297 return dpu_enc->dsc ? true : false; 298 } 299 300 /** 301 * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained 302 * in virtual encoder that can collect CRC values 303 * @drm_enc: Pointer to previously created drm encoder structure 304 * Returns: Number of physical encoders for given drm encoder 305 */ 306 int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) 307 { 308 struct dpu_encoder_virt *dpu_enc; 309 int i, num_intf = 0; 310 311 dpu_enc = to_dpu_encoder_virt(drm_enc); 312 313 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 314 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 315 316 if (phys->hw_intf && phys->hw_intf->ops.setup_misr 317 && phys->hw_intf->ops.collect_misr) 318 num_intf++; 319 } 320 321 return num_intf; 322 } 323 324 /** 325 * dpu_encoder_setup_misr - enable misr calculations 326 * @drm_enc: Pointer to previously created drm encoder structure 327 */ 328 void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) 329 { 330 struct dpu_encoder_virt *dpu_enc; 331 332 int i; 333 334 dpu_enc = to_dpu_encoder_virt(drm_enc); 335 336 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 337 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 338 339 if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr) 340 continue; 341 342 phys->hw_intf->ops.setup_misr(phys->hw_intf); 343 } 344 } 345 346 /** 347 * dpu_encoder_get_crc - get the crc value from interface blocks 348 * @drm_enc: Pointer to previously created drm encoder structure 349 * @crcs: array to fill with CRC data 350 * @pos: offset into the @crcs array 351 * Returns: 0 on success, error otherwise 352 */ 353 int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos) 354 { 355 struct dpu_encoder_virt *dpu_enc; 356 357 int i, rc = 0, entries_added = 0; 358 359 if (!drm_enc->crtc) { 360 DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index); 361 return -EINVAL; 362 } 363 364 dpu_enc = to_dpu_encoder_virt(drm_enc); 365 366 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 367 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 368 369 if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr) 370 continue; 371 372 rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]); 373 if (rc) 374 return rc; 375 entries_added++; 376 } 377 378 return entries_added; 379 } 380 381 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc) 382 { 383 struct dpu_hw_dither_cfg dither_cfg = { 0 }; 384 385 if (!hw_pp->ops.setup_dither) 386 return; 387 388 switch (bpc) { 389 case 6: 390 dither_cfg.c0_bitdepth = 6; 391 dither_cfg.c1_bitdepth = 6; 392 dither_cfg.c2_bitdepth = 6; 393 dither_cfg.c3_bitdepth = 6; 394 dither_cfg.temporal_en = 0; 395 break; 396 default: 397 hw_pp->ops.setup_dither(hw_pp, NULL); 398 return; 399 } 400 401 memcpy(&dither_cfg.matrix, dither_matrix, 402 sizeof(u32) * DITHER_MATRIX_SZ); 403 404 hw_pp->ops.setup_dither(hw_pp, &dither_cfg); 405 } 406 407 static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode 
intf_mode) 408 { 409 switch (intf_mode) { 410 case INTF_MODE_VIDEO: 411 return "INTF_MODE_VIDEO"; 412 case INTF_MODE_CMD: 413 return "INTF_MODE_CMD"; 414 case INTF_MODE_WB_BLOCK: 415 return "INTF_MODE_WB_BLOCK"; 416 case INTF_MODE_WB_LINE: 417 return "INTF_MODE_WB_LINE"; 418 default: 419 return "INTF_MODE_UNKNOWN"; 420 } 421 } 422 423 /** 424 * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has 425 * timed out, including reporting frame error event to crtc and debug dump 426 * @phys_enc: Pointer to physical encoder structure 427 * @intr_idx: Failing interrupt index 428 */ 429 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, 430 enum dpu_intr_idx intr_idx) 431 { 432 DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n", 433 DRMID(phys_enc->parent), 434 dpu_encoder_helper_get_intf_type(phys_enc->intf_mode), 435 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1, 436 phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1, 437 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx); 438 439 dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, 440 DPU_ENCODER_FRAME_EVENT_ERROR); 441 } 442 443 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, 444 u32 irq_idx, struct dpu_encoder_wait_info *info); 445 446 /** 447 * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. 448 * note: will call dpu_encoder_helper_wait_for_irq on timeout 449 * @phys_enc: Pointer to physical encoder structure 450 * @irq_idx: IRQ index 451 * @func: IRQ callback to be called in case of timeout 452 * @wait_info: wait info struct 453 * @return: 0 or -ERROR 454 */ 455 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, 456 unsigned int irq_idx, 457 void (*func)(void *arg), 458 struct dpu_encoder_wait_info *wait_info) 459 { 460 u32 irq_status; 461 int ret; 462 463 if (!wait_info) { 464 DPU_ERROR("invalid params\n"); 465 return -EINVAL; 466 } 467 /* note: do master / slave checking outside */ 468 469 /* return EWOULDBLOCK since we know the wait isn't necessary */ 470 if (phys_enc->enable_state == DPU_ENC_DISABLED) { 471 DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n", 472 DRMID(phys_enc->parent), func, 473 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); 474 return -EWOULDBLOCK; 475 } 476 477 if (irq_idx == 0) { 478 DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n", 479 DRMID(phys_enc->parent), func); 480 return 0; 481 } 482 483 DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n", 484 DRMID(phys_enc->parent), func, 485 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0, 486 atomic_read(wait_info->atomic_cnt)); 487 488 ret = dpu_encoder_helper_wait_event_timeout( 489 DRMID(phys_enc->parent), 490 irq_idx, 491 wait_info); 492 493 if (ret <= 0) { 494 irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx); 495 if (irq_status) { 496 unsigned long flags; 497 498 DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n", 499 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 500 DRMID(phys_enc->parent), func, 501 phys_enc->hw_pp->idx - PINGPONG_0, 502 atomic_read(wait_info->atomic_cnt)); 503 local_irq_save(flags); 504 func(phys_enc); 505 local_irq_restore(flags); 506 ret = 0; 507 } else { 508 ret = -ETIMEDOUT; 509 DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n", 510 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 511 DRMID(phys_enc->parent), func, 512 phys_enc->hw_pp->idx - PINGPONG_0, 513 
atomic_read(wait_info->atomic_cnt)); 514 } 515 } else { 516 ret = 0; 517 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent), 518 func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 519 phys_enc->hw_pp->idx - PINGPONG_0, 520 atomic_read(wait_info->atomic_cnt)); 521 } 522 523 return ret; 524 } 525 526 /** 527 * dpu_encoder_get_vsync_count - get vsync count for the encoder. 528 * @drm_enc: Pointer to previously created drm encoder structure 529 */ 530 int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) 531 { 532 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 533 struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL; 534 return phys ? atomic_read(&phys->vsync_cnt) : 0; 535 } 536 537 /** 538 * dpu_encoder_get_linecount - get interface line count for the encoder. 539 * @drm_enc: Pointer to previously created drm encoder structure 540 */ 541 int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) 542 { 543 struct dpu_encoder_virt *dpu_enc; 544 struct dpu_encoder_phys *phys; 545 int linecount = 0; 546 547 dpu_enc = to_dpu_encoder_virt(drm_enc); 548 phys = dpu_enc ? dpu_enc->cur_master : NULL; 549 550 if (phys && phys->ops.get_line_count) 551 linecount = phys->ops.get_line_count(phys); 552 553 return linecount; 554 } 555 556 /** 557 * dpu_encoder_helper_split_config - split display configuration helper function 558 * This helper function may be used by physical encoders to configure 559 * the split display related registers. 560 * @phys_enc: Pointer to physical encoder structure 561 * @interface: enum dpu_intf setting 562 */ 563 void dpu_encoder_helper_split_config( 564 struct dpu_encoder_phys *phys_enc, 565 enum dpu_intf interface) 566 { 567 struct dpu_encoder_virt *dpu_enc; 568 struct split_pipe_cfg cfg = { 0 }; 569 struct dpu_hw_mdp *hw_mdptop; 570 struct msm_display_info *disp_info; 571 572 if (!phys_enc->hw_mdptop || !phys_enc->parent) { 573 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); 574 return; 575 } 576 577 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 578 hw_mdptop = phys_enc->hw_mdptop; 579 disp_info = &dpu_enc->disp_info; 580 581 if (disp_info->intf_type != INTF_DSI) 582 return; 583 584 /** 585 * disable split modes since encoder will be operating in as the only 586 * encoder, either for the entire use case in the case of, for example, 587 * single DSI, or for this frame in the case of left/right only partial 588 * update. 589 */ 590 if (phys_enc->split_role == ENC_ROLE_SOLO) { 591 if (hw_mdptop->ops.setup_split_pipe) 592 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); 593 return; 594 } 595 596 cfg.en = true; 597 cfg.mode = phys_enc->intf_mode; 598 cfg.intf = interface; 599 600 if (cfg.en && phys_enc->ops.needs_single_flush && 601 phys_enc->ops.needs_single_flush(phys_enc)) 602 cfg.split_flush_en = true; 603 604 if (phys_enc->split_role == ENC_ROLE_MASTER) { 605 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en); 606 607 if (hw_mdptop->ops.setup_split_pipe) 608 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); 609 } 610 } 611 612 /** 613 * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. 
 * @drm_enc: Pointer to previously created drm encoder structure
 */
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
	if (dpu_enc->dsc)
		num_dsc = 2;

	return (num_dsc > 0) && (num_dsc > intf_count);
}

/**
 * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
 * This helper function is used by physical encoder to get DSC config
 * used for this encoder.
 * @drm_enc: Pointer to encoder structure
 */
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int index = dpu_enc->disp_info.h_tile_instance[0];

	if (dpu_enc->disp_info.intf_type == INTF_DSI)
		return msm_dsi_get_dsc_config(priv->dsi[index]);

	return NULL;
}

static struct msm_display_topology dpu_encoder_get_topology(
			struct dpu_encoder_virt *dpu_enc,
			struct dpu_kms *dpu_kms,
			struct drm_display_mode *mode,
			struct drm_crtc_state *crtc_state,
			struct drm_dsc_config *dsc)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Add dspps to the reservation requirements if ctm is requested
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	topology.num_intf = intf_count;

	if (dsc) {
		/*
		 * In case of Display Stream Compression (DSC), we would use
		 * 2 DSC encoders, 2 layer mixers and 1 interface
		 * this is power optimal and can drive up to (including) 4k
		 * screens
		 */
		topology.num_dsc = 2;
		topology.num_lm = 2;
		topology.num_intf = 1;
	}

	return topology;
}
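/*
 * Illustrative examples of how the rules in dpu_encoder_get_topology() play
 * out (a summary of the code above, not an exhaustive list):
 *
 *   - dual-interface split display:              2 LM, 2 INTF
 *   - single interface, hdisplay <= 1080:        1 LM, 1 INTF
 *   - single interface, hdisplay > 1080,
 *     3D merge supported:                        2 LM, 1 INTF
 *   - DSC enabled:                               forced to 2 LM, 2 DSC, 1 INTF
 *
 * When CTM is requested on the CRTC, one DSPP is reserved per layer mixer.
 */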
static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
					      struct drm_encoder *drm_enc,
					      struct dpu_global_state *global_state,
					      struct drm_crtc_state *crtc_state)
{
	struct dpu_crtc_state *cstate;
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
	int num_lm, num_ctl, num_dspp, i;

	cstate = to_dpu_crtc_state(crtc_state);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));

	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
		ARRAY_SIZE(hw_dspp));

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
	}

	cstate->num_mixers = num_lm;
}
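/*
 * Note on the mixer/CTL pairing above: if the resource manager returned fewer
 * CTL blocks than layer mixers (e.g. num_lm == 2, num_ctl == 1), ctl_idx
 * clamps to num_ctl - 1, so the extra mixers share the last CTL and are
 * flushed through the same control path. A DSPP is only attached to a mixer
 * when one was actually reserved for that index.
 */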
static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct msm_display_info *disp_info;
	struct dpu_global_state *global_state;
	struct drm_framebuffer *fb;
	struct drm_dsc_config *dsc;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	disp_info = &dpu_enc->disp_info;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	dsc = dpu_encoder_get_dsc_config(drm_enc);

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);

	/*
	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
	 * earlier.
	 */
	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
		fb = conn_state->writeback_job->fb;

		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
			topology.needs_cdm = true;
	} else if (disp_info->intf_type == INTF_DP) {
		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
			topology.needs_cdm = true;
	}

	if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
		crtc_state->mode_changed = true;
	else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
		crtc_state->mode_changed = true;

	/*
	 * Release and Allocate resources on every modeset
	 * Don't allocate when active is false.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		dpu_rm_release(global_state, drm_enc);

		if (!crtc_state->active_changed || crtc_state->enable)
			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
					drm_enc, crtc_state, topology);
		if (!ret)
			dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
							  global_state, crtc_state);
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}

static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	struct dpu_encoder_phys *phys_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
					dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
				dpu_enc->num_phys_encs,
				(int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);

		vsync_cfg.vsync_source = disp_info->vsync_source;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);

		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys_enc = dpu_enc->phys_encs[i];

			if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
				phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
						vsync_cfg.vsync_source);
		}
	}
}

static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_enable(phys);
	}
}

static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_disable(phys);
	}
}

static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_enable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* enable DPU core clks */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* enable all the irq */
	_dpu_encoder_irq_enable(drm_enc);
}

static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_disable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* disable all the irq */
	_dpu_encoder_irq_disable(drm_enc);

	/* disable DPU core clks */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
	 * STOP events and return early for other events (ie wb display).
974 */ 975 if (!dpu_enc->idle_pc_supported && 976 (sw_event != DPU_ENC_RC_EVENT_KICKOFF && 977 sw_event != DPU_ENC_RC_EVENT_STOP && 978 sw_event != DPU_ENC_RC_EVENT_PRE_STOP)) 979 return 0; 980 981 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported, 982 dpu_enc->rc_state, "begin"); 983 984 switch (sw_event) { 985 case DPU_ENC_RC_EVENT_KICKOFF: 986 /* cancel delayed off work, if any */ 987 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) 988 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", 989 sw_event); 990 991 mutex_lock(&dpu_enc->rc_lock); 992 993 /* return if the resource control is already in ON state */ 994 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { 995 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n", 996 DRMID(drm_enc), sw_event); 997 mutex_unlock(&dpu_enc->rc_lock); 998 return 0; 999 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF && 1000 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) { 1001 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n", 1002 DRMID(drm_enc), sw_event, 1003 dpu_enc->rc_state); 1004 mutex_unlock(&dpu_enc->rc_lock); 1005 return -EINVAL; 1006 } 1007 1008 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) 1009 _dpu_encoder_irq_enable(drm_enc); 1010 else 1011 _dpu_encoder_resource_enable(drm_enc); 1012 1013 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON; 1014 1015 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1016 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1017 "kickoff"); 1018 1019 mutex_unlock(&dpu_enc->rc_lock); 1020 break; 1021 1022 case DPU_ENC_RC_EVENT_FRAME_DONE: 1023 /* 1024 * mutex lock is not used as this event happens at interrupt 1025 * context. And locking is not required as, the other events 1026 * like KICKOFF and STOP does a wait-for-idle before executing 1027 * the resource_control 1028 */ 1029 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { 1030 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n", 1031 DRMID(drm_enc), sw_event, 1032 dpu_enc->rc_state); 1033 return -EINVAL; 1034 } 1035 1036 /* 1037 * schedule off work item only when there are no 1038 * frames pending 1039 */ 1040 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) { 1041 DRM_DEBUG_KMS("id:%d skip schedule work\n", 1042 DRMID(drm_enc)); 1043 return 0; 1044 } 1045 1046 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work, 1047 msecs_to_jiffies(dpu_enc->idle_timeout)); 1048 1049 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1050 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1051 "frame done"); 1052 break; 1053 1054 case DPU_ENC_RC_EVENT_PRE_STOP: 1055 /* cancel delayed off work, if any */ 1056 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) 1057 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", 1058 sw_event); 1059 1060 mutex_lock(&dpu_enc->rc_lock); 1061 1062 if (is_vid_mode && 1063 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { 1064 _dpu_encoder_irq_enable(drm_enc); 1065 } 1066 /* skip if is already OFF or IDLE, resources are off already */ 1067 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF || 1068 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { 1069 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n", 1070 DRMID(drm_enc), sw_event, 1071 dpu_enc->rc_state); 1072 mutex_unlock(&dpu_enc->rc_lock); 1073 return 0; 1074 } 1075 1076 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF; 1077 1078 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1079 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1080 "pre stop"); 1081 1082 mutex_unlock(&dpu_enc->rc_lock); 1083 break; 1084 1085 case DPU_ENC_RC_EVENT_STOP: 1086 
mutex_lock(&dpu_enc->rc_lock); 1087 1088 /* return if the resource control is already in OFF state */ 1089 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) { 1090 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n", 1091 DRMID(drm_enc), sw_event); 1092 mutex_unlock(&dpu_enc->rc_lock); 1093 return 0; 1094 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { 1095 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n", 1096 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 1097 mutex_unlock(&dpu_enc->rc_lock); 1098 return -EINVAL; 1099 } 1100 1101 /** 1102 * expect to arrive here only if in either idle state or pre-off 1103 * and in IDLE state the resources are already disabled 1104 */ 1105 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF) 1106 _dpu_encoder_resource_disable(drm_enc); 1107 1108 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF; 1109 1110 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1111 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1112 "stop"); 1113 1114 mutex_unlock(&dpu_enc->rc_lock); 1115 break; 1116 1117 case DPU_ENC_RC_EVENT_ENTER_IDLE: 1118 mutex_lock(&dpu_enc->rc_lock); 1119 1120 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { 1121 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n", 1122 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 1123 mutex_unlock(&dpu_enc->rc_lock); 1124 return 0; 1125 } 1126 1127 /* 1128 * if we are in ON but a frame was just kicked off, 1129 * ignore the IDLE event, it's probably a stale timer event 1130 */ 1131 if (dpu_enc->frame_busy_mask[0]) { 1132 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n", 1133 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 1134 mutex_unlock(&dpu_enc->rc_lock); 1135 return 0; 1136 } 1137 1138 if (is_vid_mode) 1139 _dpu_encoder_irq_disable(drm_enc); 1140 else 1141 _dpu_encoder_resource_disable(drm_enc); 1142 1143 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE; 1144 1145 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1146 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1147 "idle"); 1148 1149 mutex_unlock(&dpu_enc->rc_lock); 1150 break; 1151 1152 default: 1153 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc), 1154 sw_event); 1155 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1156 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1157 "error"); 1158 break; 1159 } 1160 1161 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1162 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1163 "end"); 1164 return 0; 1165 } 1166 1167 /** 1168 * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. 1169 * @drm_enc: Pointer to previously created drm encoder structure 1170 * @job: Pointer to the current drm writeback job 1171 */ 1172 void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, 1173 struct drm_writeback_job *job) 1174 { 1175 struct dpu_encoder_virt *dpu_enc; 1176 int i; 1177 1178 dpu_enc = to_dpu_encoder_virt(drm_enc); 1179 1180 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1181 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1182 1183 if (phys->ops.prepare_wb_job) 1184 phys->ops.prepare_wb_job(phys, job); 1185 1186 } 1187 } 1188 1189 /** 1190 * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. 
1191 * @drm_enc: Pointer to previously created drm encoder structure 1192 * @job: Pointer to the current drm writeback job 1193 */ 1194 void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, 1195 struct drm_writeback_job *job) 1196 { 1197 struct dpu_encoder_virt *dpu_enc; 1198 int i; 1199 1200 dpu_enc = to_dpu_encoder_virt(drm_enc); 1201 1202 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1203 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1204 1205 if (phys->ops.cleanup_wb_job) 1206 phys->ops.cleanup_wb_job(phys, job); 1207 1208 } 1209 } 1210 1211 static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, 1212 struct drm_crtc_state *crtc_state, 1213 struct drm_connector_state *conn_state) 1214 { 1215 struct dpu_encoder_virt *dpu_enc; 1216 struct msm_drm_private *priv; 1217 struct dpu_kms *dpu_kms; 1218 struct dpu_global_state *global_state; 1219 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; 1220 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; 1221 struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; 1222 int num_ctl, num_pp, num_dsc; 1223 unsigned int dsc_mask = 0; 1224 int i; 1225 1226 if (!drm_enc) { 1227 DPU_ERROR("invalid encoder\n"); 1228 return; 1229 } 1230 1231 dpu_enc = to_dpu_encoder_virt(drm_enc); 1232 DPU_DEBUG_ENC(dpu_enc, "\n"); 1233 1234 priv = drm_enc->dev->dev_private; 1235 dpu_kms = to_dpu_kms(priv->kms); 1236 1237 global_state = dpu_kms_get_existing_global_state(dpu_kms); 1238 if (IS_ERR_OR_NULL(global_state)) { 1239 DPU_ERROR("Failed to get global state"); 1240 return; 1241 } 1242 1243 trace_dpu_enc_mode_set(DRMID(drm_enc)); 1244 1245 /* Query resource that have been reserved in atomic check step. */ 1246 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1247 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, 1248 ARRAY_SIZE(hw_pp)); 1249 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1250 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); 1251 1252 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 1253 dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) 1254 : NULL; 1255 1256 num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1257 drm_enc->base.id, DPU_HW_BLK_DSC, 1258 hw_dsc, ARRAY_SIZE(hw_dsc)); 1259 for (i = 0; i < num_dsc; i++) { 1260 dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]); 1261 dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0); 1262 } 1263 1264 dpu_enc->dsc_mask = dsc_mask; 1265 1266 if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) || 1267 dpu_enc->disp_info.intf_type == INTF_DP) { 1268 struct dpu_hw_blk *hw_cdm = NULL; 1269 1270 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1271 drm_enc->base.id, DPU_HW_BLK_CDM, 1272 &hw_cdm, 1); 1273 dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; 1274 } 1275 1276 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1277 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1278 1279 phys->hw_pp = dpu_enc->hw_pp[i]; 1280 if (!phys->hw_pp) { 1281 DPU_ERROR_ENC(dpu_enc, 1282 "no pp block assigned at idx: %d\n", i); 1283 return; 1284 } 1285 1286 phys->hw_ctl = i < num_ctl ? 
to_dpu_hw_ctl(hw_ctl[i]) : NULL; 1287 if (!phys->hw_ctl) { 1288 DPU_ERROR_ENC(dpu_enc, 1289 "no ctl block assigned at idx: %d\n", i); 1290 return; 1291 } 1292 1293 phys->cached_mode = crtc_state->adjusted_mode; 1294 if (phys->ops.atomic_mode_set) 1295 phys->ops.atomic_mode_set(phys, crtc_state, conn_state); 1296 } 1297 } 1298 1299 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) 1300 { 1301 struct dpu_encoder_virt *dpu_enc = NULL; 1302 int i; 1303 1304 if (!drm_enc || !drm_enc->dev) { 1305 DPU_ERROR("invalid parameters\n"); 1306 return; 1307 } 1308 1309 dpu_enc = to_dpu_encoder_virt(drm_enc); 1310 if (!dpu_enc || !dpu_enc->cur_master) { 1311 DPU_ERROR("invalid dpu encoder/master\n"); 1312 return; 1313 } 1314 1315 1316 if (dpu_enc->disp_info.intf_type == INTF_DP && 1317 dpu_enc->cur_master->hw_mdptop && 1318 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) 1319 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( 1320 dpu_enc->cur_master->hw_mdptop); 1321 1322 if (dpu_enc->disp_info.is_cmd_mode) 1323 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info); 1324 1325 if (dpu_enc->disp_info.intf_type == INTF_DSI && 1326 !WARN_ON(dpu_enc->num_phys_encs == 0)) { 1327 unsigned bpc = dpu_enc->connector->display_info.bpc; 1328 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 1329 if (!dpu_enc->hw_pp[i]) 1330 continue; 1331 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc); 1332 } 1333 } 1334 } 1335 1336 /** 1337 * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs 1338 * @drm_enc: encoder pointer 1339 */ 1340 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc) 1341 { 1342 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1343 1344 mutex_lock(&dpu_enc->enc_lock); 1345 1346 if (!dpu_enc->enabled) 1347 goto out; 1348 1349 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore) 1350 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave); 1351 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore) 1352 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master); 1353 1354 _dpu_encoder_virt_enable_helper(drm_enc); 1355 1356 out: 1357 mutex_unlock(&dpu_enc->enc_lock); 1358 } 1359 1360 static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc, 1361 struct drm_atomic_state *state) 1362 { 1363 struct dpu_encoder_virt *dpu_enc = NULL; 1364 int ret = 0; 1365 struct drm_display_mode *cur_mode = NULL; 1366 1367 dpu_enc = to_dpu_encoder_virt(drm_enc); 1368 dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc); 1369 1370 atomic_set(&dpu_enc->frame_done_timeout_cnt, 0); 1371 1372 mutex_lock(&dpu_enc->enc_lock); 1373 1374 dpu_enc->commit_done_timedout = false; 1375 1376 dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); 1377 1378 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; 1379 1380 dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc); 1381 1382 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, 1383 cur_mode->vdisplay); 1384 1385 /* always enable slave encoder before master */ 1386 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable) 1387 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave); 1388 1389 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable) 1390 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master); 1391 1392 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 1393 if (ret) { 1394 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n", 1395 ret); 1396 goto out; 1397 } 1398 1399 _dpu_encoder_virt_enable_helper(drm_enc); 1400 
1401 dpu_enc->enabled = true; 1402 1403 out: 1404 mutex_unlock(&dpu_enc->enc_lock); 1405 } 1406 1407 static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc, 1408 struct drm_atomic_state *state) 1409 { 1410 struct dpu_encoder_virt *dpu_enc = NULL; 1411 struct drm_crtc *crtc; 1412 struct drm_crtc_state *old_state = NULL; 1413 int i = 0; 1414 1415 dpu_enc = to_dpu_encoder_virt(drm_enc); 1416 DPU_DEBUG_ENC(dpu_enc, "\n"); 1417 1418 crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc); 1419 if (crtc) 1420 old_state = drm_atomic_get_old_crtc_state(state, crtc); 1421 1422 /* 1423 * The encoder is already disabled if self refresh mode was set earlier, 1424 * in the old_state for the corresponding crtc. 1425 */ 1426 if (old_state && old_state->self_refresh_active) 1427 return; 1428 1429 mutex_lock(&dpu_enc->enc_lock); 1430 dpu_enc->enabled = false; 1431 1432 trace_dpu_enc_disable(DRMID(drm_enc)); 1433 1434 /* wait for idle */ 1435 dpu_encoder_wait_for_tx_complete(drm_enc); 1436 1437 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); 1438 1439 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1440 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1441 1442 if (phys->ops.disable) 1443 phys->ops.disable(phys); 1444 } 1445 1446 1447 /* after phys waits for frame-done, should be no more frames pending */ 1448 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 1449 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); 1450 del_timer_sync(&dpu_enc->frame_done_timer); 1451 } 1452 1453 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); 1454 1455 dpu_enc->connector = NULL; 1456 1457 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); 1458 1459 mutex_unlock(&dpu_enc->enc_lock); 1460 } 1461 1462 static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog, 1463 struct dpu_rm *dpu_rm, 1464 enum dpu_intf_type type, u32 controller_id) 1465 { 1466 int i = 0; 1467 1468 if (type == INTF_WB) 1469 return NULL; 1470 1471 for (i = 0; i < catalog->intf_count; i++) { 1472 if (catalog->intf[i].type == type 1473 && catalog->intf[i].controller_id == controller_id) { 1474 return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id); 1475 } 1476 } 1477 1478 return NULL; 1479 } 1480 1481 /** 1482 * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception 1483 * @drm_enc: Pointer to drm encoder structure 1484 * @phy_enc: Pointer to physical encoder 1485 * Note: This is called from IRQ handler context. 1486 */ 1487 void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, 1488 struct dpu_encoder_phys *phy_enc) 1489 { 1490 struct dpu_encoder_virt *dpu_enc = NULL; 1491 unsigned long lock_flags; 1492 1493 if (!drm_enc || !phy_enc) 1494 return; 1495 1496 DPU_ATRACE_BEGIN("encoder_vblank_callback"); 1497 dpu_enc = to_dpu_encoder_virt(drm_enc); 1498 1499 atomic_inc(&phy_enc->vsync_cnt); 1500 1501 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1502 if (dpu_enc->crtc) 1503 dpu_crtc_vblank_callback(dpu_enc->crtc); 1504 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1505 1506 DPU_ATRACE_END("encoder_vblank_callback"); 1507 } 1508 1509 /** 1510 * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception 1511 * @drm_enc: Pointer to drm encoder structure 1512 * @phy_enc: Pointer to physical encoder 1513 * Note: This is called from IRQ handler context. 
1514 */ 1515 void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, 1516 struct dpu_encoder_phys *phy_enc) 1517 { 1518 if (!phy_enc) 1519 return; 1520 1521 DPU_ATRACE_BEGIN("encoder_underrun_callback"); 1522 atomic_inc(&phy_enc->underrun_cnt); 1523 1524 /* trigger dump only on the first underrun */ 1525 if (atomic_read(&phy_enc->underrun_cnt) == 1) 1526 msm_disp_snapshot_state(drm_enc->dev); 1527 1528 trace_dpu_enc_underrun_cb(DRMID(drm_enc), 1529 atomic_read(&phy_enc->underrun_cnt)); 1530 DPU_ATRACE_END("encoder_underrun_callback"); 1531 } 1532 1533 /** 1534 * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to 1535 * @drm_enc: encoder pointer 1536 * @crtc: crtc pointer 1537 */ 1538 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) 1539 { 1540 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1541 unsigned long lock_flags; 1542 1543 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1544 /* crtc should always be cleared before re-assigning */ 1545 WARN_ON(crtc && dpu_enc->crtc); 1546 dpu_enc->crtc = crtc; 1547 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1548 } 1549 1550 /** 1551 * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if 1552 * the encoder is assigned to the given crtc 1553 * @drm_enc: encoder pointer 1554 * @crtc: crtc pointer 1555 * @enable: true if vblank should be enabled 1556 */ 1557 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, 1558 struct drm_crtc *crtc, bool enable) 1559 { 1560 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1561 unsigned long lock_flags; 1562 int i; 1563 1564 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable); 1565 1566 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1567 if (dpu_enc->crtc != crtc) { 1568 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1569 return; 1570 } 1571 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1572 1573 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1574 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1575 1576 if (phys->ops.control_vblank_irq) 1577 phys->ops.control_vblank_irq(phys, enable); 1578 } 1579 } 1580 1581 /** 1582 * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys 1583 * encoder completes last request frame 1584 * @drm_enc: Pointer to drm encoder structure 1585 * @ready_phys: Pointer to physical encoder 1586 * @event: Event to process 1587 */ 1588 void dpu_encoder_frame_done_callback( 1589 struct drm_encoder *drm_enc, 1590 struct dpu_encoder_phys *ready_phys, u32 event) 1591 { 1592 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1593 unsigned int i; 1594 1595 if (event & (DPU_ENCODER_FRAME_EVENT_DONE 1596 | DPU_ENCODER_FRAME_EVENT_ERROR 1597 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { 1598 1599 if (!dpu_enc->frame_busy_mask[0]) { 1600 /** 1601 * suppress frame_done without waiter, 1602 * likely autorefresh 1603 */ 1604 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event, 1605 dpu_encoder_helper_get_intf_type(ready_phys->intf_mode), 1606 ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1, 1607 ready_phys->hw_wb ? 
ready_phys->hw_wb->idx : -1); 1608 return; 1609 } 1610 1611 /* One of the physical encoders has become idle */ 1612 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1613 if (dpu_enc->phys_encs[i] == ready_phys) { 1614 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i, 1615 dpu_enc->frame_busy_mask[0]); 1616 clear_bit(i, dpu_enc->frame_busy_mask); 1617 } 1618 } 1619 1620 if (!dpu_enc->frame_busy_mask[0]) { 1621 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 1622 del_timer(&dpu_enc->frame_done_timer); 1623 1624 dpu_encoder_resource_control(drm_enc, 1625 DPU_ENC_RC_EVENT_FRAME_DONE); 1626 1627 if (dpu_enc->crtc) 1628 dpu_crtc_frame_event_cb(dpu_enc->crtc, event); 1629 } 1630 } else { 1631 if (dpu_enc->crtc) 1632 dpu_crtc_frame_event_cb(dpu_enc->crtc, event); 1633 } 1634 } 1635 1636 static void dpu_encoder_off_work(struct work_struct *work) 1637 { 1638 struct dpu_encoder_virt *dpu_enc = container_of(work, 1639 struct dpu_encoder_virt, delayed_off_work.work); 1640 1641 dpu_encoder_resource_control(&dpu_enc->base, 1642 DPU_ENC_RC_EVENT_ENTER_IDLE); 1643 1644 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL, 1645 DPU_ENCODER_FRAME_EVENT_IDLE); 1646 } 1647 1648 /** 1649 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder 1650 * @drm_enc: Pointer to drm encoder structure 1651 * @phys: Pointer to physical encoder structure 1652 * @extra_flush_bits: Additional bit mask to include in flush trigger 1653 */ 1654 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, 1655 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits) 1656 { 1657 struct dpu_hw_ctl *ctl; 1658 int pending_kickoff_cnt; 1659 u32 ret = UINT_MAX; 1660 1661 if (!phys->hw_pp) { 1662 DPU_ERROR("invalid pingpong hw\n"); 1663 return; 1664 } 1665 1666 ctl = phys->hw_ctl; 1667 if (!ctl->ops.trigger_flush) { 1668 DPU_ERROR("missing trigger cb\n"); 1669 return; 1670 } 1671 1672 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys); 1673 1674 if (extra_flush_bits && ctl->ops.update_pending_flush) 1675 ctl->ops.update_pending_flush(ctl, extra_flush_bits); 1676 1677 ctl->ops.trigger_flush(ctl); 1678 1679 if (ctl->ops.get_pending_flush) 1680 ret = ctl->ops.get_pending_flush(ctl); 1681 1682 trace_dpu_enc_trigger_flush(DRMID(drm_enc), 1683 dpu_encoder_helper_get_intf_type(phys->intf_mode), 1684 phys->hw_intf ? phys->hw_intf->idx : -1, 1685 phys->hw_wb ? phys->hw_wb->idx : -1, 1686 pending_kickoff_cnt, ctl->idx, 1687 extra_flush_bits, ret); 1688 } 1689 1690 /** 1691 * _dpu_encoder_trigger_start - trigger start for a physical encoder 1692 * @phys: Pointer to physical encoder structure 1693 */ 1694 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) 1695 { 1696 if (!phys) { 1697 DPU_ERROR("invalid argument(s)\n"); 1698 return; 1699 } 1700 1701 if (!phys->hw_pp) { 1702 DPU_ERROR("invalid pingpong hw\n"); 1703 return; 1704 } 1705 1706 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED) 1707 phys->ops.trigger_start(phys); 1708 } 1709 1710 /** 1711 * dpu_encoder_helper_trigger_start - control start helper function 1712 * This helper function may be optionally specified by physical 1713 * encoders if they require ctl_start triggering. 
1714 * @phys_enc: Pointer to physical encoder structure 1715 */ 1716 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) 1717 { 1718 struct dpu_hw_ctl *ctl; 1719 1720 ctl = phys_enc->hw_ctl; 1721 if (ctl->ops.trigger_start) { 1722 ctl->ops.trigger_start(ctl); 1723 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx); 1724 } 1725 } 1726 1727 static int dpu_encoder_helper_wait_event_timeout( 1728 int32_t drm_id, 1729 unsigned int irq_idx, 1730 struct dpu_encoder_wait_info *info) 1731 { 1732 int rc = 0; 1733 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms; 1734 s64 jiffies = msecs_to_jiffies(info->timeout_ms); 1735 s64 time; 1736 1737 do { 1738 rc = wait_event_timeout(*(info->wq), 1739 atomic_read(info->atomic_cnt) == 0, jiffies); 1740 time = ktime_to_ms(ktime_get()); 1741 1742 trace_dpu_enc_wait_event_timeout(drm_id, 1743 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 1744 rc, time, 1745 expected_time, 1746 atomic_read(info->atomic_cnt)); 1747 /* If we timed out, counter is valid and time is less, wait again */ 1748 } while (atomic_read(info->atomic_cnt) && (rc == 0) && 1749 (time < expected_time)); 1750 1751 return rc; 1752 } 1753 1754 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc) 1755 { 1756 struct dpu_encoder_virt *dpu_enc; 1757 struct dpu_hw_ctl *ctl; 1758 int rc; 1759 struct drm_encoder *drm_enc; 1760 1761 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 1762 ctl = phys_enc->hw_ctl; 1763 drm_enc = phys_enc->parent; 1764 1765 if (!ctl->ops.reset) 1766 return; 1767 1768 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc), 1769 ctl->idx); 1770 1771 rc = ctl->ops.reset(ctl); 1772 if (rc) { 1773 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx); 1774 msm_disp_snapshot_state(drm_enc->dev); 1775 } 1776 1777 phys_enc->enable_state = DPU_ENC_ENABLED; 1778 } 1779 1780 /** 1781 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff 1782 * Iterate through the physical encoders and perform consolidated flush 1783 * and/or control start triggering as needed. This is done in the virtual 1784 * encoder rather than the individual physical ones in order to handle 1785 * use cases that require visibility into multiple physical encoders at 1786 * a time. 1787 * @dpu_enc: Pointer to virtual encoder structure 1788 */ 1789 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) 1790 { 1791 struct dpu_hw_ctl *ctl; 1792 uint32_t i, pending_flush; 1793 unsigned long lock_flags; 1794 1795 pending_flush = 0x0; 1796 1797 /* update pending counts and trigger kickoff ctl flush atomically */ 1798 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1799 1800 /* don't perform flush/start operations for slave encoders */ 1801 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1802 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1803 1804 if (phys->enable_state == DPU_ENC_DISABLED) 1805 continue; 1806 1807 ctl = phys->hw_ctl; 1808 1809 /* 1810 * This is cleared in frame_done worker, which isn't invoked 1811 * for async commits. So don't set this for async, since it'll 1812 * roll over to the next commit. 
1813 */ 1814 if (phys->split_role != ENC_ROLE_SLAVE) 1815 set_bit(i, dpu_enc->frame_busy_mask); 1816 1817 if (!phys->ops.needs_single_flush || 1818 !phys->ops.needs_single_flush(phys)) 1819 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0); 1820 else if (ctl->ops.get_pending_flush) 1821 pending_flush |= ctl->ops.get_pending_flush(ctl); 1822 } 1823 1824 /* for split flush, combine pending flush masks and send to master */ 1825 if (pending_flush && dpu_enc->cur_master) { 1826 _dpu_encoder_trigger_flush( 1827 &dpu_enc->base, 1828 dpu_enc->cur_master, 1829 pending_flush); 1830 } 1831 1832 _dpu_encoder_trigger_start(dpu_enc->cur_master); 1833 1834 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1835 } 1836 1837 /** 1838 * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous 1839 * kickoff and trigger the ctl prepare progress for command mode display. 1840 * @drm_enc: encoder pointer 1841 */ 1842 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) 1843 { 1844 struct dpu_encoder_virt *dpu_enc; 1845 struct dpu_encoder_phys *phys; 1846 unsigned int i; 1847 struct dpu_hw_ctl *ctl; 1848 struct msm_display_info *disp_info; 1849 1850 if (!drm_enc) { 1851 DPU_ERROR("invalid encoder\n"); 1852 return; 1853 } 1854 dpu_enc = to_dpu_encoder_virt(drm_enc); 1855 disp_info = &dpu_enc->disp_info; 1856 1857 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1858 phys = dpu_enc->phys_encs[i]; 1859 1860 ctl = phys->hw_ctl; 1861 ctl->ops.clear_pending_flush(ctl); 1862 1863 /* update only for command mode primary ctl */ 1864 if ((phys == dpu_enc->cur_master) && 1865 disp_info->is_cmd_mode 1866 && ctl->ops.trigger_pending) 1867 ctl->ops.trigger_pending(ctl); 1868 } 1869 } 1870 1871 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, 1872 struct drm_display_mode *mode) 1873 { 1874 u64 pclk_rate; 1875 u32 pclk_period; 1876 u32 line_time; 1877 1878 /* 1879 * For linetime calculation, only operate on master encoder. 1880 */ 1881 if (!dpu_enc->cur_master) 1882 return 0; 1883 1884 if (!dpu_enc->cur_master->ops.get_line_count) { 1885 DPU_ERROR("get_line_count function not defined\n"); 1886 return 0; 1887 } 1888 1889 pclk_rate = mode->clock; /* pixel clock in kHz */ 1890 if (pclk_rate == 0) { 1891 DPU_ERROR("pclk is 0, cannot calculate line time\n"); 1892 return 0; 1893 } 1894 1895 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); 1896 if (pclk_period == 0) { 1897 DPU_ERROR("pclk period is 0\n"); 1898 return 0; 1899 } 1900 1901 /* 1902 * Line time calculation based on Pixel clock and HTOTAL. 1903 * Final unit is in ns. 
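 * (For example, a 1080p60 mode with a 148500 kHz pixel clock and an htotal of 2200 gives pclk_period = 6735 ps and a line time of roughly 14817 ns.)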
1904 */ 1905 line_time = (pclk_period * mode->htotal) / 1000; 1906 if (line_time == 0) { 1907 DPU_ERROR("line time calculation is 0\n"); 1908 return 0; 1909 } 1910 1911 DPU_DEBUG_ENC(dpu_enc, 1912 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n", 1913 pclk_rate, pclk_period, line_time); 1914 1915 return line_time; 1916 } 1917 1918 /** 1919 * dpu_encoder_vsync_time - get the time of the next vsync 1920 * @drm_enc: encoder pointer 1921 * @wakeup_time: pointer to ktime_t to write the vsync time to 1922 */ 1923 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) 1924 { 1925 struct drm_display_mode *mode; 1926 struct dpu_encoder_virt *dpu_enc; 1927 u32 cur_line; 1928 u32 line_time; 1929 u32 vtotal, time_to_vsync; 1930 ktime_t cur_time; 1931 1932 dpu_enc = to_dpu_encoder_virt(drm_enc); 1933 1934 if (!drm_enc->crtc || !drm_enc->crtc->state) { 1935 DPU_ERROR("crtc/crtc state object is NULL\n"); 1936 return -EINVAL; 1937 } 1938 mode = &drm_enc->crtc->state->adjusted_mode; 1939 1940 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode); 1941 if (!line_time) 1942 return -EINVAL; 1943 1944 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master); 1945 1946 vtotal = mode->vtotal; 1947 if (cur_line >= vtotal) 1948 time_to_vsync = line_time * vtotal; 1949 else 1950 time_to_vsync = line_time * (vtotal - cur_line); 1951 1952 if (time_to_vsync == 0) { 1953 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n", 1954 vtotal); 1955 return -EINVAL; 1956 } 1957 1958 cur_time = ktime_get(); 1959 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync); 1960 1961 DPU_DEBUG_ENC(dpu_enc, 1962 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n", 1963 cur_line, vtotal, time_to_vsync, 1964 ktime_to_ms(cur_time), 1965 ktime_to_ms(*wakeup_time)); 1966 return 0; 1967 } 1968 1969 static u32 1970 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc, 1971 u32 enc_ip_width) 1972 { 1973 int ssm_delay, total_pixels, soft_slice_per_enc; 1974 1975 soft_slice_per_enc = enc_ip_width / dsc->slice_width; 1976 1977 /* 1978 * minimum number of initial line pixels is a sum of: 1979 * 1. sub-stream multiplexer delay (83 groups for 8bpc, 1980 * 91 for 10 bpc) * 3 1981 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3 1982 * 3. the initial xmit delay 1983 * 4. total pipeline delay through the "lock step" of encoder (47) 1984 * 5. 6 additional pixels as the output of the rate buffer is 1985 * 48 bits wide 1986 */ 1987 ssm_delay = ((dsc->bits_per_component < 10) ? 
84 : 92); 1988 total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47; 1989 if (soft_slice_per_enc > 1) 1990 total_pixels += (ssm_delay * 3); 1991 return DIV_ROUND_UP(total_pixels, dsc->slice_width); 1992 } 1993 1994 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl, 1995 struct dpu_hw_dsc *hw_dsc, 1996 struct dpu_hw_pingpong *hw_pp, 1997 struct drm_dsc_config *dsc, 1998 u32 common_mode, 1999 u32 initial_lines) 2000 { 2001 if (hw_dsc->ops.dsc_config) 2002 hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines); 2003 2004 if (hw_dsc->ops.dsc_config_thresh) 2005 hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc); 2006 2007 if (hw_pp->ops.setup_dsc) 2008 hw_pp->ops.setup_dsc(hw_pp); 2009 2010 if (hw_dsc->ops.dsc_bind_pingpong_blk) 2011 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx); 2012 2013 if (hw_pp->ops.enable_dsc) 2014 hw_pp->ops.enable_dsc(hw_pp); 2015 2016 if (ctl->ops.update_pending_flush_dsc) 2017 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2018 } 2019 2020 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, 2021 struct drm_dsc_config *dsc) 2022 { 2023 /* coding only for 2LM, 2enc, 1 dsc config */ 2024 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2025 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2026 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2027 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2028 int this_frame_slices; 2029 int intf_ip_w, enc_ip_w; 2030 int dsc_common_mode; 2031 int pic_width; 2032 u32 initial_lines; 2033 int i; 2034 2035 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2036 hw_pp[i] = dpu_enc->hw_pp[i]; 2037 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2038 2039 if (!hw_pp[i] || !hw_dsc[i]) { 2040 DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n"); 2041 return; 2042 } 2043 } 2044 2045 dsc_common_mode = 0; 2046 pic_width = dsc->pic_width; 2047 2048 dsc_common_mode = DSC_MODE_SPLIT_PANEL; 2049 if (dpu_encoder_use_dsc_merge(enc_master->parent)) 2050 dsc_common_mode |= DSC_MODE_MULTIPLEX; 2051 if (enc_master->intf_mode == INTF_MODE_VIDEO) 2052 dsc_common_mode |= DSC_MODE_VIDEO; 2053 2054 this_frame_slices = pic_width / dsc->slice_width; 2055 intf_ip_w = this_frame_slices * dsc->slice_width; 2056 2057 /* 2058 * dsc merge case: when using 2 encoders for the same stream, 2059 * no. of slices need to be same on both the encoders. 2060 */ 2061 enc_ip_w = intf_ip_w / 2; 2062 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); 2063 2064 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 2065 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], 2066 dsc, dsc_common_mode, initial_lines); 2067 } 2068 2069 /** 2070 * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl 2071 * path (i.e. ctl flush and start) at next appropriate time. 2072 * Immediately: if no previous commit is outstanding. 2073 * Delayed: Block until next trigger can be issued. 
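 * The wait itself is delegated to each physical encoder's prepare_for_kickoff op (command-mode encoders, for example, wait for the previous frame's PP_DONE before a new kickoff is allowed).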
2074 * @drm_enc: encoder pointer 2075 */ 2076 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) 2077 { 2078 struct dpu_encoder_virt *dpu_enc; 2079 struct dpu_encoder_phys *phys; 2080 bool needs_hw_reset = false; 2081 unsigned int i; 2082 2083 dpu_enc = to_dpu_encoder_virt(drm_enc); 2084 2085 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); 2086 2087 /* prepare for next kickoff, may include waiting on previous kickoff */ 2088 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); 2089 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2090 phys = dpu_enc->phys_encs[i]; 2091 if (phys->ops.prepare_for_kickoff) 2092 phys->ops.prepare_for_kickoff(phys); 2093 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 2094 needs_hw_reset = true; 2095 } 2096 DPU_ATRACE_END("enc_prepare_for_kickoff"); 2097 2098 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 2099 2100 /* if any phys needs reset, reset all phys, in-order */ 2101 if (needs_hw_reset) { 2102 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); 2103 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2104 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]); 2105 } 2106 } 2107 2108 if (dpu_enc->dsc) 2109 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); 2110 } 2111 2112 /** 2113 * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit. 2114 * @drm_enc: Pointer to drm encoder structure 2115 */ 2116 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) 2117 { 2118 struct dpu_encoder_virt *dpu_enc; 2119 unsigned int i; 2120 struct dpu_encoder_phys *phys; 2121 2122 dpu_enc = to_dpu_encoder_virt(drm_enc); 2123 2124 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { 2125 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2126 phys = dpu_enc->phys_encs[i]; 2127 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) { 2128 DPU_DEBUG("invalid FB not kicking off\n"); 2129 return false; 2130 } 2131 } 2132 } 2133 2134 return true; 2135 } 2136 2137 /** 2138 * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path 2139 * (i.e. ctl flush and start) immediately.
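 * Callers are expected to have invoked dpu_encoder_prepare_for_kickoff() beforehand so that any outstanding kickoff has completed.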
2140 * @drm_enc: encoder pointer 2141 */ 2142 void dpu_encoder_kickoff(struct drm_encoder *drm_enc) 2143 { 2144 struct dpu_encoder_virt *dpu_enc; 2145 struct dpu_encoder_phys *phys; 2146 unsigned long timeout_ms; 2147 unsigned int i; 2148 2149 DPU_ATRACE_BEGIN("encoder_kickoff"); 2150 dpu_enc = to_dpu_encoder_virt(drm_enc); 2151 2152 trace_dpu_enc_kickoff(DRMID(drm_enc)); 2153 2154 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / 2155 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); 2156 2157 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); 2158 mod_timer(&dpu_enc->frame_done_timer, 2159 jiffies + msecs_to_jiffies(timeout_ms)); 2160 2161 /* All phys encs are ready to go, trigger the kickoff */ 2162 _dpu_encoder_kickoff_phys(dpu_enc); 2163 2164 /* allow phys encs to handle any post-kickoff business */ 2165 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2166 phys = dpu_enc->phys_encs[i]; 2167 if (phys->ops.handle_post_kickoff) 2168 phys->ops.handle_post_kickoff(phys); 2169 } 2170 2171 DPU_ATRACE_END("encoder_kickoff"); 2172 } 2173 2174 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) 2175 { 2176 struct dpu_hw_mixer_cfg mixer; 2177 int i, num_lm; 2178 struct dpu_global_state *global_state; 2179 struct dpu_hw_blk *hw_lm[2]; 2180 struct dpu_hw_mixer *hw_mixer[2]; 2181 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2182 2183 memset(&mixer, 0, sizeof(mixer)); 2184 2185 /* reset all mixers for this encoder */ 2186 if (phys_enc->hw_ctl->ops.clear_all_blendstages) 2187 phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl); 2188 2189 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); 2190 2191 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, 2192 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 2193 2194 for (i = 0; i < num_lm; i++) { 2195 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); 2196 if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) 2197 phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); 2198 2199 /* clear all blendstages */ 2200 if (phys_enc->hw_ctl->ops.setup_blendstage) 2201 phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); 2202 } 2203 } 2204 2205 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl, 2206 struct dpu_hw_dsc *hw_dsc, 2207 struct dpu_hw_pingpong *hw_pp) 2208 { 2209 if (hw_dsc->ops.dsc_disable) 2210 hw_dsc->ops.dsc_disable(hw_dsc); 2211 2212 if (hw_pp->ops.disable_dsc) 2213 hw_pp->ops.disable_dsc(hw_pp); 2214 2215 if (hw_dsc->ops.dsc_bind_pingpong_blk) 2216 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE); 2217 2218 if (ctl->ops.update_pending_flush_dsc) 2219 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2220 } 2221 2222 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) 2223 { 2224 /* coding only for 2LM, 2enc, 1 dsc config */ 2225 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2226 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2227 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2228 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2229 int i; 2230 2231 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2232 hw_pp[i] = dpu_enc->hw_pp[i]; 2233 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2234 2235 if (hw_pp[i] && hw_dsc[i]) 2236 dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]); 2237 } 2238 } 2239 2240 /** 2241 * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline 2242 * @phys_enc: Pointer to physical encoder structure 2243 */ 2244 void 
dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) 2245 { 2246 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2247 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 2248 int i; 2249 struct dpu_encoder_virt *dpu_enc; 2250 2251 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2252 2253 phys_enc->hw_ctl->ops.reset(ctl); 2254 2255 dpu_encoder_helper_reset_mixers(phys_enc); 2256 2257 /* 2258 * TODO: move the once-only operation like CTL flush/trigger 2259 * into dpu_encoder_virt_disable() and all operations which need 2260 * to be done per phys encoder into the phys_disable() op. 2261 */ 2262 if (phys_enc->hw_wb) { 2263 /* disable the PP block */ 2264 if (phys_enc->hw_wb->ops.bind_pingpong_blk) 2265 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE); 2266 2267 /* mark WB flush as pending */ 2268 if (phys_enc->hw_ctl->ops.update_pending_flush_wb) 2269 phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); 2270 } else { 2271 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2272 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) 2273 phys_enc->hw_intf->ops.bind_pingpong_blk( 2274 dpu_enc->phys_encs[i]->hw_intf, 2275 PINGPONG_NONE); 2276 2277 /* mark INTF flush as pending */ 2278 if (phys_enc->hw_ctl->ops.update_pending_flush_intf) 2279 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl, 2280 dpu_enc->phys_encs[i]->hw_intf->idx); 2281 } 2282 } 2283 2284 /* reset the merge 3D HW block */ 2285 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { 2286 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, 2287 BLEND_3D_NONE); 2288 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) 2289 phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl, 2290 phys_enc->hw_pp->merge_3d->idx); 2291 } 2292 2293 if (phys_enc->hw_cdm) { 2294 if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp) 2295 phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm, 2296 PINGPONG_NONE); 2297 if (phys_enc->hw_ctl->ops.update_pending_flush_cdm) 2298 phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl, 2299 phys_enc->hw_cdm->idx); 2300 } 2301 2302 if (dpu_enc->dsc) { 2303 dpu_encoder_unprep_dsc(dpu_enc); 2304 dpu_enc->dsc = NULL; 2305 } 2306 2307 intf_cfg.stream_sel = 0; /* Don't care value for video mode */ 2308 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); 2309 intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); 2310 2311 if (phys_enc->hw_intf) 2312 intf_cfg.intf = phys_enc->hw_intf->idx; 2313 if (phys_enc->hw_wb) 2314 intf_cfg.wb = phys_enc->hw_wb->idx; 2315 2316 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) 2317 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; 2318 2319 if (ctl->ops.reset_intf_cfg) 2320 ctl->ops.reset_intf_cfg(ctl, &intf_cfg); 2321 2322 ctl->ops.trigger_flush(ctl); 2323 ctl->ops.trigger_start(ctl); 2324 ctl->ops.clear_pending_flush(ctl); 2325 } 2326 2327 /** 2328 * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block 2329 * @phys_enc: Pointer to physical encoder 2330 * @dpu_fmt: Pointer to the format description 2331 * @output_type: HDMI/WB 2332 */ 2333 void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, 2334 const struct msm_format *dpu_fmt, 2335 u32 output_type) 2336 { 2337 struct dpu_hw_cdm *hw_cdm; 2338 struct dpu_hw_cdm_cfg *cdm_cfg; 2339 struct dpu_hw_pingpong *hw_pp; 2340 int ret; 2341 2342 if (!phys_enc) 2343 return; 2344 2345 cdm_cfg = &phys_enc->cdm_cfg; 2346 hw_pp = phys_enc->hw_pp; 2347 hw_cdm = phys_enc->hw_cdm; 2348
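 /* A CDM block is only reserved when the output needs chroma down-sampling (e.g. YUV writeback), so hw_cdm may well be NULL here and there is then nothing to program. */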
2349 if (!hw_cdm) 2350 return; 2351 2352 if (!MSM_FORMAT_IS_YUV(dpu_fmt)) { 2353 DPU_DEBUG("[enc:%d] cdm_disable fmt:%p4cc\n", DRMID(phys_enc->parent), 2354 &dpu_fmt->pixel_format); 2355 if (hw_cdm->ops.bind_pingpong_blk) 2356 hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE); 2357 2358 return; 2359 } 2360 2361 memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg)); 2362 2363 cdm_cfg->output_width = phys_enc->cached_mode.hdisplay; 2364 cdm_cfg->output_height = phys_enc->cached_mode.vdisplay; 2365 cdm_cfg->output_fmt = dpu_fmt; 2366 cdm_cfg->output_type = output_type; 2367 cdm_cfg->output_bit_depth = MSM_FORMAT_IS_DX(dpu_fmt) ? 2368 CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; 2369 cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l; 2370 2371 /* enable 10 bit logic */ 2372 switch (cdm_cfg->output_fmt->chroma_sample) { 2373 case CHROMA_FULL: 2374 cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 2375 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2376 break; 2377 case CHROMA_H2V1: 2378 cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 2379 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2380 break; 2381 case CHROMA_420: 2382 cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 2383 cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; 2384 break; 2385 case CHROMA_H1V2: 2386 default: 2387 DPU_ERROR("[enc:%d] unsupported chroma sampling type\n", 2388 DRMID(phys_enc->parent)); 2389 cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 2390 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2391 break; 2392 } 2393 2394 DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%p4cc,%d,%d,%d,%d]\n", 2395 DRMID(phys_enc->parent), cdm_cfg->output_width, 2396 cdm_cfg->output_height, &cdm_cfg->output_fmt->pixel_format, 2397 cdm_cfg->output_type, cdm_cfg->output_bit_depth, 2398 cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type); 2399 2400 if (hw_cdm->ops.enable) { 2401 cdm_cfg->pp_id = hw_pp->idx; 2402 ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); 2403 if (ret < 0) { 2404 DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n", 2405 DRMID(phys_enc->parent), ret); 2406 return; 2407 } 2408 } 2409 } 2410 2411 #ifdef CONFIG_DEBUG_FS 2412 static int _dpu_encoder_status_show(struct seq_file *s, void *data) 2413 { 2414 struct drm_encoder *drm_enc = s->private; 2415 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2416 int i; 2417 2418 mutex_lock(&dpu_enc->enc_lock); 2419 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2420 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2421 2422 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d", 2423 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1, 2424 phys->hw_wb ? 
phys->hw_wb->idx - WB_0 : -1, 2425 atomic_read(&phys->vsync_cnt), 2426 atomic_read(&phys->underrun_cnt), 2427 atomic_read(&dpu_enc->frame_done_timeout_cnt)); 2428 2429 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); 2430 } 2431 mutex_unlock(&dpu_enc->enc_lock); 2432 2433 return 0; 2434 } 2435 2436 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); 2437 2438 static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root) 2439 { 2440 /* don't error check these */ 2441 debugfs_create_file("status", 0600, 2442 root, drm_enc, &_dpu_encoder_status_fops); 2443 } 2444 #else 2445 #define dpu_encoder_debugfs_init NULL 2446 #endif 2447 2448 static int dpu_encoder_virt_add_phys_encs( 2449 struct drm_device *dev, 2450 struct msm_display_info *disp_info, 2451 struct dpu_encoder_virt *dpu_enc, 2452 struct dpu_enc_phys_init_params *params) 2453 { 2454 struct dpu_encoder_phys *enc = NULL; 2455 2456 DPU_DEBUG_ENC(dpu_enc, "\n"); 2457 2458 /* 2459 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types 2460 * in this function, check up-front. 2461 */ 2462 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= 2463 ARRAY_SIZE(dpu_enc->phys_encs)) { 2464 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", 2465 dpu_enc->num_phys_encs); 2466 return -EINVAL; 2467 } 2468 2469 2470 if (disp_info->intf_type == INTF_WB) { 2471 enc = dpu_encoder_phys_wb_init(dev, params); 2472 2473 if (IS_ERR(enc)) { 2474 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", 2475 PTR_ERR(enc)); 2476 return PTR_ERR(enc); 2477 } 2478 2479 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2480 ++dpu_enc->num_phys_encs; 2481 } else if (disp_info->is_cmd_mode) { 2482 enc = dpu_encoder_phys_cmd_init(dev, params); 2483 2484 if (IS_ERR(enc)) { 2485 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", 2486 PTR_ERR(enc)); 2487 return PTR_ERR(enc); 2488 } 2489 2490 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2491 ++dpu_enc->num_phys_encs; 2492 } else { 2493 enc = dpu_encoder_phys_vid_init(dev, params); 2494 2495 if (IS_ERR(enc)) { 2496 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", 2497 PTR_ERR(enc)); 2498 return PTR_ERR(enc); 2499 } 2500 2501 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2502 ++dpu_enc->num_phys_encs; 2503 } 2504 2505 if (params->split_role == ENC_ROLE_SLAVE) 2506 dpu_enc->cur_slave = enc; 2507 else 2508 dpu_enc->cur_master = enc; 2509 2510 return 0; 2511 } 2512 2513 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, 2514 struct dpu_kms *dpu_kms, 2515 struct msm_display_info *disp_info) 2516 { 2517 int ret = 0; 2518 int i = 0; 2519 struct dpu_enc_phys_init_params phys_params; 2520 2521 if (!dpu_enc) { 2522 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); 2523 return -EINVAL; 2524 } 2525 2526 dpu_enc->cur_master = NULL; 2527 2528 memset(&phys_params, 0, sizeof(phys_params)); 2529 phys_params.dpu_kms = dpu_kms; 2530 phys_params.parent = &dpu_enc->base; 2531 phys_params.enc_spinlock = &dpu_enc->enc_spinlock; 2532 2533 WARN_ON(disp_info->num_of_h_tiles < 1); 2534 2535 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); 2536 2537 if (disp_info->intf_type != INTF_WB) 2538 dpu_enc->idle_pc_supported = 2539 dpu_kms->catalog->caps->has_idle_pc; 2540 2541 mutex_lock(&dpu_enc->enc_lock); 2542 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { 2543 /* 2544 * Left-most tile is at index 0, content is controller id 2545 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right 2546 * 
h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right 2547 */ 2548 u32 controller_id = disp_info->h_tile_instance[i]; 2549 2550 if (disp_info->num_of_h_tiles > 1) { 2551 if (i == 0) 2552 phys_params.split_role = ENC_ROLE_MASTER; 2553 else 2554 phys_params.split_role = ENC_ROLE_SLAVE; 2555 } else { 2556 phys_params.split_role = ENC_ROLE_SOLO; 2557 } 2558 2559 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", 2560 i, controller_id, phys_params.split_role); 2561 2562 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm, 2563 disp_info->intf_type, 2564 controller_id); 2565 2566 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX) 2567 phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id); 2568 2569 if (!phys_params.hw_intf && !phys_params.hw_wb) { 2570 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i); 2571 ret = -EINVAL; 2572 break; 2573 } 2574 2575 if (phys_params.hw_intf && phys_params.hw_wb) { 2576 DPU_ERROR_ENC(dpu_enc, 2577 "invalid phys both intf and wb block at idx: %d\n", i); 2578 ret = -EINVAL; 2579 break; 2580 } 2581 2582 ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info, 2583 dpu_enc, &phys_params); 2584 if (ret) { 2585 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); 2586 break; 2587 } 2588 } 2589 2590 mutex_unlock(&dpu_enc->enc_lock); 2591 2592 return ret; 2593 } 2594 2595 static void dpu_encoder_frame_done_timeout(struct timer_list *t) 2596 { 2597 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, 2598 frame_done_timer); 2599 struct drm_encoder *drm_enc = &dpu_enc->base; 2600 u32 event; 2601 2602 if (!drm_enc->dev) { 2603 DPU_ERROR("invalid parameters\n"); 2604 return; 2605 } 2606 2607 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc) { 2608 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", 2609 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); 2610 return; 2611 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 2612 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); 2613 return; 2614 } 2615 2616 DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n"); 2617 2618 if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1) 2619 msm_disp_snapshot_state(drm_enc->dev); 2620 2621 event = DPU_ENCODER_FRAME_EVENT_ERROR; 2622 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); 2623 dpu_crtc_frame_event_cb(dpu_enc->crtc, event); 2624 } 2625 2626 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { 2627 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, 2628 .atomic_disable = dpu_encoder_virt_atomic_disable, 2629 .atomic_enable = dpu_encoder_virt_atomic_enable, 2630 .atomic_check = dpu_encoder_virt_atomic_check, 2631 }; 2632 2633 static const struct drm_encoder_funcs dpu_encoder_funcs = { 2634 .debugfs_init = dpu_encoder_debugfs_init, 2635 }; 2636 2637 /** 2638 * dpu_encoder_init - initialize virtual encoder object 2639 * @dev: Pointer to drm device structure 2640 * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant 2641 * @disp_info: Pointer to display information structure 2642 * Returns: Pointer to newly created drm encoder 2643 */ 2644 struct drm_encoder *dpu_encoder_init(struct drm_device *dev, 2645 int drm_enc_mode, 2646 struct msm_display_info *disp_info) 2647 { 2648 struct msm_drm_private *priv = dev->dev_private; 2649 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 2650 struct dpu_encoder_virt *dpu_enc; 2651 int ret; 2652 2653 dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base, 2654 &dpu_encoder_funcs, 
drm_enc_mode, NULL); 2655 if (IS_ERR(dpu_enc)) 2656 return ERR_CAST(dpu_enc); 2657 2658 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2659 2660 spin_lock_init(&dpu_enc->enc_spinlock); 2661 dpu_enc->enabled = false; 2662 mutex_init(&dpu_enc->enc_lock); 2663 mutex_init(&dpu_enc->rc_lock); 2664 2665 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2666 if (ret) { 2667 DPU_ERROR("failed to setup encoder\n"); 2668 return ERR_PTR(-ENOMEM); 2669 } 2670 2671 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2672 atomic_set(&dpu_enc->frame_done_timeout_cnt, 0); 2673 timer_setup(&dpu_enc->frame_done_timer, 2674 dpu_encoder_frame_done_timeout, 0); 2675 2676 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2677 dpu_encoder_off_work); 2678 dpu_enc->idle_timeout = IDLE_TIMEOUT; 2679 2680 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); 2681 2682 DPU_DEBUG_ENC(dpu_enc, "created\n"); 2683 2684 return &dpu_enc->base; 2685 } 2686 2687 /** 2688 * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state 2689 * @drm_enc: encoder pointer 2690 * 2691 * Wait for hardware to have flushed the current pending changes to hardware at 2692 * a vblank or CTL_START. Physical encoders will map this differently depending 2693 * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START. 2694 * 2695 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2696 */ 2697 int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc) 2698 { 2699 struct dpu_encoder_virt *dpu_enc = NULL; 2700 int i, ret = 0; 2701 2702 if (!drm_enc) { 2703 DPU_ERROR("invalid encoder\n"); 2704 return -EINVAL; 2705 } 2706 dpu_enc = to_dpu_encoder_virt(drm_enc); 2707 DPU_DEBUG_ENC(dpu_enc, "\n"); 2708 2709 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2710 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2711 2712 if (phys->ops.wait_for_commit_done) { 2713 DPU_ATRACE_BEGIN("wait_for_commit_done"); 2714 ret = phys->ops.wait_for_commit_done(phys); 2715 DPU_ATRACE_END("wait_for_commit_done"); 2716 if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) { 2717 dpu_enc->commit_done_timedout = true; 2718 msm_disp_snapshot_state(drm_enc->dev); 2719 } 2720 if (ret) 2721 return ret; 2722 } 2723 } 2724 2725 return ret; 2726 } 2727 2728 /** 2729 * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel 2730 * @drm_enc: encoder pointer 2731 * 2732 * Wait for the hardware to transfer all the pixels to the panel. Physical 2733 * encoders will map this differently depending on the type: vid mode -> vsync_irq, 2734 * cmd mode -> pp_done. 
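 * Unlike dpu_encoder_wait_for_commit_done(), which for command mode only waits until the new state has been latched at CTL_START, this waits until the frame data has actually been pushed out to the panel.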
2735 * 2736 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2737 */ 2738 int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) 2739 { 2740 struct dpu_encoder_virt *dpu_enc = NULL; 2741 int i, ret = 0; 2742 2743 if (!drm_enc) { 2744 DPU_ERROR("invalid encoder\n"); 2745 return -EINVAL; 2746 } 2747 dpu_enc = to_dpu_encoder_virt(drm_enc); 2748 DPU_DEBUG_ENC(dpu_enc, "\n"); 2749 2750 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2751 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2752 2753 if (phys->ops.wait_for_tx_complete) { 2754 DPU_ATRACE_BEGIN("wait_for_tx_complete"); 2755 ret = phys->ops.wait_for_tx_complete(phys); 2756 DPU_ATRACE_END("wait_for_tx_complete"); 2757 if (ret) 2758 return ret; 2759 } 2760 } 2761 2762 return ret; 2763 } 2764 2765 /** 2766 * dpu_encoder_get_intf_mode - get interface mode of the given encoder 2767 * @encoder: Pointer to drm encoder object 2768 */ 2769 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) 2770 { 2771 struct dpu_encoder_virt *dpu_enc = NULL; 2772 2773 if (!encoder) { 2774 DPU_ERROR("invalid encoder\n"); 2775 return INTF_MODE_NONE; 2776 } 2777 dpu_enc = to_dpu_encoder_virt(encoder); 2778 2779 if (dpu_enc->cur_master) 2780 return dpu_enc->cur_master->intf_mode; 2781 2782 if (dpu_enc->num_phys_encs) 2783 return dpu_enc->phys_encs[0]->intf_mode; 2784 2785 return INTF_MODE_NONE; 2786 } 2787 2788 /** 2789 * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder 2790 * This helper function is used by physical encoder to get DSC blocks mask 2791 * used for this encoder. 2792 * @phys_enc: Pointer to physical encoder structure 2793 */ 2794 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) 2795 { 2796 struct drm_encoder *encoder = phys_enc->parent; 2797 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2798 2799 return dpu_enc->dsc_mask; 2800 } 2801 2802 void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc, 2803 struct dpu_enc_phys_init_params *p) 2804 { 2805 phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; 2806 phys_enc->hw_intf = p->hw_intf; 2807 phys_enc->hw_wb = p->hw_wb; 2808 phys_enc->parent = p->parent; 2809 phys_enc->dpu_kms = p->dpu_kms; 2810 phys_enc->split_role = p->split_role; 2811 phys_enc->enc_spinlock = p->enc_spinlock; 2812 phys_enc->enable_state = DPU_ENC_DISABLED; 2813 2814 atomic_set(&phys_enc->pending_kickoff_cnt, 0); 2815 atomic_set(&phys_enc->pending_ctlstart_cnt, 0); 2816 2817 atomic_set(&phys_enc->vsync_cnt, 0); 2818 atomic_set(&phys_enc->underrun_cnt, 0); 2819 2820 init_waitqueue_head(&phys_enc->pending_kickoff_wq); 2821 } 2822
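/*
 * Example (illustrative sketch, not part of the driver): a KMS-level caller
 * would typically create the virtual encoder for a dual-tile command-mode
 * DSI panel along these lines. Only dpu_encoder_init() and the
 * msm_display_info fields used elsewhere in this file are real; the local
 * variable names and the surrounding error handling are assumptions made
 * for the example.
 *
 *	struct msm_display_info info = {
 *		.intf_type = INTF_DSI,
 *		.num_of_h_tiles = 2,
 *		.h_tile_instance = { 0, 1 },
 *		.is_cmd_mode = true,
 *	};
 *	struct drm_encoder *encoder;
 *
 *	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
 *	if (IS_ERR(encoder))
 *		return PTR_ERR(encoder);
 */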