1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013 Red Hat 4 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved. 5 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 6 * 7 * Author: Rob Clark <robdclark@gmail.com> 8 */ 9 10 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ 11 #include <linux/debugfs.h> 12 #include <linux/kthread.h> 13 #include <linux/seq_file.h> 14 15 #include <drm/drm_atomic.h> 16 #include <drm/drm_crtc.h> 17 #include <drm/drm_file.h> 18 #include <drm/drm_probe_helper.h> 19 #include <drm/drm_framebuffer.h> 20 21 #include "msm_drv.h" 22 #include "dpu_kms.h" 23 #include "dpu_hwio.h" 24 #include "dpu_hw_catalog.h" 25 #include "dpu_hw_intf.h" 26 #include "dpu_hw_ctl.h" 27 #include "dpu_hw_cwb.h" 28 #include "dpu_hw_dspp.h" 29 #include "dpu_hw_dsc.h" 30 #include "dpu_hw_merge3d.h" 31 #include "dpu_hw_cdm.h" 32 #include "dpu_formats.h" 33 #include "dpu_encoder_phys.h" 34 #include "dpu_crtc.h" 35 #include "dpu_trace.h" 36 #include "dpu_core_irq.h" 37 #include "disp/msm_disp_snapshot.h" 38 39 #define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\ 40 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) 41 42 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\ 43 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) 44 45 #define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\ 46 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) 47 48 /* 49 * Two to anticipate panels that can do cmd/vid dynamic switching 50 * plan is to create all possible physical encoder types, and switch between 51 * them at runtime 52 */ 53 #define NUM_PHYS_ENCODER_TYPES 2 54 55 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \ 56 (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES) 57 58 #define MAX_CHANNELS_PER_ENC 2 59 #define MAX_CWB_PER_ENC 2 60 61 #define IDLE_SHORT_TIMEOUT 1 62 63 /* timeout in frames waiting for frame done */ 64 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5 65 66 /** 67 * enum dpu_enc_rc_events - events for resource control state machine 68 * @DPU_ENC_RC_EVENT_KICKOFF: 69 * This event happens at NORMAL priority. 70 * Event that signals the start of the transfer. When this event is 71 * received, enable MDP/DSI core clocks. Regardless of the previous 72 * state, the resource should be in ON state at the end of this event. 73 * @DPU_ENC_RC_EVENT_FRAME_DONE: 74 * This event happens at INTERRUPT level. 75 * Event signals the end of the data transfer after the PP FRAME_DONE 76 * event. At the end of this event, a delayed work is scheduled to go to 77 * IDLE_PC state after IDLE_TIMEOUT time. 78 * @DPU_ENC_RC_EVENT_PRE_STOP: 79 * This event happens at NORMAL priority. 80 * This event, when received during the ON state, leave the RC STATE 81 * in the PRE_OFF state. It should be followed by the STOP event as 82 * part of encoder disable. 83 * If received during IDLE or OFF states, it will do nothing. 84 * @DPU_ENC_RC_EVENT_STOP: 85 * This event happens at NORMAL priority. 86 * When this event is received, disable all the MDP/DSI core clocks, and 87 * disable IRQs. It should be called from the PRE_OFF or IDLE states. 88 * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing. 89 * PRE_OFF is expected when PRE_STOP was executed during the ON state. 90 * Resource state should be in OFF at the end of the event. 91 * @DPU_ENC_RC_EVENT_ENTER_IDLE: 92 * This event happens at NORMAL priority from a work item. 93 * Event signals that there were no frame updates for IDLE_TIMEOUT time. 
 * This would disable MDP/DSI core clocks and change the resource state
 * to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @commit_done_timedout: True if there has been a timeout on commit after
 *			enabling the encoder.
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode.
 *			Optimization: only valid after enable, cleared at
 *			disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from
 *			num_phys_encs.
 * @hw_cwb:		Handle to the CWB muxes used for concurrent writeback
 *			display. The number of CWB muxes can differ from
 *			num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @cwb_mask:		Bitmask of used CWB muxes
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @enc_lock:		Lock around physical encoder
 *			create/destroy/enable/disable
 * @frame_busy_mask:	Bitmask tracking which phys_encs are still busy
 *			processing the current command.
 *			Bit0 = phys_encs[0] etc.
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timeout_cnt:	atomic counter tracking the number of frame
 *				done timeouts
 * @frame_done_timer:		watchdog timer for frame done event
 * @disp_info:			local copy of msm_display_info struct
 * @idle_pc_supported:		indicate if idle power collapse is supported
 * @rc_lock:			resource control mutex lock to protect
 *				virt encoder over various state changes
 * @rc_state:			resource controller state
 * @delayed_off_work:		delayed worker to schedule disabling of
 *				clks and resources after IDLE_TIMEOUT time.
 * @topology:			topology of the display
 * @idle_timeout:		idle timeout duration in milliseconds
 * @wide_bus_en:		wide bus is enabled on this interface
 * @dsc:			drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;
	bool commit_done_timedout;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_cwb *hw_cwb[MAX_CWB_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;
	unsigned int cwb_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);

	atomic_t frame_done_timeout_ms;
	atomic_t frame_done_timeout_cnt;
	struct timer_list frame_done_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

/**
 * dpu_encoder_get_drm_fmt - return DRM fourcc format
 * @phys_enc: Pointer to physical encoder structure
 */
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct drm_display_info *info;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	info = &dpu_enc->connector->display_info;
	mode = &phys_enc->cached_mode;

	if (drm_mode_is_420_only(info, mode))
		return DRM_FORMAT_YUV420;

	return DRM_FORMAT_RGB888;
}

/**
 * dpu_encoder_needs_periph_flush - return true if physical encoder requires
 *	peripheral flush
 * @phys_enc: Pointer to physical encoder structure
 */
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct msm_display_info *disp_info;
	struct msm_drm_private *priv;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	priv = drm_enc->dev->dev_private;
	mode = &phys_enc->cached_mode;

	return phys_enc->hw_intf->cap->type == INTF_DP &&
msm_dp_needs_periph_flush(priv->kms->dp[disp_info->h_tile_instance[0]], mode); 269 } 270 271 /** 272 * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled 273 * @drm_enc: Pointer to previously created drm encoder structure 274 */ 275 bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) 276 { 277 const struct dpu_encoder_virt *dpu_enc; 278 struct msm_drm_private *priv = drm_enc->dev->dev_private; 279 const struct msm_display_info *disp_info; 280 int index; 281 282 dpu_enc = to_dpu_encoder_virt(drm_enc); 283 disp_info = &dpu_enc->disp_info; 284 index = disp_info->h_tile_instance[0]; 285 286 if (disp_info->intf_type == INTF_DP) 287 return msm_dp_wide_bus_available(priv->kms->dp[index]); 288 else if (disp_info->intf_type == INTF_DSI) 289 return msm_dsi_wide_bus_enabled(priv->kms->dsi[index]); 290 291 return false; 292 } 293 294 /** 295 * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled 296 * for the encoder. 297 * @drm_enc: Pointer to previously created drm encoder structure 298 */ 299 bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) 300 { 301 const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 302 303 return dpu_enc->dsc ? true : false; 304 } 305 306 /** 307 * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained 308 * in virtual encoder that can collect CRC values 309 * @drm_enc: Pointer to previously created drm encoder structure 310 * Returns: Number of physical encoders for given drm encoder 311 */ 312 int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) 313 { 314 struct dpu_encoder_virt *dpu_enc; 315 int i, num_intf = 0; 316 317 dpu_enc = to_dpu_encoder_virt(drm_enc); 318 319 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 320 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 321 322 if (phys->hw_intf && phys->hw_intf->ops.setup_misr 323 && phys->hw_intf->ops.collect_misr) 324 num_intf++; 325 } 326 327 return num_intf; 328 } 329 330 /** 331 * dpu_encoder_setup_misr - enable misr calculations 332 * @drm_enc: Pointer to previously created drm encoder structure 333 */ 334 void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) 335 { 336 struct dpu_encoder_virt *dpu_enc; 337 338 int i; 339 340 dpu_enc = to_dpu_encoder_virt(drm_enc); 341 342 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 343 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 344 345 if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr) 346 continue; 347 348 phys->hw_intf->ops.setup_misr(phys->hw_intf); 349 } 350 } 351 352 /** 353 * dpu_encoder_get_crc - get the crc value from interface blocks 354 * @drm_enc: Pointer to previously created drm encoder structure 355 * @crcs: array to fill with CRC data 356 * @pos: offset into the @crcs array 357 * Returns: 0 on success, error otherwise 358 */ 359 int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos) 360 { 361 struct dpu_encoder_virt *dpu_enc; 362 363 int i, rc = 0, entries_added = 0; 364 365 if (!drm_enc->crtc) { 366 DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index); 367 return -EINVAL; 368 } 369 370 dpu_enc = to_dpu_encoder_virt(drm_enc); 371 372 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 373 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 374 375 if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr) 376 continue; 377 378 rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]); 379 if (rc) 380 return rc; 381 entries_added++; 382 } 383 384 return entries_added; 385 } 
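
/*
 * Note on the pingpong dither helper below: for 6 bpc panels it programs a
 * 4x4 ordered-dither matrix with a 6-bit depth on all four components and
 * temporal dithering disabled; for any other bpc it simply disables dithering
 * on that pingpong block.
 */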
386 387 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc) 388 { 389 struct dpu_hw_dither_cfg dither_cfg = { 0 }; 390 391 if (!hw_pp->ops.setup_dither) 392 return; 393 394 switch (bpc) { 395 case 6: 396 dither_cfg.c0_bitdepth = 6; 397 dither_cfg.c1_bitdepth = 6; 398 dither_cfg.c2_bitdepth = 6; 399 dither_cfg.c3_bitdepth = 6; 400 dither_cfg.temporal_en = 0; 401 break; 402 default: 403 hw_pp->ops.setup_dither(hw_pp, NULL); 404 return; 405 } 406 407 memcpy(&dither_cfg.matrix, dither_matrix, 408 sizeof(u32) * DITHER_MATRIX_SZ); 409 410 hw_pp->ops.setup_dither(hw_pp, &dither_cfg); 411 } 412 413 static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode) 414 { 415 switch (intf_mode) { 416 case INTF_MODE_VIDEO: 417 return "INTF_MODE_VIDEO"; 418 case INTF_MODE_CMD: 419 return "INTF_MODE_CMD"; 420 case INTF_MODE_WB_BLOCK: 421 return "INTF_MODE_WB_BLOCK"; 422 case INTF_MODE_WB_LINE: 423 return "INTF_MODE_WB_LINE"; 424 default: 425 return "INTF_MODE_UNKNOWN"; 426 } 427 } 428 429 /** 430 * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has 431 * timed out, including reporting frame error event to crtc and debug dump 432 * @phys_enc: Pointer to physical encoder structure 433 * @intr_idx: Failing interrupt index 434 */ 435 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, 436 enum dpu_intr_idx intr_idx) 437 { 438 DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n", 439 DRMID(phys_enc->parent), 440 dpu_encoder_helper_get_intf_type(phys_enc->intf_mode), 441 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1, 442 phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1, 443 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx); 444 445 dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, 446 DPU_ENCODER_FRAME_EVENT_ERROR); 447 } 448 449 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, 450 u32 irq_idx, struct dpu_encoder_wait_info *info); 451 452 /** 453 * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. 
454 * note: will call dpu_encoder_helper_wait_for_irq on timeout 455 * @phys_enc: Pointer to physical encoder structure 456 * @irq_idx: IRQ index 457 * @func: IRQ callback to be called in case of timeout 458 * @wait_info: wait info struct 459 * @return: 0 or -ERROR 460 */ 461 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, 462 unsigned int irq_idx, 463 void (*func)(void *arg), 464 struct dpu_encoder_wait_info *wait_info) 465 { 466 u32 irq_status; 467 int ret; 468 469 if (!wait_info) { 470 DPU_ERROR("invalid params\n"); 471 return -EINVAL; 472 } 473 /* note: do master / slave checking outside */ 474 475 /* return EWOULDBLOCK since we know the wait isn't necessary */ 476 if (phys_enc->enable_state == DPU_ENC_DISABLED) { 477 DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n", 478 DRMID(phys_enc->parent), func, 479 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); 480 return -EWOULDBLOCK; 481 } 482 483 if (irq_idx == 0) { 484 DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n", 485 DRMID(phys_enc->parent), func); 486 return 0; 487 } 488 489 DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n", 490 DRMID(phys_enc->parent), func, 491 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0, 492 atomic_read(wait_info->atomic_cnt)); 493 494 ret = dpu_encoder_helper_wait_event_timeout( 495 DRMID(phys_enc->parent), 496 irq_idx, 497 wait_info); 498 499 if (ret <= 0) { 500 irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx); 501 if (irq_status) { 502 unsigned long flags; 503 504 DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n", 505 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 506 DRMID(phys_enc->parent), func, 507 phys_enc->hw_pp->idx - PINGPONG_0, 508 atomic_read(wait_info->atomic_cnt)); 509 local_irq_save(flags); 510 func(phys_enc); 511 local_irq_restore(flags); 512 ret = 0; 513 } else { 514 ret = -ETIMEDOUT; 515 DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n", 516 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 517 DRMID(phys_enc->parent), func, 518 phys_enc->hw_pp->idx - PINGPONG_0, 519 atomic_read(wait_info->atomic_cnt)); 520 } 521 } else { 522 ret = 0; 523 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent), 524 func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 525 phys_enc->hw_pp->idx - PINGPONG_0, 526 atomic_read(wait_info->atomic_cnt)); 527 } 528 529 return ret; 530 } 531 532 /** 533 * dpu_encoder_get_vsync_count - get vsync count for the encoder. 534 * @drm_enc: Pointer to previously created drm encoder structure 535 */ 536 int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) 537 { 538 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 539 struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL; 540 return phys ? atomic_read(&phys->vsync_cnt) : 0; 541 } 542 543 /** 544 * dpu_encoder_get_linecount - get interface line count for the encoder. 545 * @drm_enc: Pointer to previously created drm encoder structure 546 */ 547 int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) 548 { 549 struct dpu_encoder_virt *dpu_enc; 550 struct dpu_encoder_phys *phys; 551 int linecount = 0; 552 553 dpu_enc = to_dpu_encoder_virt(drm_enc); 554 phys = dpu_enc ? 
dpu_enc->cur_master : NULL; 555 556 if (phys && phys->ops.get_line_count) 557 linecount = phys->ops.get_line_count(phys); 558 559 return linecount; 560 } 561 562 /** 563 * dpu_encoder_helper_split_config - split display configuration helper function 564 * This helper function may be used by physical encoders to configure 565 * the split display related registers. 566 * @phys_enc: Pointer to physical encoder structure 567 * @interface: enum dpu_intf setting 568 */ 569 void dpu_encoder_helper_split_config( 570 struct dpu_encoder_phys *phys_enc, 571 enum dpu_intf interface) 572 { 573 struct dpu_encoder_virt *dpu_enc; 574 struct split_pipe_cfg cfg = { 0 }; 575 struct dpu_hw_mdp *hw_mdptop; 576 struct msm_display_info *disp_info; 577 578 if (!phys_enc->hw_mdptop || !phys_enc->parent) { 579 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); 580 return; 581 } 582 583 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 584 hw_mdptop = phys_enc->hw_mdptop; 585 disp_info = &dpu_enc->disp_info; 586 587 if (disp_info->intf_type != INTF_DSI) 588 return; 589 590 /** 591 * disable split modes since encoder will be operating in as the only 592 * encoder, either for the entire use case in the case of, for example, 593 * single DSI, or for this frame in the case of left/right only partial 594 * update. 595 */ 596 if (phys_enc->split_role == ENC_ROLE_SOLO) { 597 if (hw_mdptop->ops.setup_split_pipe) 598 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); 599 return; 600 } 601 602 cfg.en = true; 603 cfg.mode = phys_enc->intf_mode; 604 cfg.intf = interface; 605 606 if (cfg.en && phys_enc->ops.needs_single_flush && 607 phys_enc->ops.needs_single_flush(phys_enc)) 608 cfg.split_flush_en = true; 609 610 if (phys_enc->split_role == ENC_ROLE_MASTER) { 611 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en); 612 613 if (hw_mdptop->ops.setup_split_pipe) 614 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); 615 } 616 } 617 618 /** 619 * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. 620 * @drm_enc: Pointer to previously created drm encoder structure 621 */ 622 bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) 623 { 624 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 625 int i, intf_count = 0, num_dsc = 0; 626 627 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) 628 if (dpu_enc->phys_encs[i]) 629 intf_count++; 630 631 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 632 if (dpu_enc->hw_dsc[i]) 633 num_dsc++; 634 635 return (num_dsc > 0) && (num_dsc > intf_count); 636 } 637 638 /** 639 * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder 640 * This helper function is used by physical encoder to get DSC config 641 * used for this encoder. 
642 * @drm_enc: Pointer to encoder structure 643 */ 644 struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc) 645 { 646 struct msm_drm_private *priv = drm_enc->dev->dev_private; 647 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 648 int index = dpu_enc->disp_info.h_tile_instance[0]; 649 650 if (dpu_enc->disp_info.intf_type == INTF_DSI) 651 return msm_dsi_get_dsc_config(priv->kms->dsi[index]); 652 653 return NULL; 654 } 655 656 void dpu_encoder_update_topology(struct drm_encoder *drm_enc, 657 struct msm_display_topology *topology, 658 struct drm_atomic_state *state, 659 const struct drm_display_mode *adj_mode) 660 { 661 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 662 struct msm_drm_private *priv = dpu_enc->base.dev->dev_private; 663 struct msm_display_info *disp_info = &dpu_enc->disp_info; 664 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 665 struct drm_connector *connector; 666 struct drm_connector_state *conn_state; 667 struct drm_framebuffer *fb; 668 struct drm_dsc_config *dsc; 669 670 int i; 671 672 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) 673 if (dpu_enc->phys_encs[i]) 674 topology->num_intf++; 675 676 dsc = dpu_encoder_get_dsc_config(drm_enc); 677 678 /* We only support 2 DSC mode (with 2 LM and 1 INTF) */ 679 if (dsc) { 680 /* 681 * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces 682 * when Display Stream Compression (DSC) is enabled, 683 * and when enough DSC blocks are available. 684 * This is power-optimal and can drive up to (including) 4k 685 * screens. 686 */ 687 WARN(topology->num_intf > 2, 688 "DSC topology cannot support more than 2 interfaces\n"); 689 if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2) 690 topology->num_dsc = 2; 691 else 692 topology->num_dsc = 1; 693 } 694 695 connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); 696 if (!connector) 697 return; 698 conn_state = drm_atomic_get_new_connector_state(state, connector); 699 if (!conn_state) 700 return; 701 702 /* 703 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it. 704 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check() 705 * earlier. 
706 */ 707 if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) { 708 fb = conn_state->writeback_job->fb; 709 710 if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) 711 topology->num_cdm++; 712 } else if (disp_info->intf_type == INTF_DP) { 713 if (msm_dp_is_yuv_420_enabled(priv->kms->dp[disp_info->h_tile_instance[0]], 714 adj_mode)) 715 topology->num_cdm++; 716 } 717 } 718 719 bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state) 720 { 721 struct drm_connector *connector; 722 struct drm_connector_state *conn_state; 723 struct drm_framebuffer *fb; 724 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 725 726 if (!drm_enc || !state) 727 return false; 728 729 connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); 730 if (!connector) 731 return false; 732 733 conn_state = drm_atomic_get_new_connector_state(state, connector); 734 if (!conn_state) 735 return false; 736 737 /** 738 * These checks are duplicated from dpu_encoder_update_topology() since 739 * CRTC and encoder don't hold topology information 740 */ 741 if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) { 742 fb = conn_state->writeback_job->fb; 743 if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) { 744 if (!dpu_enc->cur_master->hw_cdm) 745 return true; 746 } else { 747 if (dpu_enc->cur_master->hw_cdm) 748 return true; 749 } 750 } 751 752 return false; 753 } 754 755 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc, 756 struct msm_display_info *disp_info) 757 { 758 struct dpu_vsync_source_cfg vsync_cfg = { 0 }; 759 struct msm_drm_private *priv; 760 struct dpu_kms *dpu_kms; 761 struct dpu_hw_mdp *hw_mdptop; 762 struct drm_encoder *drm_enc; 763 struct dpu_encoder_phys *phys_enc; 764 int i; 765 766 if (!dpu_enc || !disp_info) { 767 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n", 768 dpu_enc != NULL, disp_info != NULL); 769 return; 770 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) { 771 DPU_ERROR("invalid num phys enc %d/%d\n", 772 dpu_enc->num_phys_encs, 773 (int) ARRAY_SIZE(dpu_enc->hw_pp)); 774 return; 775 } 776 777 drm_enc = &dpu_enc->base; 778 /* this pointers are checked in virt_enable_helper */ 779 priv = drm_enc->dev->dev_private; 780 781 dpu_kms = to_dpu_kms(priv->kms); 782 hw_mdptop = dpu_kms->hw_mdp; 783 if (!hw_mdptop) { 784 DPU_ERROR("invalid mdptop\n"); 785 return; 786 } 787 788 if (hw_mdptop->ops.setup_vsync_source) { 789 for (i = 0; i < dpu_enc->num_phys_encs; i++) 790 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx; 791 792 vsync_cfg.pp_count = dpu_enc->num_phys_encs; 793 vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode); 794 795 vsync_cfg.vsync_source = disp_info->vsync_source; 796 797 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg); 798 799 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 800 phys_enc = dpu_enc->phys_encs[i]; 801 802 if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel) 803 phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf, 804 vsync_cfg.vsync_source); 805 } 806 } 807 } 808 809 static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc) 810 { 811 struct dpu_encoder_virt *dpu_enc; 812 int i; 813 814 if (!drm_enc) { 815 DPU_ERROR("invalid encoder\n"); 816 return; 817 } 818 819 dpu_enc = to_dpu_encoder_virt(drm_enc); 820 821 DPU_DEBUG_ENC(dpu_enc, "\n"); 822 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 823 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 824 825 
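		/*
		 * Each physical encoder registers and enables its own set of
		 * interrupts here (e.g. vblank, pingpong/frame done or
		 * underrun, depending on the encoder type); the virtual
		 * encoder only fans the call out.
		 */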
		phys->ops.irq_enable(phys);
	}
}

static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_disable(phys);
	}
}

static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_enable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* enable DPU core clks */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* enable all the irqs */
	_dpu_encoder_irq_enable(drm_enc);
}

static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_disable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* disable all the irqs */
	_dpu_encoder_irq_disable(drm_enc);

	/* disable DPU core clks */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
					u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, STOP and
	 * PRE_STOP events and return early for other events (i.e. writeback
	 * display).
915 */ 916 if (!dpu_enc->idle_pc_supported && 917 (sw_event != DPU_ENC_RC_EVENT_KICKOFF && 918 sw_event != DPU_ENC_RC_EVENT_STOP && 919 sw_event != DPU_ENC_RC_EVENT_PRE_STOP)) 920 return 0; 921 922 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported, 923 dpu_enc->rc_state, "begin"); 924 925 switch (sw_event) { 926 case DPU_ENC_RC_EVENT_KICKOFF: 927 /* cancel delayed off work, if any */ 928 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) 929 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", 930 sw_event); 931 932 mutex_lock(&dpu_enc->rc_lock); 933 934 /* return if the resource control is already in ON state */ 935 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { 936 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n", 937 DRMID(drm_enc), sw_event); 938 mutex_unlock(&dpu_enc->rc_lock); 939 return 0; 940 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF && 941 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) { 942 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n", 943 DRMID(drm_enc), sw_event, 944 dpu_enc->rc_state); 945 mutex_unlock(&dpu_enc->rc_lock); 946 return -EINVAL; 947 } 948 949 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) 950 _dpu_encoder_irq_enable(drm_enc); 951 else 952 _dpu_encoder_resource_enable(drm_enc); 953 954 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON; 955 956 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 957 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 958 "kickoff"); 959 960 mutex_unlock(&dpu_enc->rc_lock); 961 break; 962 963 case DPU_ENC_RC_EVENT_FRAME_DONE: 964 /* 965 * mutex lock is not used as this event happens at interrupt 966 * context. And locking is not required as, the other events 967 * like KICKOFF and STOP does a wait-for-idle before executing 968 * the resource_control 969 */ 970 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { 971 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n", 972 DRMID(drm_enc), sw_event, 973 dpu_enc->rc_state); 974 return -EINVAL; 975 } 976 977 /* 978 * schedule off work item only when there are no 979 * frames pending 980 */ 981 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) { 982 DRM_DEBUG_KMS("id:%d skip schedule work\n", 983 DRMID(drm_enc)); 984 return 0; 985 } 986 987 queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work, 988 msecs_to_jiffies(dpu_enc->idle_timeout)); 989 990 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 991 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 992 "frame done"); 993 break; 994 995 case DPU_ENC_RC_EVENT_PRE_STOP: 996 /* cancel delayed off work, if any */ 997 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) 998 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", 999 sw_event); 1000 1001 mutex_lock(&dpu_enc->rc_lock); 1002 1003 if (is_vid_mode && 1004 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { 1005 _dpu_encoder_irq_enable(drm_enc); 1006 } 1007 /* skip if is already OFF or IDLE, resources are off already */ 1008 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF || 1009 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { 1010 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n", 1011 DRMID(drm_enc), sw_event, 1012 dpu_enc->rc_state); 1013 mutex_unlock(&dpu_enc->rc_lock); 1014 return 0; 1015 } 1016 1017 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF; 1018 1019 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1020 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1021 "pre stop"); 1022 1023 mutex_unlock(&dpu_enc->rc_lock); 1024 break; 1025 1026 case DPU_ENC_RC_EVENT_STOP: 1027 mutex_lock(&dpu_enc->rc_lock); 1028 1029 /* return if the resource control 
is already in OFF state */ 1030 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) { 1031 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n", 1032 DRMID(drm_enc), sw_event); 1033 mutex_unlock(&dpu_enc->rc_lock); 1034 return 0; 1035 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { 1036 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n", 1037 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 1038 mutex_unlock(&dpu_enc->rc_lock); 1039 return -EINVAL; 1040 } 1041 1042 /** 1043 * expect to arrive here only if in either idle state or pre-off 1044 * and in IDLE state the resources are already disabled 1045 */ 1046 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF) 1047 _dpu_encoder_resource_disable(drm_enc); 1048 1049 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF; 1050 1051 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1052 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1053 "stop"); 1054 1055 mutex_unlock(&dpu_enc->rc_lock); 1056 break; 1057 1058 case DPU_ENC_RC_EVENT_ENTER_IDLE: 1059 mutex_lock(&dpu_enc->rc_lock); 1060 1061 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { 1062 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n", 1063 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 1064 mutex_unlock(&dpu_enc->rc_lock); 1065 return 0; 1066 } 1067 1068 /* 1069 * if we are in ON but a frame was just kicked off, 1070 * ignore the IDLE event, it's probably a stale timer event 1071 */ 1072 if (dpu_enc->frame_busy_mask[0]) { 1073 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n", 1074 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 1075 mutex_unlock(&dpu_enc->rc_lock); 1076 return 0; 1077 } 1078 1079 if (is_vid_mode) 1080 _dpu_encoder_irq_disable(drm_enc); 1081 else 1082 _dpu_encoder_resource_disable(drm_enc); 1083 1084 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE; 1085 1086 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1087 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1088 "idle"); 1089 1090 mutex_unlock(&dpu_enc->rc_lock); 1091 break; 1092 1093 default: 1094 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc), 1095 sw_event); 1096 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1097 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1098 "error"); 1099 break; 1100 } 1101 1102 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1103 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1104 "end"); 1105 return 0; 1106 } 1107 1108 /** 1109 * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. 1110 * @drm_enc: Pointer to previously created drm encoder structure 1111 * @job: Pointer to the current drm writeback job 1112 */ 1113 void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, 1114 struct drm_writeback_job *job) 1115 { 1116 struct dpu_encoder_virt *dpu_enc; 1117 int i; 1118 1119 dpu_enc = to_dpu_encoder_virt(drm_enc); 1120 1121 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1122 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1123 1124 if (phys->ops.prepare_wb_job) 1125 phys->ops.prepare_wb_job(phys, job); 1126 1127 } 1128 } 1129 1130 /** 1131 * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. 
1132 * @drm_enc: Pointer to previously created drm encoder structure 1133 * @job: Pointer to the current drm writeback job 1134 */ 1135 void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, 1136 struct drm_writeback_job *job) 1137 { 1138 struct dpu_encoder_virt *dpu_enc; 1139 int i; 1140 1141 dpu_enc = to_dpu_encoder_virt(drm_enc); 1142 1143 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1144 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1145 1146 if (phys->ops.cleanup_wb_job) 1147 phys->ops.cleanup_wb_job(phys, job); 1148 1149 } 1150 } 1151 1152 static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, 1153 struct drm_crtc_state *crtc_state, 1154 struct drm_connector_state *conn_state) 1155 { 1156 struct dpu_encoder_virt *dpu_enc; 1157 struct msm_drm_private *priv; 1158 struct dpu_kms *dpu_kms; 1159 struct dpu_global_state *global_state; 1160 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; 1161 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; 1162 struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; 1163 struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC]; 1164 int num_ctl, num_pp, num_dsc, num_pp_per_intf; 1165 int num_cwb = 0; 1166 bool is_cwb_encoder; 1167 unsigned int dsc_mask = 0; 1168 unsigned int cwb_mask = 0; 1169 int i; 1170 1171 if (!drm_enc) { 1172 DPU_ERROR("invalid encoder\n"); 1173 return; 1174 } 1175 1176 dpu_enc = to_dpu_encoder_virt(drm_enc); 1177 DPU_DEBUG_ENC(dpu_enc, "\n"); 1178 1179 priv = drm_enc->dev->dev_private; 1180 dpu_kms = to_dpu_kms(priv->kms); 1181 is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) && 1182 dpu_enc->disp_info.intf_type == INTF_WB; 1183 1184 global_state = dpu_kms_get_existing_global_state(dpu_kms); 1185 if (IS_ERR_OR_NULL(global_state)) { 1186 DPU_ERROR("Failed to get global state"); 1187 return; 1188 } 1189 1190 trace_dpu_enc_mode_set(DRMID(drm_enc)); 1191 1192 /* Query resource that have been reserved in atomic check step. */ 1193 if (is_cwb_encoder) { 1194 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1195 drm_enc->crtc, 1196 DPU_HW_BLK_DCWB_PINGPONG, 1197 hw_pp, ARRAY_SIZE(hw_pp)); 1198 num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1199 drm_enc->crtc, 1200 DPU_HW_BLK_CWB, 1201 hw_cwb, ARRAY_SIZE(hw_cwb)); 1202 } else { 1203 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1204 drm_enc->crtc, 1205 DPU_HW_BLK_PINGPONG, hw_pp, 1206 ARRAY_SIZE(hw_pp)); 1207 } 1208 1209 for (i = 0; i < num_cwb; i++) { 1210 dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]); 1211 cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0); 1212 } 1213 1214 dpu_enc->cwb_mask = cwb_mask; 1215 1216 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1217 drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); 1218 1219 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 1220 dpu_enc->hw_pp[i] = i < num_pp ? 
to_dpu_hw_pingpong(hw_pp[i]) 1221 : NULL; 1222 1223 num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1224 drm_enc->crtc, DPU_HW_BLK_DSC, 1225 hw_dsc, ARRAY_SIZE(hw_dsc)); 1226 for (i = 0; i < num_dsc; i++) { 1227 dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]); 1228 dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0); 1229 } 1230 1231 dpu_enc->dsc_mask = dsc_mask; 1232 1233 if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) || 1234 dpu_enc->disp_info.intf_type == INTF_DP) { 1235 struct dpu_hw_blk *hw_cdm = NULL; 1236 1237 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1238 drm_enc->crtc, DPU_HW_BLK_CDM, 1239 &hw_cdm, 1); 1240 dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; 1241 } 1242 1243 /* 1244 * There may be 4 PP and 2 INTF for quad pipe case, so INTF is not 1245 * mapped to PP 1:1. Let's calculate the stride with pipe/INTF 1246 */ 1247 num_pp_per_intf = num_pp / dpu_enc->num_phys_encs; 1248 1249 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1250 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1251 1252 phys->hw_pp = dpu_enc->hw_pp[num_pp_per_intf * i]; 1253 if (!phys->hw_pp) { 1254 DPU_ERROR_ENC(dpu_enc, 1255 "no pp block assigned at idx: %d\n", i); 1256 return; 1257 } 1258 1259 /* Use first (and only) CTL if active CTLs are supported */ 1260 if (num_ctl == 1) 1261 phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]); 1262 else 1263 phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL; 1264 if (!phys->hw_ctl) { 1265 DPU_ERROR_ENC(dpu_enc, 1266 "no ctl block assigned at idx: %d\n", i); 1267 return; 1268 } 1269 1270 phys->cached_mode = crtc_state->adjusted_mode; 1271 if (phys->ops.atomic_mode_set) 1272 phys->ops.atomic_mode_set(phys, crtc_state, conn_state); 1273 } 1274 } 1275 1276 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) 1277 { 1278 struct dpu_encoder_virt *dpu_enc = NULL; 1279 int i; 1280 1281 if (!drm_enc || !drm_enc->dev) { 1282 DPU_ERROR("invalid parameters\n"); 1283 return; 1284 } 1285 1286 dpu_enc = to_dpu_encoder_virt(drm_enc); 1287 if (!dpu_enc || !dpu_enc->cur_master) { 1288 DPU_ERROR("invalid dpu encoder/master\n"); 1289 return; 1290 } 1291 1292 1293 if (dpu_enc->disp_info.intf_type == INTF_DP && 1294 dpu_enc->cur_master->hw_mdptop && 1295 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) 1296 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( 1297 dpu_enc->cur_master->hw_mdptop); 1298 1299 if (dpu_enc->disp_info.is_cmd_mode) 1300 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info); 1301 1302 if (dpu_enc->disp_info.intf_type == INTF_DSI && 1303 !WARN_ON(dpu_enc->num_phys_encs == 0)) { 1304 unsigned bpc = dpu_enc->connector->display_info.bpc; 1305 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 1306 if (!dpu_enc->hw_pp[i]) 1307 continue; 1308 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc); 1309 } 1310 } 1311 } 1312 1313 /** 1314 * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs 1315 * @drm_enc: encoder pointer 1316 */ 1317 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc) 1318 { 1319 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1320 1321 mutex_lock(&dpu_enc->enc_lock); 1322 1323 if (!dpu_enc->enabled) 1324 goto out; 1325 1326 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore) 1327 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave); 1328 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore) 1329 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master); 1330 1331 
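	/*
	 * Re-apply encoder-level state that may have been lost while the
	 * device was runtime suspended: the DP audio interface select, the
	 * command-mode vsync/TE source and the pingpong dither configuration.
	 */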
_dpu_encoder_virt_enable_helper(drm_enc); 1332 1333 out: 1334 mutex_unlock(&dpu_enc->enc_lock); 1335 } 1336 1337 static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc, 1338 struct drm_atomic_state *state) 1339 { 1340 struct dpu_encoder_virt *dpu_enc = NULL; 1341 int ret = 0; 1342 struct drm_display_mode *cur_mode = NULL; 1343 1344 dpu_enc = to_dpu_encoder_virt(drm_enc); 1345 dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc); 1346 1347 atomic_set(&dpu_enc->frame_done_timeout_cnt, 0); 1348 1349 mutex_lock(&dpu_enc->enc_lock); 1350 1351 dpu_enc->commit_done_timedout = false; 1352 1353 dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); 1354 1355 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; 1356 1357 dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc); 1358 1359 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, 1360 cur_mode->vdisplay); 1361 1362 /* always enable slave encoder before master */ 1363 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable) 1364 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave); 1365 1366 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable) 1367 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master); 1368 1369 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 1370 if (ret) { 1371 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n", 1372 ret); 1373 goto out; 1374 } 1375 1376 _dpu_encoder_virt_enable_helper(drm_enc); 1377 1378 dpu_enc->enabled = true; 1379 1380 out: 1381 mutex_unlock(&dpu_enc->enc_lock); 1382 } 1383 1384 static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc, 1385 struct drm_atomic_state *state) 1386 { 1387 struct dpu_encoder_virt *dpu_enc = NULL; 1388 struct drm_crtc *crtc; 1389 struct drm_crtc_state *old_state = NULL; 1390 int i = 0; 1391 1392 dpu_enc = to_dpu_encoder_virt(drm_enc); 1393 DPU_DEBUG_ENC(dpu_enc, "\n"); 1394 1395 crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc); 1396 if (crtc) 1397 old_state = drm_atomic_get_old_crtc_state(state, crtc); 1398 1399 /* 1400 * The encoder is already disabled if self refresh mode was set earlier, 1401 * in the old_state for the corresponding crtc. 
1402 */ 1403 if (old_state && old_state->self_refresh_active) 1404 return; 1405 1406 mutex_lock(&dpu_enc->enc_lock); 1407 dpu_enc->enabled = false; 1408 1409 trace_dpu_enc_disable(DRMID(drm_enc)); 1410 1411 /* wait for idle */ 1412 dpu_encoder_wait_for_tx_complete(drm_enc); 1413 1414 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); 1415 1416 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1417 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1418 1419 if (phys->ops.disable) 1420 phys->ops.disable(phys); 1421 } 1422 1423 1424 /* after phys waits for frame-done, should be no more frames pending */ 1425 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 1426 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); 1427 timer_delete_sync(&dpu_enc->frame_done_timer); 1428 } 1429 1430 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); 1431 1432 dpu_enc->connector = NULL; 1433 1434 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); 1435 1436 mutex_unlock(&dpu_enc->enc_lock); 1437 } 1438 1439 static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog, 1440 struct dpu_rm *dpu_rm, 1441 enum dpu_intf_type type, u32 controller_id) 1442 { 1443 int i = 0; 1444 1445 if (type == INTF_WB) 1446 return NULL; 1447 1448 for (i = 0; i < catalog->intf_count; i++) { 1449 if (catalog->intf[i].type == type 1450 && catalog->intf[i].controller_id == controller_id) { 1451 return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id); 1452 } 1453 } 1454 1455 return NULL; 1456 } 1457 1458 /** 1459 * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception 1460 * @drm_enc: Pointer to drm encoder structure 1461 * @phy_enc: Pointer to physical encoder 1462 * Note: This is called from IRQ handler context. 1463 */ 1464 void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, 1465 struct dpu_encoder_phys *phy_enc) 1466 { 1467 struct dpu_encoder_virt *dpu_enc = NULL; 1468 unsigned long lock_flags; 1469 1470 if (!drm_enc || !phy_enc) 1471 return; 1472 1473 DPU_ATRACE_BEGIN("encoder_vblank_callback"); 1474 dpu_enc = to_dpu_encoder_virt(drm_enc); 1475 1476 atomic_inc(&phy_enc->vsync_cnt); 1477 1478 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1479 if (dpu_enc->crtc) 1480 dpu_crtc_vblank_callback(dpu_enc->crtc); 1481 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1482 1483 DPU_ATRACE_END("encoder_vblank_callback"); 1484 } 1485 1486 /** 1487 * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception 1488 * @drm_enc: Pointer to drm encoder structure 1489 * @phy_enc: Pointer to physical encoder 1490 * Note: This is called from IRQ handler context. 
1491 */ 1492 void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, 1493 struct dpu_encoder_phys *phy_enc) 1494 { 1495 if (!phy_enc) 1496 return; 1497 1498 DPU_ATRACE_BEGIN("encoder_underrun_callback"); 1499 atomic_inc(&phy_enc->underrun_cnt); 1500 1501 /* trigger dump only on the first underrun */ 1502 if (atomic_read(&phy_enc->underrun_cnt) == 1) 1503 msm_disp_snapshot_state(drm_enc->dev); 1504 1505 trace_dpu_enc_underrun_cb(DRMID(drm_enc), 1506 atomic_read(&phy_enc->underrun_cnt)); 1507 DPU_ATRACE_END("encoder_underrun_callback"); 1508 } 1509 1510 /** 1511 * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to 1512 * @drm_enc: encoder pointer 1513 * @crtc: crtc pointer 1514 */ 1515 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) 1516 { 1517 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1518 unsigned long lock_flags; 1519 1520 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1521 /* crtc should always be cleared before re-assigning */ 1522 WARN_ON(crtc && dpu_enc->crtc); 1523 dpu_enc->crtc = crtc; 1524 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1525 } 1526 1527 /** 1528 * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if 1529 * the encoder is assigned to the given crtc 1530 * @drm_enc: encoder pointer 1531 * @crtc: crtc pointer 1532 * @enable: true if vblank should be enabled 1533 */ 1534 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, 1535 struct drm_crtc *crtc, bool enable) 1536 { 1537 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1538 unsigned long lock_flags; 1539 int i; 1540 1541 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable); 1542 1543 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1544 if (dpu_enc->crtc != crtc) { 1545 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1546 return; 1547 } 1548 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1549 1550 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1551 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1552 1553 if (phys->ops.control_vblank_irq) 1554 phys->ops.control_vblank_irq(phys, enable); 1555 } 1556 } 1557 1558 /** 1559 * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys 1560 * encoder completes last request frame 1561 * @drm_enc: Pointer to drm encoder structure 1562 * @ready_phys: Pointer to physical encoder 1563 * @event: Event to process 1564 */ 1565 void dpu_encoder_frame_done_callback( 1566 struct drm_encoder *drm_enc, 1567 struct dpu_encoder_phys *ready_phys, u32 event) 1568 { 1569 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1570 unsigned int i; 1571 1572 if (event & (DPU_ENCODER_FRAME_EVENT_DONE 1573 | DPU_ENCODER_FRAME_EVENT_ERROR 1574 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { 1575 1576 if (!dpu_enc->frame_busy_mask[0]) { 1577 /** 1578 * suppress frame_done without waiter, 1579 * likely autorefresh 1580 */ 1581 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event, 1582 dpu_encoder_helper_get_intf_type(ready_phys->intf_mode), 1583 ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1, 1584 ready_phys->hw_wb ? 
ready_phys->hw_wb->idx : -1); 1585 return; 1586 } 1587 1588 /* One of the physical encoders has become idle */ 1589 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1590 if (dpu_enc->phys_encs[i] == ready_phys) { 1591 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i, 1592 dpu_enc->frame_busy_mask[0]); 1593 clear_bit(i, dpu_enc->frame_busy_mask); 1594 } 1595 } 1596 1597 if (!dpu_enc->frame_busy_mask[0]) { 1598 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 1599 timer_delete(&dpu_enc->frame_done_timer); 1600 1601 dpu_encoder_resource_control(drm_enc, 1602 DPU_ENC_RC_EVENT_FRAME_DONE); 1603 1604 if (dpu_enc->crtc) 1605 dpu_crtc_frame_event_cb(dpu_enc->crtc, event); 1606 } 1607 } else { 1608 if (dpu_enc->crtc) 1609 dpu_crtc_frame_event_cb(dpu_enc->crtc, event); 1610 } 1611 } 1612 1613 static void dpu_encoder_off_work(struct work_struct *work) 1614 { 1615 struct dpu_encoder_virt *dpu_enc = container_of(work, 1616 struct dpu_encoder_virt, delayed_off_work.work); 1617 1618 dpu_encoder_resource_control(&dpu_enc->base, 1619 DPU_ENC_RC_EVENT_ENTER_IDLE); 1620 1621 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL, 1622 DPU_ENCODER_FRAME_EVENT_IDLE); 1623 } 1624 1625 /** 1626 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder 1627 * @drm_enc: Pointer to drm encoder structure 1628 * @phys: Pointer to physical encoder structure 1629 * @extra_flush_bits: Additional bit mask to include in flush trigger 1630 */ 1631 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, 1632 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits) 1633 { 1634 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1635 struct dpu_hw_ctl *ctl; 1636 int pending_kickoff_cnt; 1637 u32 ret = UINT_MAX; 1638 1639 if (!phys->hw_pp) { 1640 DPU_ERROR("invalid pingpong hw\n"); 1641 return; 1642 } 1643 1644 ctl = phys->hw_ctl; 1645 if (!ctl->ops.trigger_flush) { 1646 DPU_ERROR("missing trigger cb\n"); 1647 return; 1648 } 1649 1650 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys); 1651 1652 /* Return early if encoder is writeback and in clone mode */ 1653 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL && 1654 dpu_enc->cwb_mask) { 1655 DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n", 1656 DRMID(drm_enc)); 1657 return; 1658 } 1659 1660 1661 if (extra_flush_bits && ctl->ops.update_pending_flush) 1662 ctl->ops.update_pending_flush(ctl, extra_flush_bits); 1663 1664 ctl->ops.trigger_flush(ctl); 1665 1666 if (ctl->ops.get_pending_flush) 1667 ret = ctl->ops.get_pending_flush(ctl); 1668 1669 trace_dpu_enc_trigger_flush(DRMID(drm_enc), 1670 dpu_encoder_helper_get_intf_type(phys->intf_mode), 1671 phys->hw_intf ? phys->hw_intf->idx : -1, 1672 phys->hw_wb ? 
phys->hw_wb->idx : -1, 1673 pending_kickoff_cnt, ctl->idx, 1674 extra_flush_bits, ret); 1675 } 1676 1677 /** 1678 * _dpu_encoder_trigger_start - trigger start for a physical encoder 1679 * @phys: Pointer to physical encoder structure 1680 */ 1681 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) 1682 { 1683 struct dpu_encoder_virt *dpu_enc; 1684 1685 if (!phys) { 1686 DPU_ERROR("invalid argument(s)\n"); 1687 return; 1688 } 1689 1690 if (!phys->hw_pp) { 1691 DPU_ERROR("invalid pingpong hw\n"); 1692 return; 1693 } 1694 1695 dpu_enc = to_dpu_encoder_virt(phys->parent); 1696 1697 if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL && 1698 dpu_enc->cwb_mask) { 1699 DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent)); 1700 return; 1701 } 1702 1703 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED) 1704 phys->ops.trigger_start(phys); 1705 } 1706 1707 /** 1708 * dpu_encoder_helper_trigger_start - control start helper function 1709 * This helper function may be optionally specified by physical 1710 * encoders if they require ctl_start triggering. 1711 * @phys_enc: Pointer to physical encoder structure 1712 */ 1713 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) 1714 { 1715 struct dpu_hw_ctl *ctl; 1716 1717 ctl = phys_enc->hw_ctl; 1718 if (ctl->ops.trigger_start) { 1719 ctl->ops.trigger_start(ctl); 1720 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx); 1721 } 1722 } 1723 1724 static int dpu_encoder_helper_wait_event_timeout( 1725 int32_t drm_id, 1726 unsigned int irq_idx, 1727 struct dpu_encoder_wait_info *info) 1728 { 1729 int rc = 0; 1730 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms; 1731 s64 jiffies = msecs_to_jiffies(info->timeout_ms); 1732 s64 time; 1733 1734 do { 1735 rc = wait_event_timeout(*(info->wq), 1736 atomic_read(info->atomic_cnt) == 0, jiffies); 1737 time = ktime_to_ms(ktime_get()); 1738 1739 trace_dpu_enc_wait_event_timeout(drm_id, 1740 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 1741 rc, time, 1742 expected_time, 1743 atomic_read(info->atomic_cnt)); 1744 /* If we timed out, counter is valid and time is less, wait again */ 1745 } while (atomic_read(info->atomic_cnt) && (rc == 0) && 1746 (time < expected_time)); 1747 1748 return rc; 1749 } 1750 1751 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc) 1752 { 1753 struct dpu_encoder_virt *dpu_enc; 1754 struct dpu_hw_ctl *ctl; 1755 int rc; 1756 struct drm_encoder *drm_enc; 1757 1758 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 1759 ctl = phys_enc->hw_ctl; 1760 drm_enc = phys_enc->parent; 1761 1762 if (!ctl->ops.reset) 1763 return; 1764 1765 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc), 1766 ctl->idx); 1767 1768 rc = ctl->ops.reset(ctl); 1769 if (rc) { 1770 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx); 1771 msm_disp_snapshot_state(drm_enc->dev); 1772 } 1773 1774 phys_enc->enable_state = DPU_ENC_ENABLED; 1775 } 1776 1777 /** 1778 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff 1779 * Iterate through the physical encoders and perform consolidated flush 1780 * and/or control start triggering as needed. This is done in the virtual 1781 * encoder rather than the individual physical ones in order to handle 1782 * use cases that require visibility into multiple physical encoders at 1783 * a time. 
1784 * @dpu_enc: Pointer to virtual encoder structure 1785 */ 1786 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) 1787 { 1788 struct dpu_hw_ctl *ctl; 1789 uint32_t i, pending_flush; 1790 unsigned long lock_flags; 1791 1792 pending_flush = 0x0; 1793 1794 /* update pending counts and trigger kickoff ctl flush atomically */ 1795 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1796 1797 /* don't perform flush/start operations for slave encoders */ 1798 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1799 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1800 1801 if (phys->enable_state == DPU_ENC_DISABLED) 1802 continue; 1803 1804 ctl = phys->hw_ctl; 1805 1806 /* 1807 * This is cleared in frame_done worker, which isn't invoked 1808 * for async commits. So don't set this for async, since it'll 1809 * roll over to the next commit. 1810 */ 1811 if (phys->split_role != ENC_ROLE_SLAVE) 1812 set_bit(i, dpu_enc->frame_busy_mask); 1813 1814 if (!phys->ops.needs_single_flush || 1815 !phys->ops.needs_single_flush(phys)) 1816 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0); 1817 else if (ctl->ops.get_pending_flush) 1818 pending_flush |= ctl->ops.get_pending_flush(ctl); 1819 } 1820 1821 /* for split flush, combine pending flush masks and send to master */ 1822 if (pending_flush && dpu_enc->cur_master) { 1823 _dpu_encoder_trigger_flush( 1824 &dpu_enc->base, 1825 dpu_enc->cur_master, 1826 pending_flush); 1827 } 1828 1829 _dpu_encoder_trigger_start(dpu_enc->cur_master); 1830 1831 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1832 } 1833 1834 /** 1835 * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous 1836 * kickoff and trigger the ctl prepare progress for command mode display. 1837 * @drm_enc: encoder pointer 1838 */ 1839 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) 1840 { 1841 struct dpu_encoder_virt *dpu_enc; 1842 struct dpu_encoder_phys *phys; 1843 unsigned int i; 1844 struct dpu_hw_ctl *ctl; 1845 struct msm_display_info *disp_info; 1846 1847 if (!drm_enc) { 1848 DPU_ERROR("invalid encoder\n"); 1849 return; 1850 } 1851 dpu_enc = to_dpu_encoder_virt(drm_enc); 1852 disp_info = &dpu_enc->disp_info; 1853 1854 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1855 phys = dpu_enc->phys_encs[i]; 1856 1857 ctl = phys->hw_ctl; 1858 ctl->ops.clear_pending_flush(ctl); 1859 1860 /* update only for command mode primary ctl */ 1861 if ((phys == dpu_enc->cur_master) && 1862 disp_info->is_cmd_mode 1863 && ctl->ops.trigger_pending) 1864 ctl->ops.trigger_pending(ctl); 1865 } 1866 } 1867 1868 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, 1869 struct drm_display_mode *mode) 1870 { 1871 u64 pclk_rate; 1872 u32 pclk_period; 1873 u32 line_time; 1874 1875 /* 1876 * For linetime calculation, only operate on master encoder. 1877 */ 1878 if (!dpu_enc->cur_master) 1879 return 0; 1880 1881 if (!dpu_enc->cur_master->ops.get_line_count) { 1882 DPU_ERROR("get_line_count function not defined\n"); 1883 return 0; 1884 } 1885 1886 pclk_rate = mode->clock; /* pixel clock in kHz */ 1887 if (pclk_rate == 0) { 1888 DPU_ERROR("pclk is 0, cannot calculate line time\n"); 1889 return 0; 1890 } 1891 1892 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); 1893 if (pclk_period == 0) { 1894 DPU_ERROR("pclk period is 0\n"); 1895 return 0; 1896 } 1897 1898 /* 1899 * Line time calculation based on Pixel clock and HTOTAL. 1900 * Final unit is in ns. 
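 * pclk_period above is the pixel clock period in picoseconds
 * (10^9 / pixel clock in kHz); multiplying by HTOTAL and dividing by
 * 1000 therefore yields the line time in nanoseconds.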
1901 */ 1902 line_time = (pclk_period * mode->htotal) / 1000; 1903 if (line_time == 0) { 1904 DPU_ERROR("line time calculation is 0\n"); 1905 return 0; 1906 } 1907 1908 DPU_DEBUG_ENC(dpu_enc, 1909 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n", 1910 pclk_rate, pclk_period, line_time); 1911 1912 return line_time; 1913 } 1914 1915 /** 1916 * dpu_encoder_vsync_time - get the time of the next vsync 1917 * @drm_enc: encoder pointer 1918 * @wakeup_time: pointer to ktime_t to write the vsync time to 1919 */ 1920 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) 1921 { 1922 struct drm_display_mode *mode; 1923 struct dpu_encoder_virt *dpu_enc; 1924 u32 cur_line; 1925 u32 line_time; 1926 u32 vtotal, time_to_vsync; 1927 ktime_t cur_time; 1928 1929 dpu_enc = to_dpu_encoder_virt(drm_enc); 1930 1931 if (!drm_enc->crtc || !drm_enc->crtc->state) { 1932 DPU_ERROR("crtc/crtc state object is NULL\n"); 1933 return -EINVAL; 1934 } 1935 mode = &drm_enc->crtc->state->adjusted_mode; 1936 1937 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode); 1938 if (!line_time) 1939 return -EINVAL; 1940 1941 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master); 1942 1943 vtotal = mode->vtotal; 1944 if (cur_line >= vtotal) 1945 time_to_vsync = line_time * vtotal; 1946 else 1947 time_to_vsync = line_time * (vtotal - cur_line); 1948 1949 if (time_to_vsync == 0) { 1950 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n", 1951 vtotal); 1952 return -EINVAL; 1953 } 1954 1955 cur_time = ktime_get(); 1956 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync); 1957 1958 DPU_DEBUG_ENC(dpu_enc, 1959 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n", 1960 cur_line, vtotal, time_to_vsync, 1961 ktime_to_ms(cur_time), 1962 ktime_to_ms(*wakeup_time)); 1963 return 0; 1964 } 1965 1966 static u32 1967 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc, 1968 u32 enc_ip_width) 1969 { 1970 int ssm_delay, total_pixels, soft_slice_per_enc; 1971 1972 soft_slice_per_enc = enc_ip_width / dsc->slice_width; 1973 1974 /* 1975 * minimum number of initial line pixels is a sum of: 1976 * 1. sub-stream multiplexer delay (83 groups for 8bpc, 1977 * 91 for 10 bpc) * 3 1978 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3 1979 * 3. the initial xmit delay 1980 * 4. total pipeline delay through the "lock step" of encoder (47) 1981 * 5. 6 additional pixels as the output of the rate buffer is 1982 * 48 bits wide 1983 */ 1984 ssm_delay = ((dsc->bits_per_component < 10) ? 
84 : 92); 1985 total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47; 1986 if (soft_slice_per_enc > 1) 1987 total_pixels += (ssm_delay * 3); 1988 return DIV_ROUND_UP(total_pixels, dsc->slice_width); 1989 } 1990 1991 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl, 1992 struct dpu_hw_dsc *hw_dsc, 1993 struct dpu_hw_pingpong *hw_pp, 1994 struct drm_dsc_config *dsc, 1995 u32 common_mode, 1996 u32 initial_lines) 1997 { 1998 if (hw_dsc->ops.dsc_config) 1999 hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines); 2000 2001 if (hw_dsc->ops.dsc_config_thresh) 2002 hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc); 2003 2004 if (hw_pp->ops.setup_dsc) 2005 hw_pp->ops.setup_dsc(hw_pp); 2006 2007 if (hw_dsc->ops.dsc_bind_pingpong_blk) 2008 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx); 2009 2010 if (hw_pp->ops.enable_dsc) 2011 hw_pp->ops.enable_dsc(hw_pp); 2012 2013 if (ctl->ops.update_pending_flush_dsc) 2014 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2015 } 2016 2017 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, 2018 struct drm_dsc_config *dsc) 2019 { 2020 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2021 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2022 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2023 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2024 int this_frame_slices; 2025 int intf_ip_w, enc_ip_w; 2026 int dsc_common_mode; 2027 int pic_width; 2028 u32 initial_lines; 2029 int num_dsc = 0; 2030 int i; 2031 2032 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2033 hw_pp[i] = dpu_enc->hw_pp[i]; 2034 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2035 2036 if (!hw_pp[i] || !hw_dsc[i]) 2037 break; 2038 2039 num_dsc++; 2040 } 2041 2042 pic_width = dsc->pic_width; 2043 2044 dsc_common_mode = 0; 2045 if (num_dsc > 1) 2046 dsc_common_mode |= DSC_MODE_SPLIT_PANEL; 2047 if (dpu_encoder_use_dsc_merge(enc_master->parent)) 2048 dsc_common_mode |= DSC_MODE_MULTIPLEX; 2049 if (enc_master->intf_mode == INTF_MODE_VIDEO) 2050 dsc_common_mode |= DSC_MODE_VIDEO; 2051 2052 this_frame_slices = pic_width / dsc->slice_width; 2053 intf_ip_w = this_frame_slices * dsc->slice_width; 2054 2055 enc_ip_w = intf_ip_w / num_dsc; 2056 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); 2057 2058 for (i = 0; i < num_dsc; i++) 2059 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], 2060 dsc, dsc_common_mode, initial_lines); 2061 } 2062 2063 /** 2064 * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl 2065 * path (i.e. ctl flush and start) at next appropriate time. 2066 * Immediately: if no previous commit is outstanding. 2067 * Delayed: Block until next trigger can be issued. 
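 * This also signals the KICKOFF event to resource control and, if any
 * physical encoder reports that it needs a hardware reset, resets all
 * physical encoders before (re)programming the DSC pipeline.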
2068 * @drm_enc: encoder pointer 2069 */ 2070 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) 2071 { 2072 struct dpu_encoder_virt *dpu_enc; 2073 struct dpu_encoder_phys *phys; 2074 bool needs_hw_reset = false; 2075 unsigned int i; 2076 2077 dpu_enc = to_dpu_encoder_virt(drm_enc); 2078 2079 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); 2080 2081 /* prepare for next kickoff, may include waiting on previous kickoff */ 2082 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); 2083 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2084 phys = dpu_enc->phys_encs[i]; 2085 if (phys->ops.prepare_for_kickoff) 2086 phys->ops.prepare_for_kickoff(phys); 2087 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 2088 needs_hw_reset = true; 2089 } 2090 DPU_ATRACE_END("enc_prepare_for_kickoff"); 2091 2092 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 2093 2094 /* if any phys needs reset, reset all phys, in-order */ 2095 if (needs_hw_reset) { 2096 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); 2097 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2098 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]); 2099 } 2100 } 2101 2102 if (dpu_enc->dsc) 2103 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); 2104 } 2105 2106 /** 2107 * dpu_encoder_is_valid_for_commit - check if encode has valid parameters for commit. 2108 * @drm_enc: Pointer to drm encoder structure 2109 */ 2110 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) 2111 { 2112 struct dpu_encoder_virt *dpu_enc; 2113 unsigned int i; 2114 struct dpu_encoder_phys *phys; 2115 2116 dpu_enc = to_dpu_encoder_virt(drm_enc); 2117 2118 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { 2119 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2120 phys = dpu_enc->phys_encs[i]; 2121 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) { 2122 DPU_DEBUG("invalid FB not kicking off\n"); 2123 return false; 2124 } 2125 } 2126 } 2127 2128 return true; 2129 } 2130 2131 /** 2132 * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer 2133 * @drm_enc: Pointer to drm encoder structure 2134 */ 2135 void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc) 2136 { 2137 struct dpu_encoder_virt *dpu_enc; 2138 unsigned long timeout_ms; 2139 2140 dpu_enc = to_dpu_encoder_virt(drm_enc); 2141 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / 2142 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); 2143 2144 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); 2145 mod_timer(&dpu_enc->frame_done_timer, 2146 jiffies + msecs_to_jiffies(timeout_ms)); 2147 2148 } 2149 2150 /** 2151 * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path 2152 * (i.e. ctl flush and start) immediately. 
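 * All physical encoders are expected to have been prepared via
 * dpu_encoder_prepare_for_kickoff() before this is called; here only the
 * consolidated flush/start is triggered and any post-kickoff handling is
 * delegated to the physical encoders.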
2153 * @drm_enc: encoder pointer 2154 */ 2155 void dpu_encoder_kickoff(struct drm_encoder *drm_enc) 2156 { 2157 struct dpu_encoder_virt *dpu_enc; 2158 struct dpu_encoder_phys *phys; 2159 unsigned int i; 2160 2161 DPU_ATRACE_BEGIN("encoder_kickoff"); 2162 dpu_enc = to_dpu_encoder_virt(drm_enc); 2163 2164 trace_dpu_enc_kickoff(DRMID(drm_enc)); 2165 2166 /* All phys encs are ready to go, trigger the kickoff */ 2167 _dpu_encoder_kickoff_phys(dpu_enc); 2168 2169 /* allow phys encs to handle any post-kickoff business */ 2170 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2171 phys = dpu_enc->phys_encs[i]; 2172 if (phys->ops.handle_post_kickoff) 2173 phys->ops.handle_post_kickoff(phys); 2174 } 2175 2176 DPU_ATRACE_END("encoder_kickoff"); 2177 } 2178 2179 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) 2180 { 2181 int i, num_lm; 2182 struct dpu_global_state *global_state; 2183 struct dpu_hw_blk *hw_lm[2]; 2184 struct dpu_hw_mixer *hw_mixer[2]; 2185 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2186 2187 /* reset all mixers for this encoder */ 2188 if (ctl->ops.clear_all_blendstages) 2189 ctl->ops.clear_all_blendstages(ctl); 2190 2191 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); 2192 2193 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, 2194 phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 2195 2196 for (i = 0; i < num_lm; i++) { 2197 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); 2198 if (ctl->ops.update_pending_flush_mixer) 2199 ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); 2200 2201 /* clear all blendstages */ 2202 if (ctl->ops.setup_blendstage) 2203 ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); 2204 2205 if (hw_mixer[i]->ops.clear_all_blendstages) 2206 hw_mixer[i]->ops.clear_all_blendstages(hw_mixer[i]); 2207 2208 if (ctl->ops.set_active_lms) 2209 ctl->ops.set_active_lms(ctl, NULL); 2210 2211 if (ctl->ops.set_active_fetch_pipes) 2212 ctl->ops.set_active_fetch_pipes(ctl, NULL); 2213 2214 if (ctl->ops.set_active_pipes) 2215 ctl->ops.set_active_pipes(ctl, NULL); 2216 } 2217 } 2218 2219 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl, 2220 struct dpu_hw_dsc *hw_dsc, 2221 struct dpu_hw_pingpong *hw_pp) 2222 { 2223 if (hw_dsc->ops.dsc_disable) 2224 hw_dsc->ops.dsc_disable(hw_dsc); 2225 2226 if (hw_pp->ops.disable_dsc) 2227 hw_pp->ops.disable_dsc(hw_pp); 2228 2229 if (hw_dsc->ops.dsc_bind_pingpong_blk) 2230 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE); 2231 2232 if (ctl->ops.update_pending_flush_dsc) 2233 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2234 } 2235 2236 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) 2237 { 2238 /* coding only for 2LM, 2enc, 1 dsc config */ 2239 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2240 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2241 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2242 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2243 int i; 2244 2245 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2246 hw_pp[i] = dpu_enc->hw_pp[i]; 2247 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2248 2249 if (hw_pp[i] && hw_dsc[i]) 2250 dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]); 2251 } 2252 } 2253 2254 /** 2255 * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline 2256 * @phys_enc: Pointer to physical encoder structure 2257 */ 2258 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) 2259 { 2260 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2261 struct 
dpu_hw_intf_cfg intf_cfg = { 0 }; 2262 int i; 2263 struct dpu_encoder_virt *dpu_enc; 2264 2265 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2266 2267 ctl->ops.reset(ctl); 2268 2269 dpu_encoder_helper_reset_mixers(phys_enc); 2270 2271 /* 2272 * TODO: move the once-only operation like CTL flush/trigger 2273 * into dpu_encoder_virt_disable() and all operations which need 2274 * to be done per phys encoder into the phys_disable() op. 2275 */ 2276 if (phys_enc->hw_wb) { 2277 /* disable the PP block */ 2278 if (phys_enc->hw_wb->ops.bind_pingpong_blk) 2279 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE); 2280 2281 /* mark WB flush as pending */ 2282 if (ctl->ops.update_pending_flush_wb) 2283 ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); 2284 } else { 2285 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2286 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) 2287 phys_enc->hw_intf->ops.bind_pingpong_blk( 2288 dpu_enc->phys_encs[i]->hw_intf, 2289 PINGPONG_NONE); 2290 2291 /* mark INTF flush as pending */ 2292 if (ctl->ops.update_pending_flush_intf) 2293 ctl->ops.update_pending_flush_intf(ctl, 2294 dpu_enc->phys_encs[i]->hw_intf->idx); 2295 } 2296 } 2297 2298 if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither) 2299 phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL); 2300 2301 if (dpu_enc->cwb_mask) 2302 dpu_encoder_helper_phys_setup_cwb(phys_enc, false); 2303 2304 /* reset the merge 3D HW block */ 2305 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { 2306 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, 2307 BLEND_3D_NONE); 2308 if (ctl->ops.update_pending_flush_merge_3d) 2309 ctl->ops.update_pending_flush_merge_3d(ctl, 2310 phys_enc->hw_pp->merge_3d->idx); 2311 } 2312 2313 if (phys_enc->hw_cdm) { 2314 if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp) 2315 phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm, 2316 PINGPONG_NONE); 2317 if (ctl->ops.update_pending_flush_cdm) 2318 ctl->ops.update_pending_flush_cdm(ctl, 2319 phys_enc->hw_cdm->idx); 2320 } 2321 2322 if (dpu_enc->dsc) { 2323 dpu_encoder_unprep_dsc(dpu_enc); 2324 dpu_enc->dsc = NULL; 2325 } 2326 2327 intf_cfg.stream_sel = 0; /* Don't care value for video mode */ 2328 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); 2329 intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); 2330 intf_cfg.cwb = dpu_enc->cwb_mask; 2331 2332 if (phys_enc->hw_intf) 2333 intf_cfg.intf = phys_enc->hw_intf->idx; 2334 if (phys_enc->hw_wb) 2335 intf_cfg.wb = phys_enc->hw_wb->idx; 2336 2337 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) 2338 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; 2339 2340 if (ctl->ops.reset_intf_cfg) 2341 ctl->ops.reset_intf_cfg(ctl, &intf_cfg); 2342 2343 ctl->ops.trigger_flush(ctl); 2344 ctl->ops.trigger_start(ctl); 2345 ctl->ops.clear_pending_flush(ctl); 2346 } 2347 2348 void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc, 2349 bool enable) 2350 { 2351 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2352 struct dpu_hw_cwb *hw_cwb; 2353 struct dpu_hw_ctl *hw_ctl; 2354 struct dpu_hw_cwb_setup_cfg cwb_cfg; 2355 2356 struct dpu_kms *dpu_kms; 2357 struct dpu_global_state *global_state; 2358 struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC]; 2359 int num_pp; 2360 2361 if (!phys_enc->hw_wb) 2362 return; 2363 2364 hw_ctl = phys_enc->hw_ctl; 2365 2366 if (!phys_enc->hw_ctl) { 2367 DPU_DEBUG("[wb:%d] no ctl assigned\n", 2368 phys_enc->hw_wb->idx - WB_0); 2369 
		return;
	}

	dpu_kms = phys_enc->dpu_kms;
	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					       phys_enc->parent->crtc,
					       DPU_HW_BLK_PINGPONG, rt_pp_list,
					       ARRAY_SIZE(rt_pp_list));

	if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) {
		DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp);
		return;
	}

	/*
	 * The CWB mux supports using LM or DSPP as tap points. For now,
	 * always use LM tap point
	 */
	cwb_cfg.input = INPUT_MODE_LM_OUT;

	for (int i = 0; i < MAX_CWB_PER_ENC; i++) {
		hw_cwb = dpu_enc->hw_cwb[i];
		if (!hw_cwb)
			continue;

		if (enable) {
			struct dpu_hw_pingpong *hw_pp =
				to_dpu_hw_pingpong(rt_pp_list[i]);
			cwb_cfg.pp_idx = hw_pp->idx;
		} else {
			cwb_cfg.pp_idx = PINGPONG_NONE;
		}

		hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg);

		if (hw_ctl->ops.update_pending_flush_cwb)
			hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx);
	}
}

/**
 * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
 * @phys_enc: Pointer to physical encoder
 * @dpu_fmt: Pointer to the format description
 * @output_type: HDMI/WB
 */
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
				       const struct msm_format *dpu_fmt,
				       u32 output_type)
{
	struct dpu_hw_cdm *hw_cdm;
	struct dpu_hw_cdm_cfg *cdm_cfg;
	struct dpu_hw_pingpong *hw_pp;
	int ret;

	if (!phys_enc)
		return;

	cdm_cfg = &phys_enc->cdm_cfg;
	hw_pp = phys_enc->hw_pp;
	hw_cdm = phys_enc->hw_cdm;

	if (!hw_cdm)
		return;

	if (!MSM_FORMAT_IS_YUV(dpu_fmt)) {
		DPU_DEBUG("[enc:%d] cdm_disable fmt:%p4cc\n", DRMID(phys_enc->parent),
			  &dpu_fmt->pixel_format);
		if (hw_cdm->ops.bind_pingpong_blk)
			hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);

		return;
	}

	memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));

	cdm_cfg->output_width = phys_enc->cached_mode.hdisplay;
	cdm_cfg->output_height = phys_enc->cached_mode.vdisplay;
	cdm_cfg->output_fmt = dpu_fmt;
	cdm_cfg->output_type = output_type;
	cdm_cfg->output_bit_depth = MSM_FORMAT_IS_DX(dpu_fmt) ?
2451 CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; 2452 cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l; 2453 2454 /* enable 10 bit logic */ 2455 switch (cdm_cfg->output_fmt->chroma_sample) { 2456 case CHROMA_FULL: 2457 cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 2458 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2459 break; 2460 case CHROMA_H2V1: 2461 cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 2462 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2463 break; 2464 case CHROMA_420: 2465 cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 2466 cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; 2467 break; 2468 case CHROMA_H1V2: 2469 default: 2470 DPU_ERROR("[enc:%d] unsupported chroma sampling type\n", 2471 DRMID(phys_enc->parent)); 2472 cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 2473 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2474 break; 2475 } 2476 2477 DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%p4cc,%d,%d,%d,%d]\n", 2478 DRMID(phys_enc->parent), cdm_cfg->output_width, 2479 cdm_cfg->output_height, &cdm_cfg->output_fmt->pixel_format, 2480 cdm_cfg->output_type, cdm_cfg->output_bit_depth, 2481 cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type); 2482 2483 if (hw_cdm->ops.enable) { 2484 cdm_cfg->pp_id = hw_pp->idx; 2485 ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); 2486 if (ret < 0) { 2487 DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n", 2488 DRMID(phys_enc->parent), ret); 2489 return; 2490 } 2491 } 2492 } 2493 2494 #ifdef CONFIG_DEBUG_FS 2495 static int _dpu_encoder_status_show(struct seq_file *s, void *data) 2496 { 2497 struct drm_encoder *drm_enc = s->private; 2498 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2499 int i; 2500 2501 mutex_lock(&dpu_enc->enc_lock); 2502 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2503 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2504 2505 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d", 2506 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1, 2507 phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1, 2508 atomic_read(&phys->vsync_cnt), 2509 atomic_read(&phys->underrun_cnt), 2510 atomic_read(&dpu_enc->frame_done_timeout_cnt)); 2511 2512 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); 2513 } 2514 mutex_unlock(&dpu_enc->enc_lock); 2515 2516 return 0; 2517 } 2518 2519 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); 2520 2521 static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root) 2522 { 2523 /* don't error check these */ 2524 debugfs_create_file("status", 0600, 2525 root, drm_enc, &_dpu_encoder_status_fops); 2526 } 2527 #else 2528 #define dpu_encoder_debugfs_init NULL 2529 #endif 2530 2531 static int dpu_encoder_virt_add_phys_encs( 2532 struct drm_device *dev, 2533 struct msm_display_info *disp_info, 2534 struct dpu_encoder_virt *dpu_enc, 2535 struct dpu_enc_phys_init_params *params) 2536 { 2537 struct dpu_encoder_phys *enc = NULL; 2538 2539 DPU_DEBUG_ENC(dpu_enc, "\n"); 2540 2541 /* 2542 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types 2543 * in this function, check up-front. 
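 * Note that a single call currently adds only one physical encoder
 * (writeback, command or video mode), so this bound is conservative.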
2544 */ 2545 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= 2546 ARRAY_SIZE(dpu_enc->phys_encs)) { 2547 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", 2548 dpu_enc->num_phys_encs); 2549 return -EINVAL; 2550 } 2551 2552 2553 if (disp_info->intf_type == INTF_WB) { 2554 enc = dpu_encoder_phys_wb_init(dev, params); 2555 2556 if (IS_ERR(enc)) { 2557 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", 2558 PTR_ERR(enc)); 2559 return PTR_ERR(enc); 2560 } 2561 2562 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2563 ++dpu_enc->num_phys_encs; 2564 } else if (disp_info->is_cmd_mode) { 2565 enc = dpu_encoder_phys_cmd_init(dev, params); 2566 2567 if (IS_ERR(enc)) { 2568 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", 2569 PTR_ERR(enc)); 2570 return PTR_ERR(enc); 2571 } 2572 2573 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2574 ++dpu_enc->num_phys_encs; 2575 } else { 2576 enc = dpu_encoder_phys_vid_init(dev, params); 2577 2578 if (IS_ERR(enc)) { 2579 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", 2580 PTR_ERR(enc)); 2581 return PTR_ERR(enc); 2582 } 2583 2584 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2585 ++dpu_enc->num_phys_encs; 2586 } 2587 2588 if (params->split_role == ENC_ROLE_SLAVE) 2589 dpu_enc->cur_slave = enc; 2590 else 2591 dpu_enc->cur_master = enc; 2592 2593 return 0; 2594 } 2595 2596 /** 2597 * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder 2598 * @drm_enc: DRM encoder pointer 2599 * Returns: possible_clones mask 2600 */ 2601 uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc) 2602 { 2603 struct drm_encoder *curr; 2604 int type = drm_enc->encoder_type; 2605 uint32_t clone_mask = drm_encoder_mask(drm_enc); 2606 2607 /* 2608 * Set writeback as possible clones of real-time DSI encoders and vice 2609 * versa 2610 * 2611 * Writeback encoders can't be clones of each other and DSI 2612 * encoders can't be clones of each other. 
2613 * 2614 * TODO: Add DP encoders as valid possible clones for writeback encoders 2615 * (and vice versa) once concurrent writeback has been validated for DP 2616 */ 2617 drm_for_each_encoder(curr, drm_enc->dev) { 2618 if ((type == DRM_MODE_ENCODER_VIRTUAL && 2619 curr->encoder_type == DRM_MODE_ENCODER_DSI) || 2620 (type == DRM_MODE_ENCODER_DSI && 2621 curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL)) 2622 clone_mask |= drm_encoder_mask(curr); 2623 } 2624 2625 return clone_mask; 2626 } 2627 2628 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, 2629 struct dpu_kms *dpu_kms, 2630 struct msm_display_info *disp_info) 2631 { 2632 int ret = 0; 2633 int i = 0; 2634 struct dpu_enc_phys_init_params phys_params; 2635 2636 if (!dpu_enc) { 2637 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); 2638 return -EINVAL; 2639 } 2640 2641 dpu_enc->cur_master = NULL; 2642 2643 memset(&phys_params, 0, sizeof(phys_params)); 2644 phys_params.dpu_kms = dpu_kms; 2645 phys_params.parent = &dpu_enc->base; 2646 phys_params.enc_spinlock = &dpu_enc->enc_spinlock; 2647 2648 WARN_ON(disp_info->num_of_h_tiles < 1); 2649 2650 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); 2651 2652 if (disp_info->intf_type != INTF_WB) 2653 dpu_enc->idle_pc_supported = 2654 dpu_kms->catalog->caps->has_idle_pc; 2655 2656 mutex_lock(&dpu_enc->enc_lock); 2657 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { 2658 /* 2659 * Left-most tile is at index 0, content is controller id 2660 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right 2661 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right 2662 */ 2663 u32 controller_id = disp_info->h_tile_instance[i]; 2664 2665 if (disp_info->num_of_h_tiles > 1) { 2666 if (i == 0) 2667 phys_params.split_role = ENC_ROLE_MASTER; 2668 else 2669 phys_params.split_role = ENC_ROLE_SLAVE; 2670 } else { 2671 phys_params.split_role = ENC_ROLE_SOLO; 2672 } 2673 2674 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", 2675 i, controller_id, phys_params.split_role); 2676 2677 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm, 2678 disp_info->intf_type, 2679 controller_id); 2680 2681 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX) 2682 phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id); 2683 2684 if (!phys_params.hw_intf && !phys_params.hw_wb) { 2685 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i); 2686 ret = -EINVAL; 2687 break; 2688 } 2689 2690 if (phys_params.hw_intf && phys_params.hw_wb) { 2691 DPU_ERROR_ENC(dpu_enc, 2692 "invalid phys both intf and wb block at idx: %d\n", i); 2693 ret = -EINVAL; 2694 break; 2695 } 2696 2697 ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info, 2698 dpu_enc, &phys_params); 2699 if (ret) { 2700 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); 2701 break; 2702 } 2703 } 2704 2705 mutex_unlock(&dpu_enc->enc_lock); 2706 2707 return ret; 2708 } 2709 2710 static void dpu_encoder_frame_done_timeout(struct timer_list *t) 2711 { 2712 struct dpu_encoder_virt *dpu_enc = timer_container_of(dpu_enc, t, 2713 frame_done_timer); 2714 struct drm_encoder *drm_enc = &dpu_enc->base; 2715 u32 event; 2716 2717 if (!drm_enc->dev) { 2718 DPU_ERROR("invalid parameters\n"); 2719 return; 2720 } 2721 2722 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc) { 2723 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", 2724 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); 2725 return; 2726 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) 
{ 2727 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); 2728 return; 2729 } 2730 2731 DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n"); 2732 2733 if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1) 2734 msm_disp_snapshot_state(drm_enc->dev); 2735 2736 event = DPU_ENCODER_FRAME_EVENT_ERROR; 2737 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); 2738 dpu_crtc_frame_event_cb(dpu_enc->crtc, event); 2739 } 2740 2741 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { 2742 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, 2743 .atomic_disable = dpu_encoder_virt_atomic_disable, 2744 .atomic_enable = dpu_encoder_virt_atomic_enable, 2745 }; 2746 2747 static const struct drm_encoder_funcs dpu_encoder_funcs = { 2748 .debugfs_init = dpu_encoder_debugfs_init, 2749 }; 2750 2751 /** 2752 * dpu_encoder_init - initialize virtual encoder object 2753 * @dev: Pointer to drm device structure 2754 * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant 2755 * @disp_info: Pointer to display information structure 2756 * Returns: Pointer to newly created drm encoder 2757 */ 2758 struct drm_encoder *dpu_encoder_init(struct drm_device *dev, 2759 int drm_enc_mode, 2760 struct msm_display_info *disp_info) 2761 { 2762 struct msm_drm_private *priv = dev->dev_private; 2763 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 2764 struct dpu_encoder_virt *dpu_enc; 2765 int ret; 2766 2767 dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base, 2768 &dpu_encoder_funcs, drm_enc_mode, NULL); 2769 if (IS_ERR(dpu_enc)) 2770 return ERR_CAST(dpu_enc); 2771 2772 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2773 2774 spin_lock_init(&dpu_enc->enc_spinlock); 2775 dpu_enc->enabled = false; 2776 mutex_init(&dpu_enc->enc_lock); 2777 mutex_init(&dpu_enc->rc_lock); 2778 2779 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2780 if (ret) { 2781 DPU_ERROR("failed to setup encoder\n"); 2782 return ERR_PTR(-ENOMEM); 2783 } 2784 2785 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2786 atomic_set(&dpu_enc->frame_done_timeout_cnt, 0); 2787 timer_setup(&dpu_enc->frame_done_timer, 2788 dpu_encoder_frame_done_timeout, 0); 2789 2790 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2791 dpu_encoder_off_work); 2792 dpu_enc->idle_timeout = IDLE_TIMEOUT; 2793 2794 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); 2795 2796 DPU_DEBUG_ENC(dpu_enc, "created\n"); 2797 2798 return &dpu_enc->base; 2799 } 2800 2801 /** 2802 * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state 2803 * @drm_enc: encoder pointer 2804 * 2805 * Wait for hardware to have flushed the current pending changes to hardware at 2806 * a vblank or CTL_START. Physical encoders will map this differently depending 2807 * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START. 
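 * On the first timeout the encoder is marked as having timed out and a
 * device state snapshot is captured to aid debugging.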
2808 * 2809 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2810 */ 2811 int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc) 2812 { 2813 struct dpu_encoder_virt *dpu_enc = NULL; 2814 int i, ret = 0; 2815 2816 if (!drm_enc) { 2817 DPU_ERROR("invalid encoder\n"); 2818 return -EINVAL; 2819 } 2820 dpu_enc = to_dpu_encoder_virt(drm_enc); 2821 DPU_DEBUG_ENC(dpu_enc, "\n"); 2822 2823 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2824 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2825 2826 if (phys->ops.wait_for_commit_done) { 2827 DPU_ATRACE_BEGIN("wait_for_commit_done"); 2828 ret = phys->ops.wait_for_commit_done(phys); 2829 DPU_ATRACE_END("wait_for_commit_done"); 2830 if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) { 2831 dpu_enc->commit_done_timedout = true; 2832 msm_disp_snapshot_state(drm_enc->dev); 2833 } 2834 if (ret) 2835 return ret; 2836 } 2837 } 2838 2839 return ret; 2840 } 2841 2842 /** 2843 * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel 2844 * @drm_enc: encoder pointer 2845 * 2846 * Wait for the hardware to transfer all the pixels to the panel. Physical 2847 * encoders will map this differently depending on the type: vid mode -> vsync_irq, 2848 * cmd mode -> pp_done. 2849 * 2850 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2851 */ 2852 int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) 2853 { 2854 struct dpu_encoder_virt *dpu_enc = NULL; 2855 int i, ret = 0; 2856 2857 if (!drm_enc) { 2858 DPU_ERROR("invalid encoder\n"); 2859 return -EINVAL; 2860 } 2861 dpu_enc = to_dpu_encoder_virt(drm_enc); 2862 DPU_DEBUG_ENC(dpu_enc, "\n"); 2863 2864 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2865 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2866 2867 if (phys->ops.wait_for_tx_complete) { 2868 DPU_ATRACE_BEGIN("wait_for_tx_complete"); 2869 ret = phys->ops.wait_for_tx_complete(phys); 2870 DPU_ATRACE_END("wait_for_tx_complete"); 2871 if (ret) 2872 return ret; 2873 } 2874 } 2875 2876 return ret; 2877 } 2878 2879 /** 2880 * dpu_encoder_get_intf_mode - get interface mode of the given encoder 2881 * @encoder: Pointer to drm encoder object 2882 */ 2883 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) 2884 { 2885 struct dpu_encoder_virt *dpu_enc = NULL; 2886 2887 if (!encoder) { 2888 DPU_ERROR("invalid encoder\n"); 2889 return INTF_MODE_NONE; 2890 } 2891 dpu_enc = to_dpu_encoder_virt(encoder); 2892 2893 if (dpu_enc->cur_master) 2894 return dpu_enc->cur_master->intf_mode; 2895 2896 if (dpu_enc->num_phys_encs) 2897 return dpu_enc->phys_encs[0]->intf_mode; 2898 2899 return INTF_MODE_NONE; 2900 } 2901 2902 /** 2903 * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder 2904 * @phys_enc: Pointer to physical encoder structure 2905 */ 2906 unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc) 2907 { 2908 struct drm_encoder *encoder = phys_enc->parent; 2909 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2910 2911 return dpu_enc->cwb_mask; 2912 } 2913 2914 /** 2915 * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder 2916 * This helper function is used by physical encoder to get DSC blocks mask 2917 * used for this encoder. 
 * @phys_enc: Pointer to physical encoder structure
 */
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *encoder = phys_enc->parent;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	return dpu_enc->dsc_mask;
}

void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
		struct dpu_enc_phys_init_params *p)
{
	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
	phys_enc->hw_intf = p->hw_intf;
	phys_enc->hw_wb = p->hw_wb;
	phys_enc->parent = p->parent;
	phys_enc->dpu_kms = p->dpu_kms;
	phys_enc->split_role = p->split_role;
	phys_enc->enc_spinlock = p->enc_spinlock;
	phys_enc->enable_state = DPU_ENC_DISABLED;

	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);

	atomic_set(&phys_enc->vsync_cnt, 0);
	atomic_set(&phys_enc->underrun_cnt, 0);

	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
}
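/*
 * Illustrative sketch only (not used anywhere in the driver): the helpers
 * below restate, on plain integers, the arithmetic performed by
 * dpu_encoder_start_frame_done_timer() and dpu_encoder_vsync_time(), so the
 * unit conversions are easy to follow. The dpu_example_* names are made up
 * for this sketch, and the numbers in the comment assume a standard CEA
 * 1080p60 timing (148.5 MHz pixel clock, htotal 2200, vtotal 1125): a
 * 5-frame timeout at 60 Hz evaluates to 83 ms, the line time is ~14.8 us,
 * and the remaining time to vsync from line 1000 is ~1.85 ms.
 */
static inline u32 dpu_example_frame_done_timeout_ms(u32 num_frames, u32 vrefresh)
{
	/* same formula as dpu_encoder_start_frame_done_timer() */
	return num_frames * 1000 / vrefresh;
}

static inline u64 dpu_example_time_to_vsync_ns(u32 line_time_ns, u32 vtotal,
					       u32 cur_line)
{
	/* mirrors dpu_encoder_vsync_time(): wait a full frame if past vtotal */
	if (cur_line >= vtotal)
		return (u64)line_time_ns * vtotal;

	return (u64)line_time_ns * (vtotal - cur_line);
}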