// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cwb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and switch
 * between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 4
#define MAX_CWB_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC state
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @commit_done_timedout: True if there has been a timeout on commit after
 *			enabling the encoder.
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization:
 *			only valid after enable, cleared at disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_cwb:		Handle to the CWB muxes used for concurrent writeback
 *			display. The number of CWB muxes can differ from
 *			num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @cwb_mask:		Bitmask of used CWB muxes
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @enc_lock:		Lock around physical encoder
 *			create/destroy/enable/disable
 * @frame_busy_mask:	Bitmask tracking which phys_enc we are still
 *			busy processing current command.
 *			Bit0 = phys_encs[0] etc.
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timeout_cnt:	atomic counter tracking the number of frame
 *				done timeouts
 * @frame_done_timer:	watchdog timer for frame done event
 * @disp_info:		local copy of msm_display_info struct
 * @idle_pc_supported:	indicate if idle power collapse is supported
 * @rc_lock:		resource control mutex lock to protect
 *			virt encoder over various state changes
 * @rc_state:		resource controller state
 * @delayed_off_work:	delayed worker to schedule disabling of
 *			clks and resources after IDLE_TIMEOUT time.
 * @topology:		topology of the display
 * @idle_timeout:	idle timeout duration in milliseconds
 * @wide_bus_en:	wide bus is enabled on this interface
 * @dsc:		drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;
	bool commit_done_timedout;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_cwb *hw_cwb[MAX_CWB_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;
	unsigned int cwb_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);

	atomic_t frame_done_timeout_ms;
	atomic_t frame_done_timeout_cnt;
	struct timer_list frame_done_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

/**
 * dpu_encoder_get_drm_fmt - return DRM fourcc format
 * @phys_enc: Pointer to physical encoder structure
 */
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct drm_display_info *info;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	info = &dpu_enc->connector->display_info;
	mode = &phys_enc->cached_mode;

	if (drm_mode_is_420_only(info, mode))
		return DRM_FORMAT_YUV420;

	return DRM_FORMAT_RGB888;
}

/**
 * dpu_encoder_needs_periph_flush - return true if physical encoder requires
 *	peripheral flush
 * @phys_enc: Pointer to physical encoder structure
 */
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct msm_display_info *disp_info;
	struct msm_drm_private *priv;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	priv = drm_enc->dev->dev_private;
	mode = &phys_enc->cached_mode;

	return phys_enc->hw_intf->cap->type == INTF_DP &&
	       msm_dp_needs_periph_flush(priv->kms->dp[disp_info->h_tile_instance[0]], mode);
}

/**
 * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled
 * @drm_enc: Pointer to previously created drm encoder structure
 */
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	const struct msm_display_info *disp_info;
	int index;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	index = disp_info->h_tile_instance[0];

	if (disp_info->intf_type == INTF_DP)
		return msm_dp_wide_bus_available(priv->kms->dp[index]);
	else if (disp_info->intf_type == INTF_DSI)
		return msm_dsi_wide_bus_enabled(priv->kms->dsi[index]);

	return false;
}

/**
 * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
 *	for the encoder.
 * @drm_enc: Pointer to previously created drm encoder structure
 */
bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->dsc ? true : false;
}

/**
 * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
 *	in virtual encoder that can collect CRC values
 * @drm_enc: Pointer to previously created drm encoder structure
 * Returns: Number of physical encoders for given drm encoder
 */
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, num_intf = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->hw_intf && phys->hw_intf->ops.setup_misr
				&& phys->hw_intf->ops.collect_misr)
			num_intf++;
	}

	return num_intf;
}

/**
 * dpu_encoder_setup_misr - enable misr calculations
 * @drm_enc: Pointer to previously created drm encoder structure
 */
void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;

	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
			continue;

		phys->hw_intf->ops.setup_misr(phys->hw_intf);
	}
}

/**
 * dpu_encoder_get_crc - get the crc value from interface blocks
 * @drm_enc: Pointer to previously created drm encoder structure
 * @crcs: array to fill with CRC data
 * @pos: offset into the @crcs array
 * Returns: 0 on success, error otherwise
 */
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
	struct dpu_encoder_virt *dpu_enc;

	int i, rc = 0, entries_added = 0;

	if (!drm_enc->crtc) {
		DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
			continue;

		rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
		if (rc)
			return rc;
		entries_added++;
	}

	return entries_added;
}
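
/*
 * Program pingpong dithering for the panel depth: 6 bpc panels get the 4x4
 * ordered dither matrix applied on all four components, while any other
 * depth disables dithering by passing a NULL configuration.
 */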
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
			sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
	switch (intf_mode) {
	case INTF_MODE_VIDEO:
		return "INTF_MODE_VIDEO";
	case INTF_MODE_CMD:
		return "INTF_MODE_CMD";
	case INTF_MODE_WB_BLOCK:
		return "INTF_MODE_WB_BLOCK";
	case INTF_MODE_WB_LINE:
		return "INTF_MODE_WB_LINE";
	default:
		return "INTF_MODE_UNKNOWN";
	}
}

/**
 * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
 *	timed out, including reporting frame error event to crtc and debug dump
 * @phys_enc: Pointer to physical encoder structure
 * @intr_idx: Failing interrupt index
 */
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
		  DRMID(phys_enc->parent),
		  dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
		  phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
		  phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
		  phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
			DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

/**
 * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
 *	note: will call dpu_encoder_helper_wait_for_irq on timeout
 * @phys_enc: Pointer to physical encoder structure
 * @irq_idx: IRQ index
 * @func: IRQ callback to be called in case of timeout
 * @wait_info: wait info struct
 * @return: 0 or -ERROR
 */
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		unsigned int irq_idx,
		void (*func)(void *arg),
		struct dpu_encoder_wait_info *wait_info)
{
	u32 irq_status;
	int ret;

	if (!wait_info) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
			  DRMID(phys_enc->parent), func,
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EWOULDBLOCK;
	}

	if (irq_idx == 0) {
		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
			      DRMID(phys_enc->parent), func);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), func,
		      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq_idx,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			func(phys_enc);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}

/**
 * dpu_encoder_get_vsync_count - get vsync count for the encoder.
 * @drm_enc: Pointer to previously created drm encoder structure
 */
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

/**
 * dpu_encoder_get_linecount - get interface line count for the encoder.
 * @drm_enc: Pointer to previously created drm encoder structure
 */
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

/**
 * dpu_encoder_helper_split_config - split display configuration helper function
 *	This helper function may be used by physical encoders to configure
 *	the split display related registers.
 * @phys_enc: Pointer to physical encoder structure
 * @interface: enum dpu_intf setting
 */
void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != INTF_DSI)
		return;

	/**
	 * disable split modes since encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
			phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

/**
 * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
 * @drm_enc: Pointer to previously created drm encoder structure
 */
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		if (dpu_enc->hw_dsc[i])
			num_dsc++;

	return (num_dsc > 0) && (num_dsc > intf_count);
}

/**
 * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
 *	This helper function is used by physical encoder to get DSC config
 *	used for this encoder.
 * @drm_enc: Pointer to encoder structure
 */
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int index = dpu_enc->disp_info.h_tile_instance[0];

	if (dpu_enc->disp_info.intf_type == INTF_DSI)
		return msm_dsi_get_dsc_config(priv->kms->dsi[index]);

	return NULL;
}

void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
				 struct msm_display_topology *topology,
				 struct drm_atomic_state *state,
				 const struct drm_display_mode *adj_mode)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
	struct msm_display_info *disp_info = &dpu_enc->disp_info;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_framebuffer *fb;
	struct drm_dsc_config *dsc;

	int i;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			topology->num_intf++;

	dsc = dpu_encoder_get_dsc_config(drm_enc);

	/*
	 * Set DSC number as 1 to mark the enabled status, will be adjusted
	 * in dpu_crtc_get_topology()
	 */
	if (dsc)
		topology->num_dsc = 1;

	connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
	if (!connector)
		return;
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (!conn_state)
		return;

	/*
	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
	 * earlier.
	 */
	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
		fb = conn_state->writeback_job->fb;

		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
			topology->num_cdm++;
	} else if (disp_info->intf_type == INTF_DP) {
		if (msm_dp_is_yuv_420_enabled(priv->kms->dp[disp_info->h_tile_instance[0]],
					      adj_mode))
			topology->num_cdm++;
	}
}

bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_framebuffer *fb;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc || !state)
		return false;

	connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
	if (!connector)
		return false;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (!conn_state)
		return false;

	/**
	 * These checks are duplicated from dpu_encoder_update_topology() since
	 * CRTC and encoder don't hold topology information
	 */
	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
		fb = conn_state->writeback_job->fb;
		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) {
			if (!dpu_enc->cur_master->hw_cdm)
				return true;
		} else {
			if (dpu_enc->cur_master->hw_cdm)
				return true;
		}
	}

	return false;
}

static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	struct dpu_encoder_phys *phys_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
			  dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
			  dpu_enc->num_phys_encs,
			  (int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);

		vsync_cfg.vsync_source = disp_info->vsync_source;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);

		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys_enc = dpu_enc->phys_encs[i];

			if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
				phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
						vsync_cfg.vsync_source);
		}
	}
}

static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_enable(phys);
	}
}

static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_disable(phys);
	}
}

static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_enable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* enable DPU core clks */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* enable all the irq */
	_dpu_encoder_irq_enable(drm_enc);
}

static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_disable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* disable all the irq */
	_dpu_encoder_irq_disable(drm_enc);

	/* disable DPU core clks */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
	 * STOP events and return early for other events (ie wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
					 DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
					 DRMID(drm_enc), sw_event,
					 dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_enable(drm_enc);
		else
			_dpu_encoder_resource_enable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * mutex lock is not used as this event happens at interrupt
		 * context. And locking is not required as, the other events
		 * like KICKOFF and STOP do a wait-for-idle before executing
		 * the resource_control
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
					DRMID(drm_enc), sw_event,
					dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
					DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_enable(drm_enc);
		}
		/* skip if is already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
					DRMID(drm_enc), sw_event,
					dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
					DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/**
		 * expect to arrive here only if in either idle state or pre-off
		 * and in IDLE state the resources are already disabled
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_disable(drm_enc);
		else
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			 "end");
	return 0;
}

/**
 * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
 * @drm_enc: Pointer to previously created drm encoder structure
 * @job: Pointer to the current drm writeback job
 */
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_wb_job)
			phys->ops.prepare_wb_job(phys, job);

	}
}

/**
 * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
 * @drm_enc: Pointer to previously created drm encoder structure
 * @job: Pointer to the current drm writeback job
 */
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.cleanup_wb_job)
			phys->ops.cleanup_wb_job(phys, job);

	}
}

static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
					     struct drm_crtc_state *crtc_state,
					     struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC];
	int num_ctl, num_pp, num_dsc, num_pp_per_intf;
	int num_cwb = 0;
	bool is_cwb_encoder;
	unsigned int dsc_mask = 0;
	unsigned int cwb_mask = 0;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) &&
			dpu_enc->disp_info.intf_type == INTF_WB;

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	/* Query resources that have been reserved in atomic check step. */
	if (is_cwb_encoder) {
		num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						       drm_enc->crtc,
						       DPU_HW_BLK_DCWB_PINGPONG,
						       hw_pp, ARRAY_SIZE(hw_pp));
		num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
							drm_enc->crtc,
							DPU_HW_BLK_CWB,
							hw_cwb, ARRAY_SIZE(hw_cwb));
	} else {
		num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						       drm_enc->crtc,
						       DPU_HW_BLK_PINGPONG, hw_pp,
						       ARRAY_SIZE(hw_pp));
	}

	for (i = 0; i < num_cwb; i++) {
		dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]);
		cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0);
	}

	dpu_enc->cwb_mask = cwb_mask;

	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
					       : NULL;

	num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						drm_enc->crtc, DPU_HW_BLK_DSC,
						hw_dsc, ARRAY_SIZE(hw_dsc));
	for (i = 0; i < num_dsc; i++) {
		dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
		dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
	}

	dpu_enc->dsc_mask = dsc_mask;

	if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) ||
	    dpu_enc->disp_info.intf_type == INTF_DP) {
		struct dpu_hw_blk *hw_cdm = NULL;

		dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					      drm_enc->crtc, DPU_HW_BLK_CDM,
					      &hw_cdm, 1);
		dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
	}

	/*
	 * There may be 4 PP and 2 INTF for quad pipe case, so INTF is not
	 * mapped to PP 1:1. Let's calculate the stride with pipe/INTF
	 */
	num_pp_per_intf = num_pp / dpu_enc->num_phys_encs;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->hw_pp = dpu_enc->hw_pp[num_pp_per_intf * i];
		if (!phys->hw_pp) {
			DPU_ERROR_ENC(dpu_enc,
				      "no pp block assigned at idx: %d\n", i);
			return;
		}

		/* Use first (and only) CTL if active CTLs are supported */
		if (num_ctl == 1)
			phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]);
		else
			phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
		if (!phys->hw_ctl) {
			DPU_ERROR_ENC(dpu_enc,
				      "no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->cached_mode = crtc_state->adjusted_mode;
		if (phys->ops.atomic_mode_set)
			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}


	if (dpu_enc->disp_info.intf_type == INTF_DP &&
	    dpu_enc->cur_master->hw_mdptop &&
	    dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	if (dpu_enc->disp_info.is_cmd_mode)
		_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == INTF_DSI &&
			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->connector->display_info.bpc;
		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

/**
 * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
 * @drm_enc: encoder pointer
 */
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
					   struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct drm_display_mode *cur_mode = NULL;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);

	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);

	mutex_lock(&dpu_enc->enc_lock);

	dpu_enc->commit_done_timedout = false;

	dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);

	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

	dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc);

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
				ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
					    struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
	if (crtc)
		old_state = drm_atomic_get_old_crtc_state(state, crtc);

	/*
	 * The encoder is already disabled if self refresh mode was set earlier,
	 * in the old_state for the corresponding crtc.
	 */
	if (old_state && old_state->self_refresh_active)
		return;

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_tx_complete(drm_enc);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}


	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		timer_delete_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	dpu_enc->connector = NULL;

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
		struct dpu_rm *dpu_rm,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type == INTF_WB)
		return NULL;

	for (i = 0; i < catalog->intf_count; i++) {
		if (catalog->intf[i].type == type
		    && catalog->intf[i].controller_id == controller_id) {
			return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
		}
	}

	return NULL;
}

/**
 * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
 * @drm_enc: Pointer to drm encoder structure
 * @phy_enc: Pointer to physical encoder
 * Note: This is called from IRQ handler context.
 */
void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	atomic_inc(&phy_enc->vsync_cnt);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	DPU_ATRACE_END("encoder_vblank_callback");
}

/**
 * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception
 * @drm_enc: Pointer to drm encoder structure
 * @phy_enc: Pointer to physical encoder
 * Note: This is called from IRQ handler context.
 */
void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

/**
 * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
 * @drm_enc: encoder pointer
 * @crtc: crtc pointer
 */
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

/**
 * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
 *	the encoder is assigned to the given crtc
 * @drm_enc: encoder pointer
 * @crtc: crtc pointer
 * @enable: true if vblank should be enabled
 */
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
					struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

/**
 * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys
 *	encoder has completed the last requested frame
 * @drm_enc: Pointer to drm encoder structure
 * @ready_phys: Pointer to physical encoder
 * @event: Event to process
 */
void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/**
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
					ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
					ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			timer_delete(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc)
				dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
		}
	} else {
		if (dpu_enc->crtc)
			dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
			DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
			DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	/* Return early if encoder is writeback and in clone mode */
	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
	    dpu_enc->cwb_mask) {
		DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n",
			  DRMID(drm_enc));
		return;
	}


	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
			dpu_encoder_helper_get_intf_type(phys->intf_mode),
			phys->hw_intf ? phys->hw_intf->idx : -1,
			phys->hw_wb ? phys->hw_wb->idx : -1,
			pending_kickoff_cnt, ctl->idx,
			extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	struct dpu_encoder_virt *dpu_enc;

	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys->parent);

	if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
	    dpu_enc->cwb_mask) {
		DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

/**
 * dpu_encoder_helper_trigger_start - control start helper function
 *	This helper function may be optionally specified by physical
 *	encoders if they require ctl_start triggering.
 * @phys_enc: Pointer to physical encoder structure
 */
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		unsigned int irq_idx,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id,
						 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
						 rc, time,
						 expected_time,
						 atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}

static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;
	drm_enc = phys_enc->parent;

	if (!ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
		msm_disp_snapshot_state(drm_enc->dev);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
1773 * @dpu_enc: Pointer to virtual encoder structure 1774 */ 1775 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) 1776 { 1777 struct dpu_hw_ctl *ctl; 1778 uint32_t i, pending_flush; 1779 unsigned long lock_flags; 1780 1781 pending_flush = 0x0; 1782 1783 /* update pending counts and trigger kickoff ctl flush atomically */ 1784 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1785 1786 /* don't perform flush/start operations for slave encoders */ 1787 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1788 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1789 1790 if (phys->enable_state == DPU_ENC_DISABLED) 1791 continue; 1792 1793 ctl = phys->hw_ctl; 1794 1795 /* 1796 * This is cleared in frame_done worker, which isn't invoked 1797 * for async commits. So don't set this for async, since it'll 1798 * roll over to the next commit. 1799 */ 1800 if (phys->split_role != ENC_ROLE_SLAVE) 1801 set_bit(i, dpu_enc->frame_busy_mask); 1802 1803 if (!phys->ops.needs_single_flush || 1804 !phys->ops.needs_single_flush(phys)) 1805 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0); 1806 else if (ctl->ops.get_pending_flush) 1807 pending_flush |= ctl->ops.get_pending_flush(ctl); 1808 } 1809 1810 /* for split flush, combine pending flush masks and send to master */ 1811 if (pending_flush && dpu_enc->cur_master) { 1812 _dpu_encoder_trigger_flush( 1813 &dpu_enc->base, 1814 dpu_enc->cur_master, 1815 pending_flush); 1816 } 1817 1818 _dpu_encoder_trigger_start(dpu_enc->cur_master); 1819 1820 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1821 } 1822 1823 /** 1824 * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous 1825 * kickoff and trigger the ctl prepare progress for command mode display. 1826 * @drm_enc: encoder pointer 1827 */ 1828 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) 1829 { 1830 struct dpu_encoder_virt *dpu_enc; 1831 struct dpu_encoder_phys *phys; 1832 unsigned int i; 1833 struct dpu_hw_ctl *ctl; 1834 struct msm_display_info *disp_info; 1835 1836 if (!drm_enc) { 1837 DPU_ERROR("invalid encoder\n"); 1838 return; 1839 } 1840 dpu_enc = to_dpu_encoder_virt(drm_enc); 1841 disp_info = &dpu_enc->disp_info; 1842 1843 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1844 phys = dpu_enc->phys_encs[i]; 1845 1846 ctl = phys->hw_ctl; 1847 ctl->ops.clear_pending_flush(ctl); 1848 1849 /* update only for command mode primary ctl */ 1850 if ((phys == dpu_enc->cur_master) && 1851 disp_info->is_cmd_mode 1852 && ctl->ops.trigger_pending) 1853 ctl->ops.trigger_pending(ctl); 1854 } 1855 } 1856 1857 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, 1858 struct drm_display_mode *mode) 1859 { 1860 u64 pclk_rate; 1861 u32 pclk_period; 1862 u32 line_time; 1863 1864 /* 1865 * For linetime calculation, only operate on master encoder. 1866 */ 1867 if (!dpu_enc->cur_master) 1868 return 0; 1869 1870 if (!dpu_enc->cur_master->ops.get_line_count) { 1871 DPU_ERROR("get_line_count function not defined\n"); 1872 return 0; 1873 } 1874 1875 pclk_rate = mode->clock; /* pixel clock in kHz */ 1876 if (pclk_rate == 0) { 1877 DPU_ERROR("pclk is 0, cannot calculate line time\n"); 1878 return 0; 1879 } 1880 1881 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); 1882 if (pclk_period == 0) { 1883 DPU_ERROR("pclk period is 0\n"); 1884 return 0; 1885 } 1886 1887 /* 1888 * Line time calculation based on Pixel clock and HTOTAL. 1889 * Final unit is in ns. 
1890 */ 1891 line_time = (pclk_period * mode->htotal) / 1000; 1892 if (line_time == 0) { 1893 DPU_ERROR("line time calculation is 0\n"); 1894 return 0; 1895 } 1896 1897 DPU_DEBUG_ENC(dpu_enc, 1898 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n", 1899 pclk_rate, pclk_period, line_time); 1900 1901 return line_time; 1902 } 1903 1904 /** 1905 * dpu_encoder_vsync_time - get the time of the next vsync 1906 * @drm_enc: encoder pointer 1907 * @wakeup_time: pointer to ktime_t to write the vsync time to 1908 */ 1909 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) 1910 { 1911 struct drm_display_mode *mode; 1912 struct dpu_encoder_virt *dpu_enc; 1913 u32 cur_line; 1914 u32 line_time; 1915 u32 vtotal, time_to_vsync; 1916 ktime_t cur_time; 1917 1918 dpu_enc = to_dpu_encoder_virt(drm_enc); 1919 1920 if (!drm_enc->crtc || !drm_enc->crtc->state) { 1921 DPU_ERROR("crtc/crtc state object is NULL\n"); 1922 return -EINVAL; 1923 } 1924 mode = &drm_enc->crtc->state->adjusted_mode; 1925 1926 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode); 1927 if (!line_time) 1928 return -EINVAL; 1929 1930 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master); 1931 1932 vtotal = mode->vtotal; 1933 if (cur_line >= vtotal) 1934 time_to_vsync = line_time * vtotal; 1935 else 1936 time_to_vsync = line_time * (vtotal - cur_line); 1937 1938 if (time_to_vsync == 0) { 1939 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n", 1940 vtotal); 1941 return -EINVAL; 1942 } 1943 1944 cur_time = ktime_get(); 1945 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync); 1946 1947 DPU_DEBUG_ENC(dpu_enc, 1948 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n", 1949 cur_line, vtotal, time_to_vsync, 1950 ktime_to_ms(cur_time), 1951 ktime_to_ms(*wakeup_time)); 1952 return 0; 1953 } 1954 1955 static u32 1956 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc, 1957 u32 enc_ip_width) 1958 { 1959 int ssm_delay, total_pixels, soft_slice_per_enc; 1960 1961 soft_slice_per_enc = enc_ip_width / dsc->slice_width; 1962 1963 /* 1964 * minimum number of initial line pixels is a sum of: 1965 * 1. sub-stream multiplexer delay (83 groups for 8bpc, 1966 * 91 for 10 bpc) * 3 1967 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3 1968 * 3. the initial xmit delay 1969 * 4. total pipeline delay through the "lock step" of encoder (47) 1970 * 5. 6 additional pixels as the output of the rate buffer is 1971 * 48 bits wide 1972 */ 1973 ssm_delay = ((dsc->bits_per_component < 10) ? 
84 : 92); 1974 total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47; 1975 if (soft_slice_per_enc > 1) 1976 total_pixels += (ssm_delay * 3); 1977 return DIV_ROUND_UP(total_pixels, dsc->slice_width); 1978 } 1979 1980 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl, 1981 struct dpu_hw_dsc *hw_dsc, 1982 struct dpu_hw_pingpong *hw_pp, 1983 struct drm_dsc_config *dsc, 1984 u32 common_mode, 1985 u32 initial_lines) 1986 { 1987 if (hw_dsc->ops.dsc_config) 1988 hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines); 1989 1990 if (hw_dsc->ops.dsc_config_thresh) 1991 hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc); 1992 1993 if (hw_pp->ops.setup_dsc) 1994 hw_pp->ops.setup_dsc(hw_pp); 1995 1996 if (hw_dsc->ops.dsc_bind_pingpong_blk) 1997 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx); 1998 1999 if (hw_pp->ops.enable_dsc) 2000 hw_pp->ops.enable_dsc(hw_pp); 2001 2002 if (ctl->ops.update_pending_flush_dsc) 2003 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2004 } 2005 2006 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, 2007 struct drm_dsc_config *dsc) 2008 { 2009 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2010 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2011 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2012 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2013 int this_frame_slices; 2014 int intf_ip_w, enc_ip_w; 2015 int dsc_common_mode; 2016 int pic_width; 2017 u32 initial_lines; 2018 int num_dsc = 0; 2019 int i; 2020 2021 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2022 hw_pp[i] = dpu_enc->hw_pp[i]; 2023 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2024 2025 if (!hw_pp[i] || !hw_dsc[i]) 2026 break; 2027 2028 num_dsc++; 2029 } 2030 2031 pic_width = dsc->pic_width; 2032 2033 dsc_common_mode = 0; 2034 if (num_dsc > 1) 2035 dsc_common_mode |= DSC_MODE_SPLIT_PANEL; 2036 if (dpu_encoder_use_dsc_merge(enc_master->parent)) 2037 dsc_common_mode |= DSC_MODE_MULTIPLEX; 2038 if (enc_master->intf_mode == INTF_MODE_VIDEO) 2039 dsc_common_mode |= DSC_MODE_VIDEO; 2040 2041 this_frame_slices = pic_width / dsc->slice_width; 2042 intf_ip_w = this_frame_slices * dsc->slice_width; 2043 2044 enc_ip_w = intf_ip_w / num_dsc; 2045 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); 2046 2047 for (i = 0; i < num_dsc; i++) 2048 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], 2049 dsc, dsc_common_mode, initial_lines); 2050 } 2051 2052 /** 2053 * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl 2054 * path (i.e. ctl flush and start) at next appropriate time. 2055 * Immediately: if no previous commit is outstanding. 2056 * Delayed: Block until next trigger can be issued. 
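 *
 * Concretely, this walks every physical encoder and invokes its
 * prepare_for_kickoff() op (which may block on the previous kickoff),
 * raises the DPU_ENC_RC_EVENT_KICKOFF resource control event, performs an
 * ordered hardware reset of all physical encoders if any of them reported
 * DPU_ENC_ERR_NEEDS_HW_RESET, and finally programs DSC when a DSC
 * configuration is attached.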
2057 * @drm_enc: encoder pointer 2058 */ 2059 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) 2060 { 2061 struct dpu_encoder_virt *dpu_enc; 2062 struct dpu_encoder_phys *phys; 2063 bool needs_hw_reset = false; 2064 unsigned int i; 2065 2066 dpu_enc = to_dpu_encoder_virt(drm_enc); 2067 2068 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); 2069 2070 /* prepare for next kickoff, may include waiting on previous kickoff */ 2071 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); 2072 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2073 phys = dpu_enc->phys_encs[i]; 2074 if (phys->ops.prepare_for_kickoff) 2075 phys->ops.prepare_for_kickoff(phys); 2076 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 2077 needs_hw_reset = true; 2078 } 2079 DPU_ATRACE_END("enc_prepare_for_kickoff"); 2080 2081 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 2082 2083 /* if any phys needs reset, reset all phys, in-order */ 2084 if (needs_hw_reset) { 2085 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); 2086 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2087 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]); 2088 } 2089 } 2090 2091 if (dpu_enc->dsc) 2092 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); 2093 } 2094 2095 /** 2096 * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit. 2097 * @drm_enc: Pointer to drm encoder structure 2098 */ 2099 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) 2100 { 2101 struct dpu_encoder_virt *dpu_enc; 2102 unsigned int i; 2103 struct dpu_encoder_phys *phys; 2104 2105 dpu_enc = to_dpu_encoder_virt(drm_enc); 2106 2107 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { 2108 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2109 phys = dpu_enc->phys_encs[i]; 2110 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) { 2111 DPU_DEBUG("invalid FB not kicking off\n"); 2112 return false; 2113 } 2114 } 2115 } 2116 2117 return true; 2118 } 2119 2120 /** 2121 * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer 2122 * @drm_enc: Pointer to drm encoder structure 2123 */ 2124 void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc) 2125 { 2126 struct dpu_encoder_virt *dpu_enc; 2127 unsigned long timeout_ms; 2128 2129 dpu_enc = to_dpu_encoder_virt(drm_enc); 2130 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / 2131 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); 2132 2133 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); 2134 mod_timer(&dpu_enc->frame_done_timer, 2135 jiffies + msecs_to_jiffies(timeout_ms)); 2136 2137 } 2138 2139 /** 2140 * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path 2141 * (i.e. ctl flush and start) immediately.
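 *
 * Callers normally pair this with dpu_encoder_prepare_for_kickoff():
 * prepare first, then kickoff once the frame is fully staged. After the
 * consolidated flush/start has been triggered, each physical encoder is
 * given a chance to run its handle_post_kickoff() op.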
2142 * @drm_enc: encoder pointer 2143 */ 2144 void dpu_encoder_kickoff(struct drm_encoder *drm_enc) 2145 { 2146 struct dpu_encoder_virt *dpu_enc; 2147 struct dpu_encoder_phys *phys; 2148 unsigned int i; 2149 2150 DPU_ATRACE_BEGIN("encoder_kickoff"); 2151 dpu_enc = to_dpu_encoder_virt(drm_enc); 2152 2153 trace_dpu_enc_kickoff(DRMID(drm_enc)); 2154 2155 /* All phys encs are ready to go, trigger the kickoff */ 2156 _dpu_encoder_kickoff_phys(dpu_enc); 2157 2158 /* allow phys encs to handle any post-kickoff business */ 2159 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2160 phys = dpu_enc->phys_encs[i]; 2161 if (phys->ops.handle_post_kickoff) 2162 phys->ops.handle_post_kickoff(phys); 2163 } 2164 2165 DPU_ATRACE_END("encoder_kickoff"); 2166 } 2167 2168 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) 2169 { 2170 int i, num_lm; 2171 struct dpu_global_state *global_state; 2172 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; 2173 struct dpu_hw_mixer *hw_mixer[MAX_CHANNELS_PER_ENC]; 2174 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2175 2176 /* reset all mixers for this encoder */ 2177 if (ctl->ops.clear_all_blendstages) 2178 ctl->ops.clear_all_blendstages(ctl); 2179 2180 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); 2181 2182 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, 2183 phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 2184 2185 for (i = 0; i < num_lm; i++) { 2186 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); 2187 if (ctl->ops.update_pending_flush_mixer) 2188 ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); 2189 2190 /* clear all blendstages */ 2191 if (ctl->ops.setup_blendstage) 2192 ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); 2193 2194 if (hw_mixer[i]->ops.clear_all_blendstages) 2195 hw_mixer[i]->ops.clear_all_blendstages(hw_mixer[i]); 2196 2197 if (ctl->ops.set_active_lms) 2198 ctl->ops.set_active_lms(ctl, NULL); 2199 2200 if (ctl->ops.set_active_fetch_pipes) 2201 ctl->ops.set_active_fetch_pipes(ctl, NULL); 2202 2203 if (ctl->ops.set_active_pipes) 2204 ctl->ops.set_active_pipes(ctl, NULL); 2205 } 2206 } 2207 2208 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl, 2209 struct dpu_hw_dsc *hw_dsc, 2210 struct dpu_hw_pingpong *hw_pp) 2211 { 2212 if (hw_dsc->ops.dsc_disable) 2213 hw_dsc->ops.dsc_disable(hw_dsc); 2214 2215 if (hw_pp->ops.disable_dsc) 2216 hw_pp->ops.disable_dsc(hw_pp); 2217 2218 if (hw_dsc->ops.dsc_bind_pingpong_blk) 2219 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE); 2220 2221 if (ctl->ops.update_pending_flush_dsc) 2222 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2223 } 2224 2225 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) 2226 { 2227 /* coding only for 2LM, 2enc, 1 dsc config */ 2228 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2229 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2230 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2231 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2232 int i; 2233 2234 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2235 hw_pp[i] = dpu_enc->hw_pp[i]; 2236 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2237 2238 if (hw_pp[i] && hw_dsc[i]) 2239 dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]); 2240 } 2241 } 2242 2243 /** 2244 * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline 2245 * @phys_enc: Pointer to physical encoder structure 2246 */ 2247 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) 2248 { 2249 struct dpu_hw_ctl *ctl = 
phys_enc->hw_ctl; 2250 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 2251 int i; 2252 struct dpu_encoder_virt *dpu_enc; 2253 2254 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2255 2256 ctl->ops.reset(ctl); 2257 2258 dpu_encoder_helper_reset_mixers(phys_enc); 2259 2260 /* 2261 * TODO: move the once-only operation like CTL flush/trigger 2262 * into dpu_encoder_virt_disable() and all operations which need 2263 * to be done per phys encoder into the phys_disable() op. 2264 */ 2265 if (phys_enc->hw_wb) { 2266 /* disable the PP block */ 2267 if (phys_enc->hw_wb->ops.bind_pingpong_blk) 2268 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE); 2269 2270 /* mark WB flush as pending */ 2271 if (ctl->ops.update_pending_flush_wb) 2272 ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); 2273 } else { 2274 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2275 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) 2276 phys_enc->hw_intf->ops.bind_pingpong_blk( 2277 dpu_enc->phys_encs[i]->hw_intf, 2278 PINGPONG_NONE); 2279 2280 /* mark INTF flush as pending */ 2281 if (ctl->ops.update_pending_flush_intf) 2282 ctl->ops.update_pending_flush_intf(ctl, 2283 dpu_enc->phys_encs[i]->hw_intf->idx); 2284 } 2285 } 2286 2287 if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither) 2288 phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL); 2289 2290 if (dpu_enc->cwb_mask) 2291 dpu_encoder_helper_phys_setup_cwb(phys_enc, false); 2292 2293 /* reset the merge 3D HW block */ 2294 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { 2295 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, 2296 BLEND_3D_NONE); 2297 if (ctl->ops.update_pending_flush_merge_3d) 2298 ctl->ops.update_pending_flush_merge_3d(ctl, 2299 phys_enc->hw_pp->merge_3d->idx); 2300 } 2301 2302 if (phys_enc->hw_cdm) { 2303 if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp) 2304 phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm, 2305 PINGPONG_NONE); 2306 if (ctl->ops.update_pending_flush_cdm) 2307 ctl->ops.update_pending_flush_cdm(ctl, 2308 phys_enc->hw_cdm->idx); 2309 } 2310 2311 if (dpu_enc->dsc) { 2312 dpu_encoder_unprep_dsc(dpu_enc); 2313 dpu_enc->dsc = NULL; 2314 } 2315 2316 intf_cfg.stream_sel = 0; /* Don't care value for video mode */ 2317 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); 2318 intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); 2319 intf_cfg.cwb = dpu_enc->cwb_mask; 2320 2321 if (phys_enc->hw_intf) 2322 intf_cfg.intf = phys_enc->hw_intf->idx; 2323 if (phys_enc->hw_wb) 2324 intf_cfg.wb = phys_enc->hw_wb->idx; 2325 2326 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) 2327 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; 2328 2329 if (ctl->ops.reset_intf_cfg) 2330 ctl->ops.reset_intf_cfg(ctl, &intf_cfg); 2331 2332 ctl->ops.trigger_flush(ctl); 2333 ctl->ops.trigger_start(ctl); 2334 ctl->ops.clear_pending_flush(ctl); 2335 } 2336 2337 void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc, 2338 bool enable) 2339 { 2340 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2341 struct dpu_hw_cwb *hw_cwb; 2342 struct dpu_hw_ctl *hw_ctl; 2343 struct dpu_hw_cwb_setup_cfg cwb_cfg; 2344 2345 struct dpu_kms *dpu_kms; 2346 struct dpu_global_state *global_state; 2347 struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC]; 2348 int num_pp; 2349 2350 if (!phys_enc->hw_wb) 2351 return; 2352 2353 hw_ctl = phys_enc->hw_ctl; 2354 2355 if (!phys_enc->hw_ctl) { 2356 DPU_DEBUG("[wb:%d] no ctl assigned\n", 2357 
phys_enc->hw_wb->idx - WB_0); 2358 return; 2359 } 2360 2361 dpu_kms = phys_enc->dpu_kms; 2362 global_state = dpu_kms_get_existing_global_state(dpu_kms); 2363 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 2364 phys_enc->parent->crtc, 2365 DPU_HW_BLK_PINGPONG, rt_pp_list, 2366 ARRAY_SIZE(rt_pp_list)); 2367 2368 if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) { 2369 DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp); 2370 return; 2371 } 2372 2373 /* 2374 * The CWB mux supports using LM or DSPP as tap points. For now, 2375 * always use LM tap point 2376 */ 2377 cwb_cfg.input = INPUT_MODE_LM_OUT; 2378 2379 for (int i = 0; i < MAX_CWB_PER_ENC; i++) { 2380 hw_cwb = dpu_enc->hw_cwb[i]; 2381 if (!hw_cwb) 2382 continue; 2383 2384 if (enable) { 2385 struct dpu_hw_pingpong *hw_pp = 2386 to_dpu_hw_pingpong(rt_pp_list[i]); 2387 cwb_cfg.pp_idx = hw_pp->idx; 2388 } else { 2389 cwb_cfg.pp_idx = PINGPONG_NONE; 2390 } 2391 2392 hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg); 2393 2394 if (hw_ctl->ops.update_pending_flush_cwb) 2395 hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx); 2396 } 2397 } 2398 2399 /** 2400 * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block 2401 * @phys_enc: Pointer to physical encoder 2402 * @dpu_fmt: Pointer to the format description 2403 * @output_type: HDMI/WB 2404 */ 2405 void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, 2406 const struct msm_format *dpu_fmt, 2407 u32 output_type) 2408 { 2409 struct dpu_hw_cdm *hw_cdm; 2410 struct dpu_hw_cdm_cfg *cdm_cfg; 2411 struct dpu_hw_pingpong *hw_pp; 2412 int ret; 2413 2414 if (!phys_enc) 2415 return; 2416 2417 cdm_cfg = &phys_enc->cdm_cfg; 2418 hw_pp = phys_enc->hw_pp; 2419 hw_cdm = phys_enc->hw_cdm; 2420 2421 if (!hw_cdm) 2422 return; 2423 2424 if (!MSM_FORMAT_IS_YUV(dpu_fmt)) { 2425 DPU_DEBUG("[enc:%d] cdm_disable fmt:%p4cc\n", DRMID(phys_enc->parent), 2426 &dpu_fmt->pixel_format); 2427 if (hw_cdm->ops.bind_pingpong_blk) 2428 hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE); 2429 2430 return; 2431 } 2432 2433 memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg)); 2434 2435 cdm_cfg->output_width = phys_enc->cached_mode.hdisplay; 2436 cdm_cfg->output_height = phys_enc->cached_mode.vdisplay; 2437 cdm_cfg->output_fmt = dpu_fmt; 2438 cdm_cfg->output_type = output_type; 2439 cdm_cfg->output_bit_depth = MSM_FORMAT_IS_DX(dpu_fmt) ?
2440 CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; 2441 cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l; 2442 2443 /* enable 10 bit logic */ 2444 switch (cdm_cfg->output_fmt->chroma_sample) { 2445 case CHROMA_FULL: 2446 cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 2447 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2448 break; 2449 case CHROMA_H2V1: 2450 cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 2451 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2452 break; 2453 case CHROMA_420: 2454 cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; 2455 cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; 2456 break; 2457 case CHROMA_H1V2: 2458 default: 2459 DPU_ERROR("[enc:%d] unsupported chroma sampling type\n", 2460 DRMID(phys_enc->parent)); 2461 cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; 2462 cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; 2463 break; 2464 } 2465 2466 DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%p4cc,%d,%d,%d,%d]\n", 2467 DRMID(phys_enc->parent), cdm_cfg->output_width, 2468 cdm_cfg->output_height, &cdm_cfg->output_fmt->pixel_format, 2469 cdm_cfg->output_type, cdm_cfg->output_bit_depth, 2470 cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type); 2471 2472 if (hw_cdm->ops.enable) { 2473 cdm_cfg->pp_id = hw_pp->idx; 2474 ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); 2475 if (ret < 0) { 2476 DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n", 2477 DRMID(phys_enc->parent), ret); 2478 return; 2479 } 2480 } 2481 } 2482 2483 #ifdef CONFIG_DEBUG_FS 2484 static int _dpu_encoder_status_show(struct seq_file *s, void *data) 2485 { 2486 struct drm_encoder *drm_enc = s->private; 2487 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2488 int i; 2489 2490 mutex_lock(&dpu_enc->enc_lock); 2491 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2492 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2493 2494 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d", 2495 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1, 2496 phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1, 2497 atomic_read(&phys->vsync_cnt), 2498 atomic_read(&phys->underrun_cnt), 2499 atomic_read(&dpu_enc->frame_done_timeout_cnt)); 2500 2501 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); 2502 } 2503 mutex_unlock(&dpu_enc->enc_lock); 2504 2505 return 0; 2506 } 2507 2508 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); 2509 2510 static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root) 2511 { 2512 /* don't error check these */ 2513 debugfs_create_file("status", 0600, 2514 root, drm_enc, &_dpu_encoder_status_fops); 2515 } 2516 #else 2517 #define dpu_encoder_debugfs_init NULL 2518 #endif 2519 2520 static int dpu_encoder_virt_add_phys_encs( 2521 struct drm_device *dev, 2522 struct msm_display_info *disp_info, 2523 struct dpu_encoder_virt *dpu_enc, 2524 struct dpu_enc_phys_init_params *params) 2525 { 2526 struct dpu_encoder_phys *enc = NULL; 2527 2528 DPU_DEBUG_ENC(dpu_enc, "\n"); 2529 2530 /* 2531 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types 2532 * in this function, check up-front. 
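 * The phys encoder flavour is chosen from the display info: INTF_WB gets
 * a writeback phys encoder, command mode panels get a CMD phys encoder,
 * and everything else falls back to a VID phys encoder. The newly added
 * phys encoder is then recorded as cur_slave or cur_master depending on
 * its split role.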
2533 */ 2534 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= 2535 ARRAY_SIZE(dpu_enc->phys_encs)) { 2536 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", 2537 dpu_enc->num_phys_encs); 2538 return -EINVAL; 2539 } 2540 2541 2542 if (disp_info->intf_type == INTF_WB) { 2543 enc = dpu_encoder_phys_wb_init(dev, params); 2544 2545 if (IS_ERR(enc)) { 2546 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", 2547 PTR_ERR(enc)); 2548 return PTR_ERR(enc); 2549 } 2550 2551 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2552 ++dpu_enc->num_phys_encs; 2553 } else if (disp_info->is_cmd_mode) { 2554 enc = dpu_encoder_phys_cmd_init(dev, params); 2555 2556 if (IS_ERR(enc)) { 2557 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", 2558 PTR_ERR(enc)); 2559 return PTR_ERR(enc); 2560 } 2561 2562 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2563 ++dpu_enc->num_phys_encs; 2564 } else { 2565 enc = dpu_encoder_phys_vid_init(dev, params); 2566 2567 if (IS_ERR(enc)) { 2568 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", 2569 PTR_ERR(enc)); 2570 return PTR_ERR(enc); 2571 } 2572 2573 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2574 ++dpu_enc->num_phys_encs; 2575 } 2576 2577 if (params->split_role == ENC_ROLE_SLAVE) 2578 dpu_enc->cur_slave = enc; 2579 else 2580 dpu_enc->cur_master = enc; 2581 2582 return 0; 2583 } 2584 2585 /** 2586 * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder 2587 * @drm_enc: DRM encoder pointer 2588 * Returns: possible_clones mask 2589 */ 2590 uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc) 2591 { 2592 struct drm_encoder *curr; 2593 int type = drm_enc->encoder_type; 2594 uint32_t clone_mask = drm_encoder_mask(drm_enc); 2595 2596 /* 2597 * Set writeback as possible clones of real-time DSI encoders and vice 2598 * versa 2599 * 2600 * Writeback encoders can't be clones of each other and DSI 2601 * encoders can't be clones of each other. 
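 *
 * The returned mask always includes the encoder itself. As an
 * illustrative example, on a device whose only encoders are one DSI
 * encoder at drm encoder index 0 and one writeback (virtual) encoder at
 * index 1, both encoders would report a possible_clones mask of 0x3.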
2602 * 2603 * TODO: Add DP encoders as valid possible clones for writeback encoders 2604 * (and vice versa) once concurrent writeback has been validated for DP 2605 */ 2606 drm_for_each_encoder(curr, drm_enc->dev) { 2607 if ((type == DRM_MODE_ENCODER_VIRTUAL && 2608 curr->encoder_type == DRM_MODE_ENCODER_DSI) || 2609 (type == DRM_MODE_ENCODER_DSI && 2610 curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL)) 2611 clone_mask |= drm_encoder_mask(curr); 2612 } 2613 2614 return clone_mask; 2615 } 2616 2617 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, 2618 struct dpu_kms *dpu_kms, 2619 struct msm_display_info *disp_info) 2620 { 2621 int ret = 0; 2622 int i = 0; 2623 struct dpu_enc_phys_init_params phys_params; 2624 2625 if (!dpu_enc) { 2626 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); 2627 return -EINVAL; 2628 } 2629 2630 dpu_enc->cur_master = NULL; 2631 2632 memset(&phys_params, 0, sizeof(phys_params)); 2633 phys_params.dpu_kms = dpu_kms; 2634 phys_params.parent = &dpu_enc->base; 2635 phys_params.enc_spinlock = &dpu_enc->enc_spinlock; 2636 2637 WARN_ON(disp_info->num_of_h_tiles < 1); 2638 2639 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); 2640 2641 if (disp_info->intf_type != INTF_WB) 2642 dpu_enc->idle_pc_supported = 2643 dpu_kms->catalog->caps->has_idle_pc; 2644 2645 mutex_lock(&dpu_enc->enc_lock); 2646 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { 2647 /* 2648 * Left-most tile is at index 0, content is controller id 2649 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right 2650 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right 2651 */ 2652 u32 controller_id = disp_info->h_tile_instance[i]; 2653 2654 if (disp_info->num_of_h_tiles > 1) { 2655 if (i == 0) 2656 phys_params.split_role = ENC_ROLE_MASTER; 2657 else 2658 phys_params.split_role = ENC_ROLE_SLAVE; 2659 } else { 2660 phys_params.split_role = ENC_ROLE_SOLO; 2661 } 2662 2663 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", 2664 i, controller_id, phys_params.split_role); 2665 2666 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm, 2667 disp_info->intf_type, 2668 controller_id); 2669 2670 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX) 2671 phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id); 2672 2673 if (!phys_params.hw_intf && !phys_params.hw_wb) { 2674 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i); 2675 ret = -EINVAL; 2676 break; 2677 } 2678 2679 if (phys_params.hw_intf && phys_params.hw_wb) { 2680 DPU_ERROR_ENC(dpu_enc, 2681 "invalid phys both intf and wb block at idx: %d\n", i); 2682 ret = -EINVAL; 2683 break; 2684 } 2685 2686 ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info, 2687 dpu_enc, &phys_params); 2688 if (ret) { 2689 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); 2690 break; 2691 } 2692 } 2693 2694 mutex_unlock(&dpu_enc->enc_lock); 2695 2696 return ret; 2697 } 2698 2699 static void dpu_encoder_frame_done_timeout(struct timer_list *t) 2700 { 2701 struct dpu_encoder_virt *dpu_enc = timer_container_of(dpu_enc, t, 2702 frame_done_timer); 2703 struct drm_encoder *drm_enc = &dpu_enc->base; 2704 u32 event; 2705 2706 if (!drm_enc->dev) { 2707 DPU_ERROR("invalid parameters\n"); 2708 return; 2709 } 2710 2711 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc) { 2712 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", 2713 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); 2714 return; 2715 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) 
{ 2716 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); 2717 return; 2718 } 2719 2720 DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n"); 2721 2722 if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1) 2723 msm_disp_snapshot_state(drm_enc->dev); 2724 2725 event = DPU_ENCODER_FRAME_EVENT_ERROR; 2726 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); 2727 dpu_crtc_frame_event_cb(dpu_enc->crtc, event); 2728 } 2729 2730 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { 2731 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, 2732 .atomic_disable = dpu_encoder_virt_atomic_disable, 2733 .atomic_enable = dpu_encoder_virt_atomic_enable, 2734 }; 2735 2736 static const struct drm_encoder_funcs dpu_encoder_funcs = { 2737 .debugfs_init = dpu_encoder_debugfs_init, 2738 }; 2739 2740 /** 2741 * dpu_encoder_init - initialize virtual encoder object 2742 * @dev: Pointer to drm device structure 2743 * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant 2744 * @disp_info: Pointer to display information structure 2745 * Returns: Pointer to newly created drm encoder 2746 */ 2747 struct drm_encoder *dpu_encoder_init(struct drm_device *dev, 2748 int drm_enc_mode, 2749 struct msm_display_info *disp_info) 2750 { 2751 struct msm_drm_private *priv = dev->dev_private; 2752 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 2753 struct dpu_encoder_virt *dpu_enc; 2754 int ret; 2755 2756 dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base, 2757 &dpu_encoder_funcs, drm_enc_mode, NULL); 2758 if (IS_ERR(dpu_enc)) 2759 return ERR_CAST(dpu_enc); 2760 2761 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2762 2763 spin_lock_init(&dpu_enc->enc_spinlock); 2764 dpu_enc->enabled = false; 2765 mutex_init(&dpu_enc->enc_lock); 2766 mutex_init(&dpu_enc->rc_lock); 2767 2768 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2769 if (ret) { 2770 DPU_ERROR("failed to setup encoder\n"); 2771 return ERR_PTR(-ENOMEM); 2772 } 2773 2774 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2775 atomic_set(&dpu_enc->frame_done_timeout_cnt, 0); 2776 timer_setup(&dpu_enc->frame_done_timer, 2777 dpu_encoder_frame_done_timeout, 0); 2778 2779 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2780 dpu_encoder_off_work); 2781 dpu_enc->idle_timeout = IDLE_TIMEOUT; 2782 2783 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); 2784 2785 DPU_DEBUG_ENC(dpu_enc, "created\n"); 2786 2787 return &dpu_enc->base; 2788 } 2789 2790 /** 2791 * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state 2792 * @drm_enc: encoder pointer 2793 * 2794 * Wait for hardware to have flushed the current pending changes to hardware at 2795 * a vblank or CTL_START. Physical encoders will map this differently depending 2796 * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START. 
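 *
 * On the first -ETIMEDOUT the commit_done_timedout flag is latched and a
 * display snapshot is captured to help debugging; subsequent timeouts
 * simply propagate the error without taking another snapshot.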
2797 * 2798 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2799 */ 2800 int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc) 2801 { 2802 struct dpu_encoder_virt *dpu_enc = NULL; 2803 int i, ret = 0; 2804 2805 if (!drm_enc) { 2806 DPU_ERROR("invalid encoder\n"); 2807 return -EINVAL; 2808 } 2809 dpu_enc = to_dpu_encoder_virt(drm_enc); 2810 DPU_DEBUG_ENC(dpu_enc, "\n"); 2811 2812 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2813 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2814 2815 if (phys->ops.wait_for_commit_done) { 2816 DPU_ATRACE_BEGIN("wait_for_commit_done"); 2817 ret = phys->ops.wait_for_commit_done(phys); 2818 DPU_ATRACE_END("wait_for_commit_done"); 2819 if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) { 2820 dpu_enc->commit_done_timedout = true; 2821 msm_disp_snapshot_state(drm_enc->dev); 2822 } 2823 if (ret) 2824 return ret; 2825 } 2826 } 2827 2828 return ret; 2829 } 2830 2831 /** 2832 * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel 2833 * @drm_enc: encoder pointer 2834 * 2835 * Wait for the hardware to transfer all the pixels to the panel. Physical 2836 * encoders will map this differently depending on the type: vid mode -> vsync_irq, 2837 * cmd mode -> pp_done. 2838 * 2839 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2840 */ 2841 int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) 2842 { 2843 struct dpu_encoder_virt *dpu_enc = NULL; 2844 int i, ret = 0; 2845 2846 if (!drm_enc) { 2847 DPU_ERROR("invalid encoder\n"); 2848 return -EINVAL; 2849 } 2850 dpu_enc = to_dpu_encoder_virt(drm_enc); 2851 DPU_DEBUG_ENC(dpu_enc, "\n"); 2852 2853 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2854 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2855 2856 if (phys->ops.wait_for_tx_complete) { 2857 DPU_ATRACE_BEGIN("wait_for_tx_complete"); 2858 ret = phys->ops.wait_for_tx_complete(phys); 2859 DPU_ATRACE_END("wait_for_tx_complete"); 2860 if (ret) 2861 return ret; 2862 } 2863 } 2864 2865 return ret; 2866 } 2867 2868 /** 2869 * dpu_encoder_get_intf_mode - get interface mode of the given encoder 2870 * @encoder: Pointer to drm encoder object 2871 */ 2872 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) 2873 { 2874 struct dpu_encoder_virt *dpu_enc = NULL; 2875 2876 if (!encoder) { 2877 DPU_ERROR("invalid encoder\n"); 2878 return INTF_MODE_NONE; 2879 } 2880 dpu_enc = to_dpu_encoder_virt(encoder); 2881 2882 if (dpu_enc->cur_master) 2883 return dpu_enc->cur_master->intf_mode; 2884 2885 if (dpu_enc->num_phys_encs) 2886 return dpu_enc->phys_encs[0]->intf_mode; 2887 2888 return INTF_MODE_NONE; 2889 } 2890 2891 /** 2892 * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder 2893 * @phys_enc: Pointer to physical encoder structure 2894 */ 2895 unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc) 2896 { 2897 struct drm_encoder *encoder = phys_enc->parent; 2898 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2899 2900 return dpu_enc->cwb_mask; 2901 } 2902 2903 /** 2904 * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder 2905 * This helper function is used by physical encoder to get DSC blocks mask 2906 * used for this encoder. 
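 * It simply returns the virtual encoder's dsc_mask, mirroring
 * dpu_encoder_helper_get_cwb_mask() above.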
2907 * @phys_enc: Pointer to physical encoder structure 2908 */ 2909 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) 2910 { 2911 struct drm_encoder *encoder = phys_enc->parent; 2912 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2913 2914 return dpu_enc->dsc_mask; 2915 } 2916 2917 void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc, 2918 struct dpu_enc_phys_init_params *p) 2919 { 2920 phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; 2921 phys_enc->hw_intf = p->hw_intf; 2922 phys_enc->hw_wb = p->hw_wb; 2923 phys_enc->parent = p->parent; 2924 phys_enc->dpu_kms = p->dpu_kms; 2925 phys_enc->split_role = p->split_role; 2926 phys_enc->enc_spinlock = p->enc_spinlock; 2927 phys_enc->enable_state = DPU_ENC_DISABLED; 2928 2929 atomic_set(&phys_enc->pending_kickoff_cnt, 0); 2930 atomic_set(&phys_enc->pending_ctlstart_cnt, 0); 2931 2932 atomic_set(&phys_enc->vsync_cnt, 0); 2933 atomic_set(&phys_enc->underrun_cnt, 0); 2934 2935 init_waitqueue_head(&phys_enc->pending_kickoff_wq); 2936 } 2937