// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and
 * switch between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC state
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @commit_done_timedout: True if there has been a timeout on commit after
 *			enabling the encoder.
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization
 *			only valid after enable. Cleared at disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @enc_lock:		Lock around physical encoder
 *			create/destroy/enable/disable
 * @frame_busy_mask:	Bitmask tracking which phys_encs are still busy
 *			processing the current command.
 *			Bit0 = phys_encs[0] etc.
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timeout_cnt:	atomic counter tracking the number of frame
 *				done timeouts
 * @frame_done_timer:	watchdog timer for frame done event
 * @disp_info:		local copy of msm_display_info struct
 * @idle_pc_supported:	indicate if idle power collapse is supported
 * @rc_lock:		resource control mutex lock to protect
 *			virt encoder over various state changes
 * @rc_state:		resource controller state
 * @delayed_off_work:	delayed worker to schedule disabling of
 *			clks and resources after IDLE_TIMEOUT time.
 * @topology:		topology of the display
 * @idle_timeout:	idle timeout duration in milliseconds
 * @wide_bus_en:	wide bus is enabled on this interface
 * @dsc:		drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;
	bool commit_done_timedout;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);

	atomic_t frame_done_timeout_ms;
	atomic_t frame_done_timeout_cnt;
	struct timer_list frame_done_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

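/**
 * dpu_encoder_get_drm_fmt - return DRM fourcc format
 * @phys_enc: Pointer to physical encoder structure
 *
 * Returns: DRM_FORMAT_YUV420 if the connector supports only YUV 4:2:0
 * output for the cached mode, DRM_FORMAT_RGB888 otherwise.
 */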
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct drm_display_info *info;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	info = &dpu_enc->connector->display_info;
	mode = &phys_enc->cached_mode;

	if (drm_mode_is_420_only(info, mode))
		return DRM_FORMAT_YUV420;

	return DRM_FORMAT_RGB888;
}

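/**
 * dpu_encoder_needs_periph_flush - check if peripheral flush is needed
 * @phys_enc: Pointer to physical encoder structure
 *
 * Returns: True if the DP interface driving this encoder requires the
 * peripheral components to be flushed for the cached mode.
 */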
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *drm_enc;
	struct dpu_encoder_virt *dpu_enc;
	struct msm_display_info *disp_info;
	struct msm_drm_private *priv;
	struct drm_display_mode *mode;

	drm_enc = phys_enc->parent;
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	priv = drm_enc->dev->dev_private;
	mode = &phys_enc->cached_mode;

	return phys_enc->hw_intf->cap->type == INTF_DP &&
		msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
}

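/**
 * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled
 * @drm_enc: Pointer to previously created drm encoder structure
 */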
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	const struct msm_display_info *disp_info;
	int index;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	index = disp_info->h_tile_instance[0];

	if (disp_info->intf_type == INTF_DP)
		return msm_dp_wide_bus_available(priv->dp[index]);
	else if (disp_info->intf_type == INTF_DSI)
		return msm_dsi_wide_bus_enabled(priv->dsi[index]);

	return false;
}

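/**
 * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
 *	for the encoder
 * @drm_enc: Pointer to previously created drm encoder structure
 */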
bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->dsc ? true : false;
}

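/**
 * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
 *	in virtual encoder that can collect CRC values
 * @drm_enc: Pointer to previously created drm encoder structure
 *
 * Returns: number of physical encoders for given drm encoder
 */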
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, num_intf = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->hw_intf && phys->hw_intf->ops.setup_misr
				&& phys->hw_intf->ops.collect_misr)
			num_intf++;
	}

	return num_intf;
}

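/**
 * dpu_encoder_setup_misr - enable misr calculations
 * @drm_enc: Pointer to previously created drm encoder structure
 */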
void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
			continue;

		phys->hw_intf->ops.setup_misr(phys->hw_intf);
	}
}

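/**
 * dpu_encoder_get_crc - get the crc value from interface blocks
 * @drm_enc: Pointer to previously created drm encoder structure
 * @crcs: array to fill with CRC data
 * @pos: offset into the @crcs array
 *
 * Returns: number of CRC values written to @crcs, or a negative errno
 */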
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, rc = 0, entries_added = 0;

	if (!drm_enc->crtc) {
		DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
			continue;

		rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
		if (rc)
			return rc;
		entries_added++;
	}

	return entries_added;
}

static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
			sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
	switch (intf_mode) {
	case INTF_MODE_VIDEO:
		return "INTF_MODE_VIDEO";
	case INTF_MODE_CMD:
		return "INTF_MODE_CMD";
	case INTF_MODE_WB_BLOCK:
		return "INTF_MODE_WB_BLOCK";
	case INTF_MODE_WB_LINE:
		return "INTF_MODE_WB_LINE";
	default:
		return "INTF_MODE_UNKNOWN";
	}
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
			DRMID(phys_enc->parent),
			dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
			phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
			phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
			phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
			DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

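/**
 * dpu_encoder_helper_wait_for_irq - utility to wait on an irq's
 *	interrupt count to reach zero, falling back to checking the
 *	interrupt status register on timeout
 * @phys_enc: Pointer to physical encoder structure
 * @irq_idx: encoder interrupt index
 * @func: IRQ callback to be called in case of timeout
 * @wait_info: wait info struct
 *
 * Returns: 0 on success, or a negative errno otherwise
 */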
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		unsigned int irq_idx,
		void (*func)(void *arg),
		struct dpu_encoder_wait_info *wait_info)
{
	u32 irq_status;
	int ret;

	if (!wait_info) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
			  DRMID(phys_enc->parent), func,
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EWOULDBLOCK;
	}

	if (irq_idx == 0) {
		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
			      DRMID(phys_enc->parent), func);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), func,
		      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq_idx,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			func(phys_enc);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;

	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

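/**
 * dpu_encoder_helper_split_config - split display configuration helper function
 *	This helper function may be used by physical encoders to configure
 *	the split display related registers.
 * @phys_enc: Pointer to physical encoder structure
 * @interface: enum dpu_intf setting
 */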
void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != INTF_DSI)
		return;

	/*
	 * disable split modes since encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
			phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

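/**
 * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology
 * @drm_enc: Pointer to previously created drm encoder structure
 *
 * DSC merge is in use when more DSC blocks than interfaces are reserved
 * (the 2:2:1 topology), see dpu_encoder_get_topology().
 */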
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
	if (dpu_enc->dsc)
		num_dsc = 2;

	return (num_dsc > 0) && (num_dsc > intf_count);
}

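/**
 * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
 *	This helper function is used by physical encoder to get DSC config
 *	used for this encoder.
 * @drm_enc: Pointer to encoder structure
 */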
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int index = dpu_enc->disp_info.h_tile_instance[0];

	if (dpu_enc->disp_info.intf_type == INTF_DSI)
		return msm_dsi_get_dsc_config(priv->dsi[index]);

	return NULL;
}

static struct msm_display_topology dpu_encoder_get_topology(
			struct dpu_encoder_virt *dpu_enc,
			struct dpu_kms *dpu_kms,
			struct drm_display_mode *mode,
			struct drm_crtc_state *crtc_state,
			struct drm_dsc_config *dsc)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Add dspps to the reservation requirements if ctm is requested
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	topology.num_intf = intf_count;

	if (dsc) {
		/*
		 * In case of Display Stream Compression (DSC), we would use
		 * 2 DSC encoders, 2 layer mixers and 1 interface
		 * this is power optimal and can drive up to (including) 4k
		 * screens
		 */
		topology.num_dsc = 2;
		topology.num_lm = 2;
		topology.num_intf = 1;
	}

	return topology;
}

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct msm_display_info *disp_info;
	struct dpu_global_state *global_state;
	struct drm_framebuffer *fb;
	struct drm_dsc_config *dsc;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	disp_info = &dpu_enc->disp_info;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	dsc = dpu_encoder_get_dsc_config(drm_enc);

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);

	/*
	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
	 * earlier.
	 */
	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
		fb = conn_state->writeback_job->fb;

		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
			topology.needs_cdm = true;
	} else if (disp_info->intf_type == INTF_DP) {
		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
			topology.needs_cdm = true;
	}

	if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
		crtc_state->mode_changed = true;
	else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
		crtc_state->mode_changed = true;

	/*
	 * Release and Allocate resources on every modeset
	 * Don't allocate when active is false.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		dpu_rm_release(global_state, drm_enc);

		if (!crtc_state->active_changed || crtc_state->enable)
			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
					drm_enc, crtc_state, topology);
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}


static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	struct dpu_encoder_phys *phys_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
					dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
				dpu_enc->num_phys_encs,
				(int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);

		vsync_cfg.vsync_source = disp_info->vsync_source;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);

		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys_enc = dpu_enc->phys_encs[i];

			if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
				phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
						vsync_cfg.vsync_source);
		}
	}
}


static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_enable(phys);
	}
}

static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_disable(phys);
	}
}

static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_enable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* enable DPU core clks */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* enable all the irq */
	_dpu_encoder_irq_enable(drm_enc);
}

static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_disable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* disable all the irq */
	_dpu_encoder_irq_disable(drm_enc);

	/* disable DPU core clks */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

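/**
 * dpu_encoder_resource_control - resource control state machine
 * @drm_enc: Pointer to drm encoder structure
 * @sw_event: One of enum dpu_enc_rc_events
 *
 * Drives the encoder through the resource control states described in
 * enum dpu_enc_rc_events and enum dpu_enc_rc_states, enabling or
 * disabling core clocks and IRQs as needed, serialized by rc_lock.
 */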
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
	 * STOP events and return early for other events (ie wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_enable(drm_enc);
		else
			_dpu_encoder_resource_enable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * mutex lock is not used as this event happens at interrupt
		 * context, and locking is not required as the other events
		 * like KICKOFF and STOP do a wait-for-idle before executing
		 * the resource_control
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
				      DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_enable(drm_enc);
		}
		/* skip if already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/*
		 * expect to arrive here only if in either idle state or pre-off;
		 * in the IDLE state the resources are already disabled
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_disable(drm_enc);
		else
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			"end");
	return 0;
}

void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_wb_job)
			phys->ops.prepare_wb_job(phys, job);
	}
}

void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.cleanup_wb_job)
			phys->ops.cleanup_wb_job(phys, job);
	}
}

static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
					     struct drm_crtc_state *crtc_state,
					     struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_crtc_state *cstate;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
	int num_lm, num_ctl, num_pp, num_dsc;
	unsigned int dsc_mask = 0;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	/* Query resources that have been reserved in the atomic check step. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
		ARRAY_SIZE(hw_pp));
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
		ARRAY_SIZE(hw_dspp));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
						: NULL;

	num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						drm_enc->base.id, DPU_HW_BLK_DSC,
						hw_dsc, ARRAY_SIZE(hw_dsc));
	for (i = 0; i < num_dsc; i++) {
		dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
		dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
	}

	dpu_enc->dsc_mask = dsc_mask;

	if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) ||
	    dpu_enc->disp_info.intf_type == INTF_DP) {
		struct dpu_hw_blk *hw_cdm = NULL;

		dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					      drm_enc->base.id, DPU_HW_BLK_CDM,
					      &hw_cdm, 1);
		dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
	}

	cstate = to_dpu_crtc_state(crtc_state);

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!dpu_enc->hw_pp[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no pp block assigned at idx: %d\n", i);
			return;
		}

		if (!hw_ctl[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->hw_pp = dpu_enc->hw_pp[i];
		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

		phys->cached_mode = crtc_state->adjusted_mode;
		if (phys->ops.atomic_mode_set)
			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}

	if (dpu_enc->disp_info.intf_type == INTF_DP &&
	    dpu_enc->cur_master->hw_mdptop &&
	    dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	if (dpu_enc->disp_info.is_cmd_mode)
		_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == INTF_DSI &&
			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->connector->display_info.bpc;

		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

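/**
 * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
 * @drm_enc: encoder pointer
 */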
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
					struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct drm_display_mode *cur_mode = NULL;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);

	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);

	mutex_lock(&dpu_enc->enc_lock);

	dpu_enc->commit_done_timedout = false;

	dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);

	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

	dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc);

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
				ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
					struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
	if (crtc)
		old_state = drm_atomic_get_old_crtc_state(state, crtc);

	/*
	 * The encoder is already disabled if self refresh mode was set earlier,
	 * in the old_state for the corresponding crtc.
	 */
	if (old_state && old_state->self_refresh_active)
		return;

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_tx_complete(drm_enc);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}

	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		del_timer_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	dpu_enc->connector = NULL;

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
		struct dpu_rm *dpu_rm,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type == INTF_WB)
		return NULL;

	for (i = 0; i < catalog->intf_count; i++) {
		if (catalog->intf[i].type == type
		    && catalog->intf[i].controller_id == controller_id) {
			return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
		}
	}

	return NULL;
}

void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	atomic_inc(&phy_enc->vsync_cnt);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	DPU_ATRACE_END("encoder_vblank_callback");
}

void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
		struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

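/**
 * dpu_encoder_frame_done_callback - notify virtual encoder of frame done event
 * @drm_enc: Pointer to drm encoder structure
 * @ready_phys: Pointer to the physical encoder reporting the event
 * @event: DPU_ENCODER_FRAME_EVENT_* bitmask for the completed frame
 *
 * Clears the reporting encoder from frame_busy_mask and, once the mask is
 * empty, stops the frame done watchdog and forwards the event to the crtc.
 */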
void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/*
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
					ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
					ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			del_timer(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc)
				dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
		}
	} else {
		if (dpu_enc->crtc)
			dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
						DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
				DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
			dpu_encoder_helper_get_intf_type(phys->intf_mode),
			phys->hw_intf ? phys->hw_intf->idx : -1,
			phys->hw_wb ? phys->hw_wb->idx : -1,
			pending_kickoff_cnt, ctl->idx,
			extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		unsigned int irq_idx,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id,
				DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				rc, time,
				expected_time,
				atomic_read(info->atomic_cnt));
	/*
	 * If we timed out, the counter is still nonzero and the deadline
	 * has not passed yet, wait again.
	 */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}

static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;
	drm_enc = phys_enc->parent;

	if (!ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
		msm_disp_snapshot_state(drm_enc->dev);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
 * @dpu_enc: Pointer to virtual encoder structure
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;

		/*
		 * This is cleared in frame_done worker, which isn't invoked
		 * for async commits. So don't set this for async, since it'll
		 * roll over to the next commit.
		 */
		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);

		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
				&dpu_enc->base,
				dpu_enc->cur_master,
				pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

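/**
 * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
 *	kickoff to allow the next update, and trigger the pending state on
 *	the command mode primary ctl
 * @drm_enc: encoder pointer
 */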
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		ctl = phys->hw_ctl;
		ctl->ops.clear_pending_flush(ctl);

		/* update only for command mode primary ctl */
		if ((phys == dpu_enc->cur_master) &&
		    disp_info->is_cmd_mode &&
		    ctl->ops.trigger_pending)
			ctl->ops.trigger_pending(ctl);
	}
}

static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
					   struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
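	/*
	 * Worked example (illustrative numbers, not tied to any panel):
	 * a 148500 kHz pixel clock gives pclk_period =
	 * DIV_ROUND_UP(10^9, 148500) = 6735 ps; with htotal = 2200 the
	 * line time is (6735 * 2200) / 1000 = 14817 ns, i.e. ~14.8 us.
	 */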
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
		      "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
		      pclk_rate, pclk_period, line_time);

	return line_time;
}

int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
	struct drm_display_mode *mode;
	struct dpu_encoder_virt *dpu_enc;
	u32 cur_line;
	u32 line_time;
	u32 vtotal, time_to_vsync;
	ktime_t cur_time;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc->crtc || !drm_enc->crtc->state) {
		DPU_ERROR("crtc/crtc state object is NULL\n");
		return -EINVAL;
	}
	mode = &drm_enc->crtc->state->adjusted_mode;

	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
	if (!line_time)
		return -EINVAL;

	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);

	vtotal = mode->vtotal;
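	/*
	 * If the line counter reads at or past vtotal, assume a full frame
	 * remains until the next vsync; otherwise wait out the remaining
	 * lines of the current frame.
	 */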
	if (cur_line >= vtotal)
		time_to_vsync = line_time * vtotal;
	else
		time_to_vsync = line_time * (vtotal - cur_line);

	if (time_to_vsync == 0) {
		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
			  vtotal);
		return -EINVAL;
	}

	cur_time = ktime_get();
	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);

	DPU_DEBUG_ENC(dpu_enc,
		      "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
		      cur_line, vtotal, time_to_vsync,
		      ktime_to_ms(cur_time),
		      ktime_to_ms(*wakeup_time));
	return 0;
}

static u32
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
				  u32 enc_ip_width)
{
	int ssm_delay, total_pixels, soft_slice_per_enc;

	soft_slice_per_enc = enc_ip_width / dsc->slice_width;

	/*
	 * minimum number of initial line pixels is a sum of:
	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
	 *    91 for 10 bpc) * 3
	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
	 * 3. the initial xmit delay
	 * 4. total pipeline delay through the "lock step" of encoder (47)
	 * 5. 6 additional pixels as the output of the rate buffer is
	 *    48 bits wide
	 */
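	/*
	 * Worked example (illustrative numbers, not from a specific panel):
	 * at 8 bpc, ssm_delay = 84; with initial_xmit_delay = 512 and a
	 * single soft slice per encoder,
	 * total_pixels = 84 * 3 + 512 + 47 = 811, and with slice_width = 540
	 * this yields DIV_ROUND_UP(811, 540) = 2 initial lines.
	 */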
	ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct drm_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct drm_dsc_config *dsc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	dsc_common_mode = 0;
	pic_width = dsc->pic_width;

	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
	if (dpu_encoder_use_dsc_merge(enc_master->parent))
		dsc_common_mode |= DSC_MODE_MULTIPLEX;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->slice_width;
	intf_ip_w = this_frame_slices * dsc->slice_width;
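	/*
	 * E.g. (illustrative): pic_width = 1080 with slice_width = 540 gives
	 * this_frame_slices = 2 and intf_ip_w = 1080.
	 */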

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices needs to be the same on both encoders.
	 */
	enc_ip_w = intf_ip_w / 2;
	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
					 dsc, dsc_common_mode, initial_lines);
}

void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
	}

	if (dpu_enc->dsc)
		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}

bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	unsigned int i;
	struct dpu_encoder_phys *phys;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys = dpu_enc->phys_encs[i];
			if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
				DPU_DEBUG("invalid FB not kicking off\n");
				return false;
			}
		}
	}

	return true;
}

void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned long timeout_ms;
	unsigned int i;

	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

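	/*
	 * Arm the frame-done watchdog: e.g. at a 60 Hz refresh rate this
	 * allows 5 * 1000 / 60 = ~83 ms before the timeout fires.
	 */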
	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
		     drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);

	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
	mod_timer(&dpu_enc->frame_done_timer,
		  jiffies + msecs_to_jiffies(timeout_ms));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	DPU_ATRACE_END("encoder_kickoff");
}

static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_mixer_cfg mixer;
	int i, num_lm;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_lm[2];
	struct dpu_hw_mixer *hw_mixer[2];
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;

	memset(&mixer, 0, sizeof(mixer));

	/* reset all mixers for this encoder */
	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);

	global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);

	num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
					       phys_enc->parent->base.id, DPU_HW_BLK_LM,
					       hw_lm, ARRAY_SIZE(hw_lm));

	for (i = 0; i < num_lm; i++) {
		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
		if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
			phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);

		/* clear all blendstages */
		if (phys_enc->hw_ctl->ops.setup_blendstage)
			phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
	}
}

static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp)
{
	if (hw_dsc->ops.dsc_disable)
		hw_dsc->ops.dsc_disable(hw_dsc);

	if (hw_pp->ops.disable_dsc)
		hw_pp->ops.disable_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (hw_pp[i] && hw_dsc[i])
			dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
	}
}

void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	int i;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);

	phys_enc->hw_ctl->ops.reset(ctl);

	dpu_encoder_helper_reset_mixers(phys_enc);

	/*
	 * TODO: move the once-only operation like CTL flush/trigger
	 * into dpu_encoder_virt_disable() and all operations which need
	 * to be done per phys encoder into the phys_disable() op.
	 */
	if (phys_enc->hw_wb) {
		/* disable the PP block */
		if (phys_enc->hw_wb->ops.bind_pingpong_blk)
			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);

		/* mark WB flush as pending */
		if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
			phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
	} else {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
				phys_enc->hw_intf->ops.bind_pingpong_blk(
						dpu_enc->phys_encs[i]->hw_intf,
						PINGPONG_NONE);

			/* mark INTF flush as pending */
			if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
				phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
						dpu_enc->phys_encs[i]->hw_intf->idx);
		}
	}

	/* reset the merge 3D HW block */
	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
							     BLEND_3D_NONE);
		if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
			phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
					phys_enc->hw_pp->merge_3d->idx);
	}

	if (phys_enc->hw_cdm) {
		if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
			phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
								PINGPONG_NONE);
		if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
			phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
								       phys_enc->hw_cdm->idx);
	}

	if (dpu_enc->dsc) {
		dpu_encoder_unprep_dsc(dpu_enc);
		dpu_enc->dsc = NULL;
	}

	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);

	if (phys_enc->hw_intf)
		intf_cfg.intf = phys_enc->hw_intf->idx;
	if (phys_enc->hw_wb)
		intf_cfg.wb = phys_enc->hw_wb->idx;

	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

	if (ctl->ops.reset_intf_cfg)
		ctl->ops.reset_intf_cfg(ctl, &intf_cfg);

	ctl->ops.trigger_flush(ctl);
	ctl->ops.trigger_start(ctl);
	ctl->ops.clear_pending_flush(ctl);
}

void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
				       const struct msm_format *dpu_fmt,
				       u32 output_type)
{
	struct dpu_hw_cdm *hw_cdm;
	struct dpu_hw_cdm_cfg *cdm_cfg;
	struct dpu_hw_pingpong *hw_pp;
	int ret;

	if (!phys_enc)
		return;

	cdm_cfg = &phys_enc->cdm_cfg;
	hw_pp = phys_enc->hw_pp;
	hw_cdm = phys_enc->hw_cdm;

	if (!hw_cdm)
		return;

	if (!MSM_FORMAT_IS_YUV(dpu_fmt)) {
		DPU_DEBUG("[enc:%d] cdm_disable fmt:%p4cc\n", DRMID(phys_enc->parent),
			  &dpu_fmt->pixel_format);
		if (hw_cdm->ops.bind_pingpong_blk)
			hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);

		return;
	}

	memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));

	cdm_cfg->output_width = phys_enc->cached_mode.hdisplay;
	cdm_cfg->output_height = phys_enc->cached_mode.vdisplay;
	cdm_cfg->output_fmt = dpu_fmt;
	cdm_cfg->output_type = output_type;
	cdm_cfg->output_bit_depth = MSM_FORMAT_IS_DX(dpu_fmt) ?
			CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
	cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;

	/* enable 10 bit logic */
	switch (cdm_cfg->output_fmt->chroma_sample) {
	case CHROMA_FULL:
		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	case CHROMA_H2V1:
		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	case CHROMA_420:
		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
		break;
	case CHROMA_H1V2:
	default:
		DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
			  DRMID(phys_enc->parent));
		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
		break;
	}

	DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%p4cc,%d,%d,%d,%d]\n",
		  DRMID(phys_enc->parent), cdm_cfg->output_width,
		  cdm_cfg->output_height, &cdm_cfg->output_fmt->pixel_format,
		  cdm_cfg->output_type, cdm_cfg->output_bit_depth,
		  cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);

	if (hw_cdm->ops.enable) {
		cdm_cfg->pp_id = hw_pp->idx;
		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
		if (ret < 0) {
			DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
				  DRMID(phys_enc->parent), ret);
			return;
		}
	}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct drm_encoder *drm_enc = s->private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d ",
			   phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
			   phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
			   atomic_read(&phys->vsync_cnt),
			   atomic_read(&phys->underrun_cnt),
			   atomic_read(&dpu_enc->frame_done_timeout_cnt));

		seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);

static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root)
{
	/* don't error check these */
	debugfs_create_file("status", 0600,
			    root, drm_enc, &_dpu_encoder_status_fops);
}
#else
#define dpu_encoder_debugfs_init NULL
#endif

static int dpu_encoder_virt_add_phys_encs(
		struct drm_device *dev,
		struct msm_display_info *disp_info,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
	 * in this function, check up-front.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
	    ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
			      dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (disp_info->intf_type == INTF_WB) {
		enc = dpu_encoder_phys_wb_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
				      PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	} else if (disp_info->is_cmd_mode) {
		enc = dpu_encoder_phys_cmd_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				      PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	} else {
		enc = dpu_encoder_phys_vid_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				      PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}

static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
				     struct dpu_kms *dpu_kms,
				     struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc) {
		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if (disp_info->intf_type != INTF_WB)
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
			  i, controller_id, phys_params.split_role);

		phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
							   disp_info->intf_type,
							   controller_id);

		if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
			phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);

		if (!phys_params.hw_intf && !phys_params.hw_wb) {
			DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
			ret = -EINVAL;
			break;
		}

		if (phys_params.hw_intf && phys_params.hw_wb) {
			DPU_ERROR_ENC(dpu_enc,
				      "invalid phys both intf and wb block at idx: %d\n", i);
			ret = -EINVAL;
			break;
		}

		ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
						     dpu_enc, &phys_params);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
			break;
		}
	}

	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}

static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
						      frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");

	if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
	.atomic_disable = dpu_encoder_virt_atomic_disable,
	.atomic_enable = dpu_encoder_virt_atomic_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.debugfs_init = dpu_encoder_debugfs_init,
};

struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
				     int drm_enc_mode,
				     struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct dpu_encoder_virt *dpu_enc;
	int ret;

	dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
				     &dpu_encoder_funcs, drm_enc_mode, NULL);
	if (IS_ERR(dpu_enc))
		return ERR_CAST(dpu_enc);

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret) {
		DPU_ERROR("failed to setup encoder\n");
		return ERR_PTR(ret);
	}

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
	timer_setup(&dpu_enc->frame_done_timer,
		    dpu_encoder_frame_done_timeout, 0);

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			  dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return &dpu_enc->base;
}

/**
 * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
 * @drm_enc: encoder pointer
 *
 * Wait for hardware to have flushed the current pending changes to hardware at
 * a vblank or CTL_START. Physical encoders will map this differently depending
 * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
 *
 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
 */
int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.wait_for_commit_done) {
			DPU_ATRACE_BEGIN("wait_for_commit_done");
			ret = phys->ops.wait_for_commit_done(phys);
			DPU_ATRACE_END("wait_for_commit_done");
			if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
				dpu_enc->commit_done_timedout = true;
				msm_disp_snapshot_state(drm_enc->dev);
			}
			if (ret)
				return ret;
		}
	}

	return ret;
}

/**
 * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
 * @drm_enc: encoder pointer
 *
 * Wait for the hardware to transfer all the pixels to the panel. Physical
 * encoders will map this differently depending on the type: vid mode -> vsync_irq,
 * cmd mode -> pp_done.
 *
 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
 */
int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.wait_for_tx_complete) {
			DPU_ATRACE_BEGIN("wait_for_tx_complete");
			ret = phys->ops.wait_for_tx_complete(phys);
			DPU_ATRACE_END("wait_for_tx_complete");
			if (ret)
				return ret;
		}
	}

	return ret;
}

enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}

unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *encoder = phys_enc->parent;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	return dpu_enc->dsc_mask;
}

void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
			   struct dpu_enc_phys_init_params *p)
{
	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
	phys_enc->hw_intf = p->hw_intf;
	phys_enc->hw_wb = p->hw_wb;
	phys_enc->parent = p->parent;
	phys_enc->dpu_kms = p->dpu_kms;
	phys_enc->split_role = p->split_role;
	phys_enc->enc_spinlock = p->enc_spinlock;
	phys_enc->enable_state = DPU_ENC_DISABLED;

	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);

	atomic_set(&phys_enc->vsync_cnt, 0);
	atomic_set(&phys_enc->underrun_cnt, 0);

	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
}
