xref: /linux/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c (revision c94cd9508b1335b949fd13ebd269313c65492df0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
5  * Copyright (C) 2013 Red Hat
6  * Author: Rob Clark <robdclark@gmail.com>
7  */
8 
9 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
10 #include <linux/sort.h>
11 #include <linux/debugfs.h>
12 #include <linux/ktime.h>
13 #include <linux/bits.h>
14 
15 #include <drm/drm_atomic.h>
16 #include <drm/drm_blend.h>
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_flip_work.h>
19 #include <drm/drm_framebuffer.h>
20 #include <drm/drm_mode.h>
21 #include <drm/drm_probe_helper.h>
22 #include <drm/drm_rect.h>
23 #include <drm/drm_vblank.h>
24 #include <drm/drm_self_refresh_helper.h>
25 
26 #include "dpu_kms.h"
27 #include "dpu_hw_lm.h"
28 #include "dpu_hw_ctl.h"
29 #include "dpu_hw_dspp.h"
30 #include "dpu_crtc.h"
31 #include "dpu_plane.h"
32 #include "dpu_encoder.h"
33 #include "dpu_vbif.h"
34 #include "dpu_core_perf.h"
35 #include "dpu_trace.h"
36 
37 /* layer mixer index on dpu_crtc */
38 #define LEFT_MIXER 0
39 #define RIGHT_MIXER 1
40 
41 /* timeout in ms waiting for frame done */
42 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60
43 
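/*
 * Convert a DRM CTM coefficient (sign-magnitude S31.32 fixed point) to the
 * 18-bit S3.15-style magnitude programmed into the DSPP PCC block: the sign
 * bit is dropped and the value is truncated from 32 to 15 fractional bits.
 */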
44 #define	CONVERT_S3_15(val) \
45 	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
46 
47 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
48 {
49 	struct msm_drm_private *priv = crtc->dev->dev_private;
50 
51 	return to_dpu_kms(priv->kms);
52 }
53 
54 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
55 {
56 	struct drm_device *dev = crtc->dev;
57 	struct drm_encoder *encoder;
58 
59 	drm_for_each_encoder(encoder, dev)
60 		if (encoder->crtc == crtc)
61 			return encoder;
62 
63 	return NULL;
64 }
65 
66 static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
67 {
68 	if (!src_name ||
69 	    !strcmp(src_name, "none"))
70 		return DPU_CRTC_CRC_SOURCE_NONE;
71 	if (!strcmp(src_name, "auto") ||
72 	    !strcmp(src_name, "lm"))
73 		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
74 	if (!strcmp(src_name, "encoder"))
75 		return DPU_CRTC_CRC_SOURCE_ENCODER;
76 
77 	return DPU_CRTC_CRC_SOURCE_INVALID;
78 }
79 
80 static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
81 		const char *src_name, size_t *values_cnt)
82 {
83 	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
84 	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
85 
86 	if (source < 0) {
87 		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
88 		return -EINVAL;
89 	}
90 
91 	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
92 		*values_cnt = crtc_state->num_mixers;
93 	} else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
94 		struct drm_encoder *drm_enc;
95 
96 		*values_cnt = 0;
97 
98 		drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
99 			*values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
100 	}
101 
102 	return 0;
103 }
104 
105 static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
106 {
107 	struct dpu_crtc_mixer *m;
108 	int i;
109 
110 	for (i = 0; i < crtc_state->num_mixers; ++i) {
111 		m = &crtc_state->mixers[i];
112 
113 		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
114 			continue;
115 
116 		/* Calculate MISR over 1 frame */
117 		m->hw_lm->ops.setup_misr(m->hw_lm);
118 	}
119 }
120 
121 static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
122 {
123 	struct drm_encoder *drm_enc;
124 
125 	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
126 		dpu_encoder_setup_misr(drm_enc);
127 }
128 
129 static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
130 {
131 	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
132 	enum dpu_crtc_crc_source current_source;
133 	struct dpu_crtc_state *crtc_state;
134 	struct drm_device *drm_dev = crtc->dev;
135 
136 	bool was_enabled;
137 	bool enable = false;
138 	int ret = 0;
139 
140 	if (source < 0) {
141 		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
142 		return -EINVAL;
143 	}
144 
145 	ret = drm_modeset_lock(&crtc->mutex, NULL);
146 
147 	if (ret)
148 		return ret;
149 
150 	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
151 	crtc_state = to_dpu_crtc_state(crtc->state);
152 
153 	spin_lock_irq(&drm_dev->event_lock);
154 	current_source = crtc_state->crc_source;
155 	spin_unlock_irq(&drm_dev->event_lock);
156 
157 	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);
158 
159 	if (!was_enabled && enable) {
160 		ret = drm_crtc_vblank_get(crtc);
161 
162 		if (ret)
163 			goto cleanup;
164 
165 	} else if (was_enabled && !enable) {
166 		drm_crtc_vblank_put(crtc);
167 	}
168 
169 	spin_lock_irq(&drm_dev->event_lock);
170 	crtc_state->crc_source = source;
171 	spin_unlock_irq(&drm_dev->event_lock);
172 
173 	crtc_state->crc_frame_skip_count = 0;
174 
175 	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
176 		dpu_crtc_setup_lm_misr(crtc_state);
177 	else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
178 		dpu_crtc_setup_encoder_misr(crtc);
179 	else
180 		ret = -EINVAL;
181 
182 cleanup:
183 	drm_modeset_unlock(&crtc->mutex);
184 
185 	return ret;
186 }
187 
188 static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
189 {
190 	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);
191 	if (!encoder) {
192 		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
193 		return 0;
194 	}
195 
196 	return dpu_encoder_get_vsync_count(encoder);
197 }
198 
199 static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
200 		struct dpu_crtc_state *crtc_state)
201 {
202 	struct dpu_crtc_mixer *m;
203 	u32 crcs[CRTC_DUAL_MIXERS];
204 
205 	int rc = 0;
206 	int i;
207 
208 	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
209 
210 	for (i = 0; i < crtc_state->num_mixers; ++i) {
211 
212 		m = &crtc_state->mixers[i];
213 
214 		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
215 			continue;
216 
217 		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
218 
219 		if (rc) {
220 			if (rc != -ENODATA)
221 				DRM_DEBUG_DRIVER("MISR read failed\n");
222 			return rc;
223 		}
224 	}
225 
226 	return drm_crtc_add_crc_entry(crtc, true,
227 			drm_crtc_accurate_vblank_count(crtc), crcs);
228 }
229 
230 static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
231 {
232 	struct drm_encoder *drm_enc;
233 	int rc, pos = 0;
234 	u32 crcs[INTF_MAX];
235 
236 	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
237 		rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
238 		if (rc < 0) {
239 			if (rc != -ENODATA)
240 				DRM_DEBUG_DRIVER("MISR read failed\n");
241 
242 			return rc;
243 		}
244 
245 		pos += rc;
246 	}
247 
248 	return drm_crtc_add_crc_entry(crtc, true,
249 			drm_crtc_accurate_vblank_count(crtc), crcs);
250 }
251 
252 static int dpu_crtc_get_crc(struct drm_crtc *crtc)
253 {
254 	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
255 
256 	/* Skip first 2 frames in case of "uncooked" CRCs */
257 	if (crtc_state->crc_frame_skip_count < 2) {
258 		crtc_state->crc_frame_skip_count++;
259 		return 0;
260 	}
261 
262 	if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
263 		return dpu_crtc_get_lm_crc(crtc, crtc_state);
264 	else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
265 		return dpu_crtc_get_encoder_crc(crtc);
266 
267 	return -EINVAL;
268 }
269 
270 static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
271 					   bool in_vblank_irq,
272 					   int *vpos, int *hpos,
273 					   ktime_t *stime, ktime_t *etime,
274 					   const struct drm_display_mode *mode)
275 {
276 	unsigned int pipe = crtc->index;
277 	struct drm_encoder *encoder;
278 	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
279 
280 	encoder = get_encoder_from_crtc(crtc);
281 	if (!encoder) {
282 		DRM_ERROR("no encoder found for crtc %d\n", pipe);
283 		return false;
284 	}
285 
286 	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
287 	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
288 
289 	/*
290 	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
291 	 * the end of VFP. Translate the porch values relative to the line
292 	 * counter positions.
293 	 */
294 
295 	vactive_start = vsw + vbp + 1;
296 	vactive_end = vactive_start + mode->crtc_vdisplay;
297 
298 	/* last scan line before VSYNC */
299 	vfp_end = mode->crtc_vtotal;
300 
301 	if (stime)
302 		*stime = ktime_get();
303 
304 	line = dpu_encoder_get_linecount(encoder);
305 
306 	if (line < vactive_start)
307 		line -= vactive_start;
308 	else if (line > vactive_end)
309 		line = line - vfp_end - vactive_start;
310 	else
311 		line -= vactive_start;
312 
313 	*vpos = line;
314 	*hpos = 0;
315 
316 	if (etime)
317 		*etime = ktime_get();
318 
319 	return true;
320 }
321 
322 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
323 		struct dpu_plane_state *pstate, const struct msm_format *format)
324 {
325 	struct dpu_hw_mixer *lm = mixer->hw_lm;
326 	uint32_t blend_op;
327 	uint32_t fg_alpha, bg_alpha;
328 
329 	fg_alpha = pstate->base.alpha >> 8;
330 	bg_alpha = 0xff - fg_alpha;
331 
332 	/* default to opaque blending */
333 	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
334 	    !format->alpha_enable) {
335 		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
336 			DPU_BLEND_BG_ALPHA_BG_CONST;
337 	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
338 		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
339 			DPU_BLEND_BG_ALPHA_FG_PIXEL;
340 		if (fg_alpha != 0xff) {
341 			bg_alpha = fg_alpha;
342 			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
343 				    DPU_BLEND_BG_INV_MOD_ALPHA;
344 		} else {
345 			blend_op |= DPU_BLEND_BG_INV_ALPHA;
346 		}
347 	} else {
348 		/* coverage blending */
349 		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
350 			DPU_BLEND_BG_ALPHA_FG_PIXEL;
351 		if (fg_alpha != 0xff) {
352 			bg_alpha = fg_alpha;
353 			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
354 				    DPU_BLEND_FG_INV_MOD_ALPHA |
355 				    DPU_BLEND_BG_MOD_ALPHA |
356 				    DPU_BLEND_BG_INV_MOD_ALPHA;
357 		} else {
358 			blend_op |= DPU_BLEND_BG_INV_ALPHA;
359 		}
360 	}
361 
362 	lm->ops.setup_blend_config(lm, pstate->stage,
363 				fg_alpha, bg_alpha, blend_op);
364 
365 	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
366 		  &format->pixel_format, format->alpha_enable, blend_op);
367 }
368 
369 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
370 {
371 	struct dpu_crtc_state *crtc_state;
372 	int lm_idx, lm_horiz_position;
373 
374 	crtc_state = to_dpu_crtc_state(crtc->state);
375 
376 	lm_horiz_position = 0;
377 	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
378 		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
379 		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
380 		struct dpu_hw_mixer_cfg cfg;
381 
382 		if (!lm_roi || !drm_rect_visible(lm_roi))
383 			continue;
384 
385 		cfg.out_width = drm_rect_width(lm_roi);
386 		cfg.out_height = drm_rect_height(lm_roi);
387 		cfg.right_mixer = lm_horiz_position++;
388 		cfg.flags = 0;
389 		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
390 	}
391 }
392 
393 static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
394 				       struct drm_plane *plane,
395 				       struct dpu_crtc_mixer *mixer,
396 				       u32 num_mixers,
397 				       enum dpu_stage stage,
398 				       const struct msm_format *format,
399 				       uint64_t modifier,
400 				       struct dpu_sw_pipe *pipe,
401 				       unsigned int stage_idx,
402 				       struct dpu_hw_stage_cfg *stage_cfg
403 				      )
404 {
405 	uint32_t lm_idx;
406 	enum dpu_sspp sspp_idx;
407 	struct drm_plane_state *state;
408 
409 	sspp_idx = pipe->sspp->idx;
410 
411 	state = plane->state;
412 
413 	trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
414 				   state, to_dpu_plane_state(state), stage_idx,
415 				   format->pixel_format,
416 				   modifier);
417 
418 	DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
419 			 crtc->base.id,
420 			 stage,
421 			 plane->base.id,
422 			 sspp_idx - SSPP_NONE,
423 			 state->fb ? state->fb->base.id : -1,
424 			 pipe->multirect_index);
425 
426 	stage_cfg->stage[stage][stage_idx] = sspp_idx;
427 	stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;
428 
429 	/* mark this SSPP as pending flush on every mixer's CTL path */
430 	for (lm_idx = 0; lm_idx < num_mixers; lm_idx++)
431 		mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
432 }
433 
434 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
435 	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
436 	struct dpu_hw_stage_cfg *stage_cfg)
437 {
438 	struct drm_plane *plane;
439 	struct drm_framebuffer *fb;
440 	struct drm_plane_state *state;
441 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
442 	struct dpu_plane_state *pstate = NULL;
443 	const struct msm_format *format;
444 	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
445 
446 	uint32_t lm_idx;
447 	bool bg_alpha_enable = false;
448 	DECLARE_BITMAP(fetch_active, SSPP_MAX);
449 
450 	memset(fetch_active, 0, sizeof(fetch_active));
451 	drm_atomic_crtc_for_each_plane(plane, crtc) {
452 		state = plane->state;
453 		if (!state)
454 			continue;
455 
456 		if (!state->visible)
457 			continue;
458 
459 		pstate = to_dpu_plane_state(state);
460 		fb = state->fb;
461 
462 		format = msm_framebuffer_format(pstate->base.fb);
463 
464 		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
465 			bg_alpha_enable = true;
466 
467 		set_bit(pstate->pipe.sspp->idx, fetch_active);
468 		_dpu_crtc_blend_setup_pipe(crtc, plane,
469 					   mixer, cstate->num_mixers,
470 					   pstate->stage,
471 					   format, fb ? fb->modifier : 0,
472 					   &pstate->pipe, 0, stage_cfg);
473 
474 		if (pstate->r_pipe.sspp) {
475 			set_bit(pstate->r_pipe.sspp->idx, fetch_active);
476 			_dpu_crtc_blend_setup_pipe(crtc, plane,
477 						   mixer, cstate->num_mixers,
478 						   pstate->stage,
479 						   format, fb ? fb->modifier : 0,
480 						   &pstate->r_pipe, 1, stage_cfg);
481 		}
482 
483 		/* blend config update */
484 		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
485 			_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);
486 
487 			if (bg_alpha_enable && !format->alpha_enable)
488 				mixer[lm_idx].mixer_op_mode = 0;
489 			else
490 				mixer[lm_idx].mixer_op_mode |=
491 						1 << pstate->stage;
492 		}
493 	}
494 
495 	if (ctl->ops.set_active_pipes)
496 		ctl->ops.set_active_pipes(ctl, fetch_active);
497 
498 	_dpu_crtc_program_lm_output_roi(crtc);
499 }
500 
501 /**
502  * _dpu_crtc_blend_setup - configure crtc mixers
503  * @crtc: Pointer to drm crtc structure
504  */
505 static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
506 {
507 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
508 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
509 	struct dpu_crtc_mixer *mixer = cstate->mixers;
510 	struct dpu_hw_ctl *ctl;
511 	struct dpu_hw_mixer *lm;
512 	struct dpu_hw_stage_cfg stage_cfg;
513 	int i;
514 
515 	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
516 
517 	for (i = 0; i < cstate->num_mixers; i++) {
518 		mixer[i].mixer_op_mode = 0;
519 		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
520 			mixer[i].lm_ctl->ops.clear_all_blendstages(
521 					mixer[i].lm_ctl);
522 	}
523 
524 	/* initialize stage cfg */
525 	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
526 
527 	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
528 
529 	for (i = 0; i < cstate->num_mixers; i++) {
530 		ctl = mixer[i].lm_ctl;
531 		lm = mixer[i].hw_lm;
532 
533 		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
534 
535 		/* stage config flush mask */
536 		ctl->ops.update_pending_flush_mixer(ctl,
537 			mixer[i].hw_lm->idx);
538 
539 		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
540 			mixer[i].hw_lm->idx - LM_0,
541 			mixer[i].mixer_op_mode,
542 			ctl->idx - CTL_0);
543 
544 		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
545 			&stage_cfg);
546 	}
547 }
548 
549 /**
550  * _dpu_crtc_complete_flip - signal pending page_flip events
551  * Any pending vblank events are added to the vblank_event_list
552  * so that the next vblank interrupt shall signal them.
553  * However, PAGE_FLIP events are not handled through the vblank_event_list.
554  * This API signals any pending PAGE_FLIP events requested through
555  * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
556  * @crtc: Pointer to drm crtc structure
557  */
558 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
559 {
560 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
561 	struct drm_device *dev = crtc->dev;
562 	unsigned long flags;
563 
564 	spin_lock_irqsave(&dev->event_lock, flags);
565 	if (dpu_crtc->event) {
566 		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
567 			      dpu_crtc->event);
568 		trace_dpu_crtc_complete_flip(DRMID(crtc));
569 		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
570 		dpu_crtc->event = NULL;
571 	}
572 	spin_unlock_irqrestore(&dev->event_lock, flags);
573 }
574 
575 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
576 {
577 	struct drm_encoder *encoder;
578 
579 	/*
580 	 * TODO: This function is called from dpu debugfs and as part of atomic
581 	 * check. When called from debugfs, the crtc->mutex must be held to
582 	 * read crtc->state. However reading crtc->state from atomic check isn't
583 	 * allowed (unless you have a good reason, a big comment, and a deep
584 	 * understanding of how the atomic/modeset locks work (<- and this is
585 	 * probably not possible)). So we'll keep the WARN_ON here for now, but
586 	 * really we need to figure out a better way to track our operating mode
587 	 */
588 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
589 
590 	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
591 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
592 		return dpu_encoder_get_intf_mode(encoder);
593 
594 	return INTF_MODE_NONE;
595 }
596 
597 void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
598 {
599 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
600 
601 	/* keep statistics on vblank callback - with auto reset via debugfs */
602 	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
603 		dpu_crtc->vblank_cb_time = ktime_get();
604 	else
605 		dpu_crtc->vblank_cb_count++;
606 
607 	dpu_crtc_get_crc(crtc);
608 
609 	drm_crtc_handle_vblank(crtc);
610 	trace_dpu_crtc_vblank_cb(DRMID(crtc));
611 }
612 
613 static void dpu_crtc_frame_event_work(struct kthread_work *work)
614 {
615 	struct dpu_crtc_frame_event *fevent = container_of(work,
616 			struct dpu_crtc_frame_event, work);
617 	struct drm_crtc *crtc = fevent->crtc;
618 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
619 	unsigned long flags;
620 	bool frame_done = false;
621 
622 	DPU_ATRACE_BEGIN("crtc_frame_event");
623 
624 	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
625 			ktime_to_ns(fevent->ts));
626 
627 	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
628 				| DPU_ENCODER_FRAME_EVENT_ERROR
629 				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
630 
631 		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
632 			/* ignore frame events when no frame is pending */
633 		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
634 			/* release bandwidth and other resources */
635 			trace_dpu_crtc_frame_event_done(DRMID(crtc),
636 							fevent->event);
637 			dpu_core_perf_crtc_release_bw(crtc);
638 		} else {
639 			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
640 								fevent->event);
641 		}
642 
643 		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
644 					| DPU_ENCODER_FRAME_EVENT_ERROR))
645 			frame_done = true;
646 	}
647 
648 	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
649 		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
650 				crtc->base.id, ktime_to_ns(fevent->ts));
651 
652 	if (frame_done)
653 		complete_all(&dpu_crtc->frame_done_comp);
654 
655 	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
656 	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
657 	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
658 	DPU_ATRACE_END("crtc_frame_event");
659 }
660 
661 /**
662  * dpu_crtc_frame_event_cb - crtc frame event callback API
663  * @crtc: Pointer to crtc
664  * @event: Event to process
665  *
666  * Encoder may call this for different events from different contexts - IRQ,
667  * user thread, commit_thread, etc. Each event should be carefully reviewed and
668  * should be processed in the proper task context to avoid scheduling delay or
669  * to properly manage the irq context's bottom half processing.
670  */
671 void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
672 {
673 	struct dpu_crtc *dpu_crtc;
674 	struct msm_drm_private *priv;
675 	struct dpu_crtc_frame_event *fevent;
676 	unsigned long flags;
677 	u32 crtc_id;
678 
679 	/* Nothing to do on idle event */
680 	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
681 		return;
682 
683 	dpu_crtc = to_dpu_crtc(crtc);
684 	priv = crtc->dev->dev_private;
685 	crtc_id = drm_crtc_index(crtc);
686 
687 	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
688 
689 	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
690 	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
691 			struct dpu_crtc_frame_event, list);
692 	if (fevent)
693 		list_del_init(&fevent->list);
694 	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
695 
696 	if (!fevent) {
697 		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
698 		return;
699 	}
700 
701 	fevent->event = event;
702 	fevent->crtc = crtc;
703 	fevent->ts = ktime_get();
704 	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
705 }
706 
707 void dpu_crtc_complete_commit(struct drm_crtc *crtc)
708 {
709 	trace_dpu_crtc_complete_commit(DRMID(crtc));
710 	dpu_core_perf_crtc_update(crtc, 0);
711 	_dpu_crtc_complete_flip(crtc);
712 }
713 
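/*
 * Split the adjusted mode horizontally into equal-width regions, one per
 * layer mixer, and record each region in the crtc state's lm_bounds.
 */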
714 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
715 		struct drm_crtc_state *state)
716 {
717 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
718 	struct drm_display_mode *adj_mode = &state->adjusted_mode;
719 	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
720 	int i;
721 
722 	for (i = 0; i < cstate->num_mixers; i++) {
723 		struct drm_rect *r = &cstate->lm_bounds[i];
724 		r->x1 = crtc_split_width * i;
725 		r->y1 = 0;
726 		r->x2 = r->x1 + crtc_split_width;
727 		r->y2 = adj_mode->vdisplay;
728 
729 		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
730 	}
731 }
732 
733 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
734 		struct dpu_hw_pcc_cfg *cfg)
735 {
736 	struct drm_color_ctm *ctm;
737 
738 	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
739 
740 	ctm = (struct drm_color_ctm *)state->ctm->data;
741 
742 	if (!ctm)
743 		return;
744 
745 	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
746 	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
747 	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
748 
749 	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
750 	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
751 	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
752 
753 	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
754 	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
755 	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
756 }
757 
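/*
 * Program the color-processing (DSPP PCC) blocks from the CRTC's CTM
 * property whenever the color management state changed or a modeset is
 * requested.
 */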
758 static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
759 {
760 	struct drm_crtc_state *state = crtc->state;
761 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
762 	struct dpu_crtc_mixer *mixer = cstate->mixers;
763 	struct dpu_hw_pcc_cfg cfg;
764 	struct dpu_hw_ctl *ctl;
765 	struct dpu_hw_dspp *dspp;
766 	int i;
767 
768 
769 	if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
770 		return;
771 
772 	for (i = 0; i < cstate->num_mixers; i++) {
773 		ctl = mixer[i].lm_ctl;
774 		dspp = mixer[i].hw_dspp;
775 
776 		if (!dspp || !dspp->ops.setup_pcc)
777 			continue;
778 
779 		if (!state->ctm) {
780 			dspp->ops.setup_pcc(dspp, NULL);
781 		} else {
782 			_dpu_crtc_get_pcc_coeff(state, &cfg);
783 			dspp->ops.setup_pcc(dspp, &cfg);
784 		}
785 
786 		/* stage config flush mask */
787 		ctl->ops.update_pending_flush_dspp(ctl,
788 			mixer[i].hw_dspp->idx, DPU_DSPP_PCC);
789 	}
790 }
791 
792 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
793 		struct drm_atomic_state *state)
794 {
795 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
796 	struct drm_encoder *encoder;
797 
798 	if (!crtc->state->enable) {
799 		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
800 				crtc->base.id, crtc->state->enable);
801 		return;
802 	}
803 
804 	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
805 
806 	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
807 
808 	/* encoder will trigger pending mask now */
809 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
810 		dpu_encoder_trigger_kickoff_pending(encoder);
811 
812 	/*
813 	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
814 	 * it means we are trying to flush a CRTC whose state is disabled:
815 	 * nothing else needs to be done.
816 	 */
817 	if (unlikely(!cstate->num_mixers))
818 		return;
819 
820 	_dpu_crtc_blend_setup(crtc);
821 
822 	_dpu_crtc_setup_cp_blocks(crtc);
823 
824 	/*
825 	 * PP_DONE irq is only used by command mode for now.
826 	 * It is better to request pending before FLUSH and START trigger
827 	 * to make sure no pp_done irq is missed.
828 	 * This is safe because no pp_done will happen before SW trigger
829 	 * in command mode.
830 	 */
831 }
832 
833 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
834 		struct drm_atomic_state *state)
835 {
836 	struct dpu_crtc *dpu_crtc;
837 	struct drm_device *dev;
838 	struct drm_plane *plane;
839 	struct msm_drm_private *priv;
840 	unsigned long flags;
841 	struct dpu_crtc_state *cstate;
842 
843 	if (!crtc->state->enable) {
844 		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
845 				crtc->base.id, crtc->state->enable);
846 		return;
847 	}
848 
849 	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
850 
851 	dpu_crtc = to_dpu_crtc(crtc);
852 	cstate = to_dpu_crtc_state(crtc->state);
853 	dev = crtc->dev;
854 	priv = dev->dev_private;
855 
856 	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
857 		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
858 		return;
859 	}
860 
861 	WARN_ON(dpu_crtc->event);
862 	spin_lock_irqsave(&dev->event_lock, flags);
863 	dpu_crtc->event = crtc->state->event;
864 	crtc->state->event = NULL;
865 	spin_unlock_irqrestore(&dev->event_lock, flags);
866 
867 	/*
868 	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
869 	 * it means we are trying to flush a CRTC whose state is disabled:
870 	 * nothing else needs to be done.
871 	 */
872 	if (unlikely(!cstate->num_mixers))
873 		return;
874 
875 	/* update performance setting before crtc kickoff */
876 	dpu_core_perf_crtc_update(crtc, 1);
877 
878 	/*
879 	 * Final plane updates: Give each plane a chance to complete all
880 	 *                      required writes/flushing before crtc's "flush
881 	 *                      everything" call below.
882 	 */
883 	drm_atomic_crtc_for_each_plane(plane, crtc) {
884 		if (dpu_crtc->smmu_state.transition_error)
885 			dpu_plane_set_error(plane, true);
886 		dpu_plane_flush(plane);
887 	}
888 
889 	/* Kickoff will be scheduled by outer layer */
890 }
891 
892 /**
893  * dpu_crtc_destroy_state - state destroy hook
894  * @crtc: drm CRTC
895  * @state: CRTC state object to release
896  */
897 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
898 		struct drm_crtc_state *state)
899 {
900 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
901 
902 	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
903 
904 	__drm_atomic_helper_crtc_destroy_state(state);
905 
906 	kfree(cstate);
907 }
908 
909 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
910 {
911 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
912 	int ret, rc = 0;
913 
914 	if (!atomic_read(&dpu_crtc->frame_pending)) {
915 		DRM_DEBUG_ATOMIC("no frames pending\n");
916 		return 0;
917 	}
918 
919 	DPU_ATRACE_BEGIN("frame done completion wait");
920 	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
921 			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
922 	if (!ret) {
923 		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
924 		rc = -ETIMEDOUT;
925 	}
926 	DPU_ATRACE_END("frame done completion wait");
927 
928 	return rc;
929 }
930 
931 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
932 {
933 	struct drm_encoder *encoder;
934 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
935 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
936 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
937 
938 	/*
939 	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
940 	 * it means we are trying to start a CRTC whose state is disabled:
941 	 * nothing else needs to be done.
942 	 */
943 	if (unlikely(!cstate->num_mixers))
944 		return;
945 
946 	DPU_ATRACE_BEGIN("crtc_commit");
947 
948 	drm_for_each_encoder_mask(encoder, crtc->dev,
949 			crtc->state->encoder_mask) {
950 		if (!dpu_encoder_is_valid_for_commit(encoder)) {
951 			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
952 			goto end;
953 		}
954 	}
955 	/*
956 	 * Encoder will flush/start now, unless it has a tx pending. If so, it
957 	 * may delay and flush at an irq event (e.g. ppdone)
958 	 */
959 	drm_for_each_encoder_mask(encoder, crtc->dev,
960 				  crtc->state->encoder_mask)
961 		dpu_encoder_prepare_for_kickoff(encoder);
962 
963 	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
964 		/* acquire bandwidth and other resources */
965 		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
966 	} else
967 		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
968 
969 	dpu_crtc->play_count++;
970 
971 	dpu_vbif_clear_errors(dpu_kms);
972 
973 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
974 		dpu_encoder_kickoff(encoder);
975 
976 	reinit_completion(&dpu_crtc->frame_done_comp);
977 
978 end:
979 	DPU_ATRACE_END("crtc_commit");
980 }
981 
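/**
 * dpu_crtc_reset - reset hook for CRTCs
 * Resets the atomic state for @crtc by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 * @crtc: Pointer to drm crtc structure
 */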
982 static void dpu_crtc_reset(struct drm_crtc *crtc)
983 {
984 	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
985 
986 	if (crtc->state)
987 		dpu_crtc_destroy_state(crtc, crtc->state);
988 
989 	if (cstate)
990 		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
991 	else
992 		__drm_atomic_helper_crtc_reset(crtc, NULL);
993 }
994 
995 /**
996  * dpu_crtc_duplicate_state - state duplicate hook
997  * @crtc: Pointer to drm crtc structure
998  */
999 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
1000 {
1001 	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
1002 
1003 	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
1004 	if (!cstate) {
1005 		DPU_ERROR("failed to allocate state\n");
1006 		return NULL;
1007 	}
1008 
1009 	/* duplicate base helper */
1010 	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
1011 
1012 	return &cstate->base;
1013 }
1014 
1015 static void dpu_crtc_atomic_print_state(struct drm_printer *p,
1016 					const struct drm_crtc_state *state)
1017 {
1018 	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
1019 	int i;
1020 
1021 	for (i = 0; i < cstate->num_mixers; i++) {
1022 		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
1023 		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
1024 		if (cstate->mixers[i].hw_dspp)
1025 			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
1026 	}
1027 }
1028 
1029 static void dpu_crtc_disable(struct drm_crtc *crtc,
1030 			     struct drm_atomic_state *state)
1031 {
1032 	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
1033 									      crtc);
1034 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1035 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
1036 	struct drm_encoder *encoder;
1037 	unsigned long flags;
1038 	bool release_bandwidth = false;
1039 
1040 	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
1041 
1042 	/* If disable is triggered while in self refresh mode,
1043 	 * reset the encoder software state so that re-enabling
1044 	 * the CRTC later does not trigger a WARN while assigning the crtc.
1045 	 */
1046 	if (old_crtc_state->self_refresh_active) {
1047 		drm_for_each_encoder_mask(encoder, crtc->dev,
1048 					old_crtc_state->encoder_mask) {
1049 			dpu_encoder_assign_crtc(encoder, NULL);
1050 		}
1051 		return;
1052 	}
1053 
1054 	/* Disable/save vblank irq handling */
1055 	drm_crtc_vblank_off(crtc);
1056 
1057 	drm_for_each_encoder_mask(encoder, crtc->dev,
1058 				  old_crtc_state->encoder_mask) {
1059 		/* in video mode, we hold an extra bandwidth reference
1060 		 * as we cannot drop bandwidth at frame-done if any
1061 		 * crtc is being used in video mode.
1062 		 */
1063 		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
1064 			release_bandwidth = true;
1065 
1066 		/*
1067 		 * If disable is triggered while PSR is active (e.g. screen dim in PSR),
1068 		 * we will need the encoder->crtc connection to process the device sleep,
1069 		 * so preserve it during the PSR sequence.
1070 		 */
1071 		if (!crtc->state->self_refresh_active)
1072 			dpu_encoder_assign_crtc(encoder, NULL);
1073 	}
1074 
1075 	/* wait for frame_event_done completion */
1076 	if (_dpu_crtc_wait_for_frame_done(crtc))
1077 		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
1078 				crtc->base.id,
1079 				atomic_read(&dpu_crtc->frame_pending));
1080 
1081 	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
1082 	dpu_crtc->enabled = false;
1083 
1084 	if (atomic_read(&dpu_crtc->frame_pending)) {
1085 		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
1086 				     atomic_read(&dpu_crtc->frame_pending));
1087 		if (release_bandwidth)
1088 			dpu_core_perf_crtc_release_bw(crtc);
1089 		atomic_set(&dpu_crtc->frame_pending, 0);
1090 	}
1091 
1092 	dpu_core_perf_crtc_update(crtc, 0);
1093 
1094 	memset(cstate->mixers, 0, sizeof(cstate->mixers));
1095 	cstate->num_mixers = 0;
1096 
1097 	/* disable clk & bw control until clk & bw properties are set */
1098 	cstate->bw_control = false;
1099 	cstate->bw_split_vote = false;
1100 
1101 	if (crtc->state->event && !crtc->state->active) {
1102 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
1103 		drm_crtc_send_vblank_event(crtc, crtc->state->event);
1104 		crtc->state->event = NULL;
1105 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
1106 	}
1107 
1108 	pm_runtime_put_sync(crtc->dev->dev);
1109 }
1110 
1111 static void dpu_crtc_enable(struct drm_crtc *crtc,
1112 		struct drm_atomic_state *state)
1113 {
1114 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1115 	struct drm_encoder *encoder;
1116 	bool request_bandwidth = false;
1117 	struct drm_crtc_state *old_crtc_state;
1118 
1119 	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
1120 
1121 	pm_runtime_get_sync(crtc->dev->dev);
1122 
1123 	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
1124 
1125 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
1126 		/* in video mode, we hold an extra bandwidth reference
1127 		 * as we cannot drop bandwidth at frame-done if any
1128 		 * crtc is being used in video mode.
1129 		 */
1130 		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
1131 			request_bandwidth = true;
1132 	}
1133 
1134 	if (request_bandwidth)
1135 		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
1136 
1137 	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
1138 	dpu_crtc->enabled = true;
1139 
1140 	if (!old_crtc_state->self_refresh_active) {
1141 		drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
1142 			dpu_encoder_assign_crtc(encoder, crtc);
1143 	}
1144 
1145 	/* Enable/restore vblank irq handling */
1146 	drm_crtc_vblank_on(crtc);
1147 }
1148 
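/*
 * Planes need dirty-FB handling when the CRTC is in self-refresh (PSR) or is
 * driven through a command-mode interface, since those displays only update
 * when a new frame is explicitly pushed.
 */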
1149 static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
1150 {
1151 	struct drm_crtc *crtc = cstate->crtc;
1152 	struct drm_encoder *encoder;
1153 
1154 	if (cstate->self_refresh_active)
1155 		return true;
1156 
1157 	drm_for_each_encoder_mask(encoder, crtc->dev, cstate->encoder_mask) {
1158 		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) {
1159 			return true;
1160 		}
1161 	}
1162 
1163 	return false;
1164 }
1165 
1166 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
1167 		struct drm_atomic_state *state)
1168 {
1169 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
1170 									  crtc);
1171 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1172 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
1173 
1174 	const struct drm_plane_state *pstate;
1175 	struct drm_plane *plane;
1176 
1177 	int rc = 0;
1178 
1179 	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
1180 
1181 	if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
1182 		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
1183 				crtc->base.id, crtc_state->enable,
1184 				crtc_state->active);
1185 		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
1186 		return 0;
1187 	}
1188 
1189 	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
1190 
1191 	/* force a full mode set if active state changed */
1192 	if (crtc_state->active_changed)
1193 		crtc_state->mode_changed = true;
1194 
1195 	if (cstate->num_mixers)
1196 		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
1197 
1198 	/* FIXME: move this to dpu_plane_atomic_check? */
1199 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
1200 		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
1201 
1202 		if (IS_ERR_OR_NULL(pstate)) {
1203 			rc = PTR_ERR(pstate);
1204 			DPU_ERROR("%s: failed to get plane%d state, %d\n",
1205 					dpu_crtc->name, plane->base.id, rc);
1206 			return rc;
1207 		}
1208 
1209 		if (!pstate->visible)
1210 			continue;
1211 
1212 		dpu_pstate->needs_dirtyfb = needs_dirtyfb;
1213 	}
1214 
1215 	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
1216 
1217 	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
1218 	if (rc) {
1219 		DPU_ERROR("crtc%d failed performance check %d\n",
1220 				crtc->base.id, rc);
1221 		return rc;
1222 	}
1223 
1224 	return 0;
1225 }
1226 
1227 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
1228 {
1229 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1230 	struct drm_encoder *enc;
1231 
1232 	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
1233 
1234 	/*
1235 	 * Normally we would iterate through encoder_mask in crtc state to find
1236 	 * attached encoders. In this case, we might be disabling vblank _after_
1237 	 * encoder_mask has been cleared.
1238 	 *
1239 	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
1240 	 * disable (which is also after encoder_mask is cleared). So instead of
1241 	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
1242 	 * currently assigned to our crtc.
1243 	 *
1244 	 * Note also that this function cannot be called while crtc is disabled
1245 	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
1246 	 * about the assigned crtcs being inconsistent with the current state
1247 	 * (which means no need to worry about modeset locks).
1248 	 */
1249 	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
1250 		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
1251 					     dpu_crtc);
1252 
1253 		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
1254 	}
1255 
1256 	return 0;
1257 }
1258 
1259 #ifdef CONFIG_DEBUG_FS
1260 static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
1261 {
1262 	struct dpu_crtc *dpu_crtc;
1263 	struct dpu_plane_state *pstate = NULL;
1264 	struct dpu_crtc_mixer *m;
1265 
1266 	struct drm_crtc *crtc;
1267 	struct drm_plane *plane;
1268 	struct drm_display_mode *mode;
1269 	struct drm_framebuffer *fb;
1270 	struct drm_plane_state *state;
1271 	struct dpu_crtc_state *cstate;
1272 
1273 	int i, out_width;
1274 
1275 	dpu_crtc = s->private;
1276 	crtc = &dpu_crtc->base;
1277 
1278 	drm_modeset_lock_all(crtc->dev);
1279 	cstate = to_dpu_crtc_state(crtc->state);
1280 
1281 	mode = &crtc->state->adjusted_mode;
1282 	out_width = mode->hdisplay / cstate->num_mixers;
1283 
1284 	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
1285 				mode->hdisplay, mode->vdisplay);
1286 
1287 	seq_puts(s, "\n");
1288 
1289 	for (i = 0; i < cstate->num_mixers; ++i) {
1290 		m = &cstate->mixers[i];
1291 		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
1292 			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
1293 			out_width, mode->vdisplay);
1294 	}
1295 
1296 	seq_puts(s, "\n");
1297 
1298 	drm_atomic_crtc_for_each_plane(plane, crtc) {
1299 		pstate = to_dpu_plane_state(plane->state);
1300 		state = plane->state;
1301 
1302 		if (!pstate || !state)
1303 			continue;
1304 
1305 		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
1306 			pstate->stage);
1307 
1308 		if (plane->state->fb) {
1309 			fb = plane->state->fb;
1310 
1311 			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
1312 				fb->base.id, (char *) &fb->format->format,
1313 				fb->width, fb->height);
1314 			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
1315 				seq_printf(s, "cpp[%d]:%u ",
1316 						i, fb->format->cpp[i]);
1317 			seq_puts(s, "\n\t");
1318 
1319 			seq_printf(s, "modifier:%8llu ", fb->modifier);
1320 			seq_puts(s, "\n");
1321 
1322 			seq_puts(s, "\t");
1323 			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
1324 				seq_printf(s, "pitches[%d]:%8u ", i,
1325 							fb->pitches[i]);
1326 			seq_puts(s, "\n");
1327 
1328 			seq_puts(s, "\t");
1329 			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
1330 				seq_printf(s, "offsets[%d]:%8u ", i,
1331 							fb->offsets[i]);
1332 			seq_puts(s, "\n");
1333 		}
1334 
1335 		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
1336 			state->src_x, state->src_y, state->src_w, state->src_h);
1337 
1338 		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
1339 			state->crtc_x, state->crtc_y, state->crtc_w,
1340 			state->crtc_h);
1341 		seq_printf(s, "\tsspp[0]:%s\n",
1342 			   pstate->pipe.sspp->cap->name);
1343 		seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n",
1344 			pstate->pipe.multirect_mode, pstate->pipe.multirect_index);
1345 		if (pstate->r_pipe.sspp) {
1346 			seq_printf(s, "\tsspp[1]:%s\n",
1347 				   pstate->r_pipe.sspp->cap->name);
1348 			seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n",
1349 				   pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index);
1350 		}
1351 
1352 		seq_puts(s, "\n");
1353 	}
1354 	if (dpu_crtc->vblank_cb_count) {
1355 		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
1356 		s64 diff_ms = ktime_to_ms(diff);
1357 		s64 fps = diff_ms ? div_s64(
1358 				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
1359 
1360 		seq_printf(s,
1361 			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
1362 				fps, dpu_crtc->vblank_cb_count,
1363 				ktime_to_ms(diff), dpu_crtc->play_count);
1364 
1365 		/* reset time & count for next measurement */
1366 		dpu_crtc->vblank_cb_count = 0;
1367 		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
1368 	}
1369 
1370 	drm_modeset_unlock_all(crtc->dev);
1371 
1372 	return 0;
1373 }
1374 
1375 DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
1376 
1377 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
1378 {
1379 	struct drm_crtc *crtc = s->private;
1380 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1381 
1382 	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
1383 	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
1384 	seq_printf(s, "core_clk_rate: %llu\n",
1385 			dpu_crtc->cur_perf.core_clk_rate);
1386 	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
1387 	seq_printf(s, "max_per_pipe_ib: %llu\n",
1388 				dpu_crtc->cur_perf.max_per_pipe_ib);
1389 
1390 	return 0;
1391 }
1392 DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
1393 
1394 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1395 {
1396 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1397 
1398 	debugfs_create_file("status", 0400,
1399 			crtc->debugfs_entry,
1400 			dpu_crtc, &_dpu_debugfs_status_fops);
1401 	debugfs_create_file("state", 0600,
1402 			crtc->debugfs_entry,
1403 			&dpu_crtc->base,
1404 			&dpu_crtc_debugfs_state_fops);
1405 
1406 	return 0;
1407 }
1408 #else
1409 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1410 {
1411 	return 0;
1412 }
1413 #endif /* CONFIG_DEBUG_FS */
1414 
1415 static int dpu_crtc_late_register(struct drm_crtc *crtc)
1416 {
1417 	return _dpu_crtc_init_debugfs(crtc);
1418 }
1419 
1420 static const struct drm_crtc_funcs dpu_crtc_funcs = {
1421 	.set_config = drm_atomic_helper_set_config,
1422 	.page_flip = drm_atomic_helper_page_flip,
1423 	.reset = dpu_crtc_reset,
1424 	.atomic_duplicate_state = dpu_crtc_duplicate_state,
1425 	.atomic_destroy_state = dpu_crtc_destroy_state,
1426 	.atomic_print_state = dpu_crtc_atomic_print_state,
1427 	.late_register = dpu_crtc_late_register,
1428 	.verify_crc_source = dpu_crtc_verify_crc_source,
1429 	.set_crc_source = dpu_crtc_set_crc_source,
1430 	.enable_vblank  = msm_crtc_enable_vblank,
1431 	.disable_vblank = msm_crtc_disable_vblank,
1432 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
1433 	.get_vblank_counter = dpu_crtc_get_vblank_counter,
1434 };
1435 
1436 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
1437 	.atomic_disable = dpu_crtc_disable,
1438 	.atomic_enable = dpu_crtc_enable,
1439 	.atomic_check = dpu_crtc_atomic_check,
1440 	.atomic_begin = dpu_crtc_atomic_begin,
1441 	.atomic_flush = dpu_crtc_atomic_flush,
1442 	.get_scanout_position = dpu_crtc_get_scanout_position,
1443 };
1444 
1445 /* initialize crtc */
1446 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
1447 				struct drm_plane *cursor)
1448 {
1449 	struct msm_drm_private *priv = dev->dev_private;
1450 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
1451 	struct drm_crtc *crtc = NULL;
1452 	struct dpu_crtc *dpu_crtc;
1453 	int i, ret;
1454 
1455 	dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
1456 					       plane, cursor,
1457 					       &dpu_crtc_funcs,
1458 					       NULL);
1459 
1460 	if (IS_ERR(dpu_crtc))
1461 		return ERR_CAST(dpu_crtc);
1462 
1463 	crtc = &dpu_crtc->base;
1464 	crtc->dev = dev;
1465 
1466 	spin_lock_init(&dpu_crtc->spin_lock);
1467 	atomic_set(&dpu_crtc->frame_pending, 0);
1468 
1469 	init_completion(&dpu_crtc->frame_done_comp);
1470 
1471 	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
1472 
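	/* pre-populate the pool of frame events consumed by dpu_crtc_frame_event_cb() */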
1473 	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
1474 		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
1475 		list_add(&dpu_crtc->frame_events[i].list,
1476 				&dpu_crtc->frame_event_list);
1477 		kthread_init_work(&dpu_crtc->frame_events[i].work,
1478 				dpu_crtc_frame_event_work);
1479 	}
1480 
1481 	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
1482 
1483 	if (dpu_kms->catalog->dspp_count)
1484 		drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
1485 
1486 	/* save user friendly CRTC name for later */
1487 	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
1488 
1489 	/* initialize event handling */
1490 	spin_lock_init(&dpu_crtc->event_lock);
1491 
1492 	ret = drm_self_refresh_helper_init(crtc);
1493 	if (ret) {
1494 		DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
1495 			crtc->name, ret);
1496 		return ERR_PTR(ret);
1497 	}
1498 
1499 	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
1500 	return crtc;
1501 }
1502