// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

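/*
 * Convert one drm_color_ctm coefficient (S31.32 sign-magnitude fixed
 * point) to the 18-bit 3.15 format used by the DSPP PCC config: drop the
 * sign bit and shift the 32 fractional bits down to 15, keeping 3 integer
 * bits. E.g. 1.0 (0x1_0000_0000) becomes 1 << 15. Note that negative
 * coefficients lose their sign and are applied as their magnitude.
 */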
#define	CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

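/*
 * The DPU CRTC does not cache a pointer to its encoder; walk the device's
 * encoder list and match on encoder->crtc instead. A single attached
 * encoder is assumed here: only the first match is returned.
 */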
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_frame_count(encoder);
}

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */
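	/*
	 * Worked example (hypothetical timing): with vsw = 5 and vbp = 36,
	 * vactive_start is 42, so a raw line count of 42 reads back as
	 * vpos 0, the first active scanline; lines still in the front porch
	 * come back negative.
	 */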

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

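/*
 * Translate the plane's DRM alpha and pixel_blend_mode properties into
 * mixer blend_op flags. Three cases are handled: "None" (and formats
 * without an alpha channel) blend constant-opaque, "Pre-multiplied"
 * modulates only the background by the pixel alpha, and "Coverage"
 * modulates both foreground and background. A plane-wide alpha below
 * 0xff additionally mixes in the constant alpha for the chosen mode.
 */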
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				    DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				    DPU_BLEND_FG_INV_MOD_ALPHA |
				    DPU_BLEND_BG_MOD_ALPHA |
				    DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
		  &format->base.pixel_format, format->alpha_enable, blend_op);
}

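/*
 * Program each layer mixer's output rectangle. In dual-mixer (source
 * split) configurations the CRTC width is divided evenly across
 * lm_bounds, so every LM scans out only its own portion of the screen.
 */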
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
		set_bit(dpu_plane_pipe(plane), fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API with the encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke these
 * callbacks from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * irq context's bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

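/*
 * Unpack the userspace CTM blob (a 3x3 drm_color_ctm with coefficients
 * in S31.32 fixed point) into the DSPP PCC configuration. CONVERT_S3_15
 * discards the sign bit, so negative coefficients are applied as their
 * magnitude.
 */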
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

576 			mixer[i].hw_lm->idx - DSPP_0,
577 			ctl->idx - CTL_0,
578 			mixer[i].flush_mask);
579 	}
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

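/*
 * Block until the encoder reports frame done for the last kickoff,
 * bounded by DPU_CRTC_FRAME_DONE_TIMEOUT_MS. Returns 0 if the frame
 * completed (or none was pending) and -ETIMEDOUT on timeout.
 */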
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else {
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending %d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

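/*
 * Per-plane bookkeeping gathered by dpu_crtc_atomic_check() while
 * validating blend stages and source-split placement.
 */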
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

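	/*
	 * Assign blend stages from the normalized zpos. At most two planes
	 * may share one stage on each side (left/right mixer) of the CRTC.
	 */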
	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}
	/*
	 * validate source split:
	 * use pstates sorted by stage to check planes on the same stage;
	 * we assume that all pipes are in source split, so it's valid to
	 * compare without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

1223 			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

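	/*
	 * Preallocate a fixed pool of frame events; dpu_crtc_frame_event_cb()
	 * drops events with a rate-limited error once the pool runs dry.
	 */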
	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}