// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 *
 */

#include <drm/drm_print.h>

#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_psr.h"
#include "intel_vrr.h"
#include "intel_vrr_regs.h"
#include "skl_prefill.h"
#include "skl_watermark.h"

#define FIXED_POINT_PRECISION		100
#define CMRR_PRECISION_TOLERANCE	10

bool intel_vrr_is_capable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	struct intel_dp *intel_dp;

	if (!HAS_VRR(display))
		return false;

	/*
	 * A DP sink is capable of VRR video timings if the
	 * Ignore MSA bit is set in the DPCD.
	 * The EDID monitor range should also span more than 10 Hz for a
	 * reasonable Adaptive Sync / Variable Refresh Rate end user
	 * experience.
	 */
	switch (connector->base.connector_type) {
	case DRM_MODE_CONNECTOR_eDP:
		if (!connector->panel.vbt.vrr)
			return false;
		fallthrough;
	case DRM_MODE_CONNECTOR_DisplayPort:
		if (connector->mst.dp)
			return false;
		intel_dp = intel_attached_dp(connector);

		if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
			return false;

		break;
	default:
		return false;
	}

	return info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}

bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh)
{
	const struct drm_display_info *info = &connector->base.display_info;

	return intel_vrr_is_capable(connector) &&
		vrefresh >= info->monitor_range.min_vfreq &&
		vrefresh <= info->monitor_range.max_vfreq;
}

bool intel_vrr_possible(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->vrr.flipline;
}

void
intel_vrr_check_modeset(struct intel_atomic_state *state)
{
	int i;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->uapi.vrr_enabled !=
		    old_crtc_state->uapi.vrr_enabled)
			new_crtc_state->uapi.mode_changed = true;
	}
}

static int intel_vrr_extra_vblank_delay(struct intel_display *display)
{
	/*
	 * On ICL/TGL VRR hardware inserts one extra scanline
	 * just after vactive, which pushes the vmin decision
	 * boundary ahead accordingly, and thus reduces the
	 * max guardband length by one scanline.
	 */
	return DISPLAY_VER(display) < 13 ? 1 : 0;
}

static int intel_vrr_vmin_flipline_offset(struct intel_display *display)
{
	/*
	 * ICL/TGL hardware imposes flipline>=vmin+1
	 *
	 * We reduce the vmin value to compensate when programming the
	 * hardware. This approach allows flipline to remain set at the
	 * original value, and thus the frame will have the desired
	 * minimum vtotal.
	 */
	return DISPLAY_VER(display) < 13 ? 1 : 0;
}

static int intel_vrr_guardband_to_pipeline_full(const struct intel_crtc_state *crtc_state,
						int guardband)
{
	/* hardware imposes one extra scanline somewhere */
	return guardband - crtc_state->framestart_delay - 1;
}

static int intel_vrr_pipeline_full_to_guardband(const struct intel_crtc_state *crtc_state,
						int pipeline_full)
{
	/* hardware imposes one extra scanline somewhere */
	return pipeline_full + crtc_state->framestart_delay + 1;
}
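
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * framestart_delay=1 and pipeline_full=10 convert to a guardband of
 * 10 + 1 + 1 = 12 scanlines, and a 12 scanline guardband converts
 * back to pipeline_full = 12 - 1 - 1 = 10.
 */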

/*
 * Without VRR registers get latched at:
 *  vblank_start
 *
 * With VRR the earliest registers can get latched is:
 *  intel_vrr_vmin_vblank_start(), which, if we want to maintain
 *  the correct min vtotal, is >= vblank_start+1
 *
 * The latest point registers can get latched is the vmax decision boundary:
 *  intel_vrr_vmax_vblank_start()
 *
 * Between those two points the vblank exit starts (and hence registers get
 * latched) ASAP after a push is sent.
 *
 * framestart_delay is programmable 1-4.
 */

int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state)
{
	/* Min vblank actually determined by flipline */
	return crtc_state->vrr.vmin;
}

int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->vrr.vmax;
}

int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_vmin_vtotal(crtc_state) - crtc_state->vrr.guardband;
}

int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_vmax_vtotal(crtc_state) - crtc_state->vrr.guardband;
}
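
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * vmin=1125, vmax=2812 and guardband=30 give a register latch window
 * that opens at the vmin decision boundary, scanline 1125 - 30 = 1095,
 * and closes at the vmax decision boundary, scanline 2812 - 30 = 2782.
 */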

static bool
is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/* Avoid CMRR for now until we have VRR with fixed timings working */
	if (!HAS_CMRR(display) || true)
		return false;

	actual_refresh_k =
		drm_mode_vrefresh(adjusted_mode) * FIXED_POINT_PRECISION;
	pixel_clock_per_line =
		adjusted_mode->crtc_clock * 1000 / adjusted_mode->crtc_htotal;
	calculated_refresh_k =
		pixel_clock_per_line * FIXED_POINT_PRECISION / adjusted_mode->crtc_vtotal;

	if ((actual_refresh_k - calculated_refresh_k) < CMRR_PRECISION_TOLERANCE)
		return false;

	return true;
}
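
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * a 59.94Hz mode with crtc_clock=148352 kHz, crtc_htotal=2200 and
 * crtc_vtotal=1125 gives calculated_refresh_k =
 * (148352000 / 2200) * 100 / 1125 = 5993 (0.01Hz units), while
 * drm_mode_vrefresh() rounds the rate to 60Hz, so actual_refresh_k =
 * 6000; the difference of 7 is below CMRR_PRECISION_TOLERANCE (10),
 * so no fractional CMRR would be required for this mode.
 */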

static unsigned int
cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
{
	int multiplier_m = 1, multiplier_n = 1, vtotal, desired_refresh_rate;
	u64 adjusted_pixel_rate;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	desired_refresh_rate = drm_mode_vrefresh(adjusted_mode);

	if (video_mode_required) {
		multiplier_m = 1001;
		multiplier_n = 1000;
	}

	crtc_state->cmrr.cmrr_n = mul_u32_u32(desired_refresh_rate * adjusted_mode->crtc_htotal,
					      multiplier_n);
	vtotal = DIV_ROUND_UP_ULL(mul_u32_u32(adjusted_mode->crtc_clock * 1000, multiplier_n),
				  crtc_state->cmrr.cmrr_n);
	adjusted_pixel_rate = mul_u32_u32(adjusted_mode->crtc_clock * 1000, multiplier_m);
	crtc_state->cmrr.cmrr_m = do_div(adjusted_pixel_rate, crtc_state->cmrr.cmrr_n);

	return vtotal;
}
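
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * desired_refresh_rate=60, crtc_htotal=2200 and crtc_clock=148352 kHz
 * with video_mode_required=true give cmrr_n = 60 * 2200 * 1000 and
 * vtotal = DIV_ROUND_UP(148352000 * 1000, cmrr_n) = 1124, with the
 * 1001/1000 multiplier pair steering the resulting rate towards the
 * NTSC-style 60 * 1000/1001 ~= 59.94Hz target.
 */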

static
void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
{
	/*
	 * TODO: Compute the precise target refresh rate to determine
	 * whether video_mode_required should be true. Currently set to
	 * false due to uncertainty about the precise target
	 * refresh rate.
	 */
	crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
	crtc_state->vrr.vmin = crtc_state->vrr.vmax;
	crtc_state->vrr.flipline = crtc_state->vrr.vmin;

	crtc_state->cmrr.enable = true;
	crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}

static
void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state,
				   int vmin, int vmax)
{
	crtc_state->vrr.vmax = vmax;
	crtc_state->vrr.vmin = vmin;
	crtc_state->vrr.flipline = crtc_state->vrr.vmin;

	crtc_state->vrr.enable = true;
	crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}

static
void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
{
	/* For fixed rr, vmin = vmax = flipline */
	crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
	crtc_state->vrr.vmin = crtc_state->vrr.vmax;
	crtc_state->vrr.flipline = crtc_state->vrr.vmin;
}

static int intel_vrr_hw_value(const struct intel_crtc_state *crtc_state,
			      int value)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/*
	 * On TGL vmin/vmax/flipline also need to be
	 * adjusted by the set context latency (SCL) to
	 * maintain the correct vtotals.
	 */
	if (DISPLAY_VER(display) >= 13)
		return value;
	else
		return value - crtc_state->set_context_latency;
}
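
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * on TGL with set_context_latency=2, a computed flipline of 1125
 * becomes a hw value of 1125 - 2 = 1123 here, which the register
 * write sites below then program as 1123 - 1 = 1122, since the
 * TRANS_VRR_* registers hold the value minus one; vmin is
 * additionally reduced by the vmin/flipline offset on these
 * platforms.
 */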

/*
 * For fixed refresh rate mode Vmin, Vmax and Flipline are all set to
 * the Vtotal value.
 */
static
int intel_vrr_fixed_rr_hw_vtotal(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_hw_value(crtc_state, crtc_state->hw.adjusted_mode.crtc_vtotal);
}

static
int intel_vrr_fixed_rr_hw_vmax(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}

static
int intel_vrr_fixed_rr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return intel_vrr_fixed_rr_hw_vtotal(crtc_state) -
		intel_vrr_vmin_flipline_offset(display);
}

static
int intel_vrr_fixed_rr_hw_flipline(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}

void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!intel_vrr_possible(crtc_state))
		return;

	intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
		       intel_vrr_fixed_rr_hw_vmin(crtc_state) - 1);
	intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
		       intel_vrr_fixed_rr_hw_vmax(crtc_state) - 1);
	intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
		       intel_vrr_fixed_rr_hw_flipline(crtc_state) - 1);
}

static
int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state)
{
	/*
	 * To make fixed rr and vrr work seamlessly the guardband/pipeline full
	 * should be set such that it satisfies both the fixed and variable
	 * timings.
	 * For this, set vmin to crtc_vtotal. With this we never need to
	 * change anything to do with the guardband.
	 */
	return crtc_state->hw.adjusted_mode.crtc_vtotal;
}

static
int intel_vrr_compute_vmax(struct intel_connector *connector,
			   const struct drm_display_mode *adjusted_mode)
{
	const struct drm_display_info *info = &connector->base.display_info;
	int vmax;

	vmax = adjusted_mode->crtc_clock * 1000 /
		(adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
	vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);

	return vmax;
}
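
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * crtc_clock=594000 kHz, crtc_htotal=4400 and min_vfreq=48 give
 * vmax = 594000000 / (4400 * 48) = 2812, i.e. the longest frame the
 * sink tolerates, clamped to be no shorter than crtc_vtotal.
 */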

void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
			 struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	bool is_edp = intel_dp_is_edp(intel_dp);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	int vmin, vmax;

	if (!HAS_VRR(display))
		return;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return;

	crtc_state->vrr.in_range =
		intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode));

	/*
	 * Allow fixed refresh rate with the VRR Timing Generator.
	 * For now set vrr.in_range to false, to allow fixed_rr but skip
	 * actual VRR and LRR.
	 * #TODO For actual VRR with joiner, we need to figure out how to
	 * correctly sequence transcoder level stuff vs. pipe level stuff
	 * in the commit.
	 */
	if (crtc_state->joiner_pipes)
		crtc_state->vrr.in_range = false;

	vmin = intel_vrr_compute_vmin(crtc_state);

	if (crtc_state->vrr.in_range) {
		if (HAS_LRR(display))
			crtc_state->update_lrr = true;
		vmax = intel_vrr_compute_vmax(connector, adjusted_mode);
	} else {
		vmax = vmin;
	}

	if (crtc_state->uapi.vrr_enabled && vmin < vmax)
		intel_vrr_compute_vrr_timings(crtc_state, vmin, vmax);
	else if (is_cmrr_frac_required(crtc_state) && is_edp)
		intel_vrr_compute_cmrr_timings(crtc_state);
	else
		intel_vrr_compute_fixed_rr_timings(crtc_state);

	if (HAS_AS_SDP(display)) {
		crtc_state->vrr.vsync_start =
			(crtc_state->hw.adjusted_mode.crtc_vtotal -
			 crtc_state->hw.adjusted_mode.crtc_vsync_start);
		crtc_state->vrr.vsync_end =
			(crtc_state->hw.adjusted_mode.crtc_vtotal -
			 crtc_state->hw.adjusted_mode.crtc_vsync_end);
	}
}

static int
intel_vrr_max_hw_guardband(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int max_pipeline_full = REG_FIELD_MAX(VRR_CTL_PIPELINE_FULL_MASK);

	if (DISPLAY_VER(display) >= 13)
		return REG_FIELD_MAX(XELPD_VRR_CTL_VRR_GUARDBAND_MASK);
	else
		return intel_vrr_pipeline_full_to_guardband(crtc_state,
							    max_pipeline_full);
}

static int
intel_vrr_max_vblank_guardband(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	return crtc_state->vrr.vmin -
	       adjusted_mode->crtc_vdisplay -
	       crtc_state->set_context_latency -
	       intel_vrr_extra_vblank_delay(display);
}
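
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * vmin=1125, crtc_vdisplay=1080 and set_context_latency=2, with the
 * one extra scanline of vblank delay on ICL/TGL, leave at most
 * 1125 - 1080 - 2 - 1 = 42 scanlines of guardband inside the vblank.
 */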

static int
intel_vrr_max_guardband(struct intel_crtc_state *crtc_state)
{
	return min(intel_vrr_max_hw_guardband(crtc_state),
		   intel_vrr_max_vblank_guardband(crtc_state));
}

static
int intel_vrr_compute_optimized_guardband(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct skl_prefill_ctx prefill_ctx;
	int prefill_latency_us;
	int guardband = 0;

	skl_prefill_init_worst(&prefill_ctx, crtc_state);

	/*
	 * The SoC power controller runs SAGV mutually exclusive with package C
	 * states, so the max of the package C and SAGV latencies is used to
	 * compute the min prefill guardband.
	 * PM delay = max(sagv_latency, pkgc_max_latency (highest enabled wm level 1 and up))
	 */
	prefill_latency_us = max(display->sagv.block_time_us,
				 skl_watermark_max_latency(display, 1));

	guardband = skl_prefill_min_guardband(&prefill_ctx,
					      crtc_state,
					      prefill_latency_us);

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		guardband = max(guardband, intel_psr_min_guardband(crtc_state));
		guardband = max(guardband, intel_dp_sdp_min_guardband(crtc_state, true));
	}

	return guardband;
}

static bool intel_vrr_use_optimized_guardband(const struct intel_crtc_state *crtc_state)
{
	/*
	 * #TODO: Enable optimized guardband for HDMI
	 * For HDMI a lot of infoframes are transmitted a line or two after vsync.
	 * Since with the optimized guardband the double buffering point is at the
	 * delayed vblank, we need to ensure that vsync happens after the delayed
	 * vblank for the HDMI case.
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return false;

	return true;
}

void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int guardband;

	if (!intel_vrr_possible(crtc_state))
		return;

	if (intel_vrr_use_optimized_guardband(crtc_state))
		guardband = intel_vrr_compute_optimized_guardband(crtc_state);
	else
		guardband = crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;

	crtc_state->vrr.guardband = min(guardband, intel_vrr_max_guardband(crtc_state));

	if (intel_vrr_always_use_vrr_tg(display)) {
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vtotal - crtc_state->vrr.guardband;
		/*
		 * pipe_mode has already been derived from the
		 * original adjusted_mode, keep the two in sync.
		 */
		pipe_mode->crtc_vblank_start =
			adjusted_mode->crtc_vblank_start;
	}

	if (DISPLAY_VER(display) < 13)
		crtc_state->vrr.pipeline_full =
			intel_vrr_guardband_to_pipeline_full(crtc_state,
							     crtc_state->vrr.guardband);
}
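
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * an optimized guardband of 20 scanlines with crtc_vtotal=1125 moves
 * crtc_vblank_start to 1125 - 20 = 1105 on platforms that always use
 * the VRR timing generator, and on pre-Xe_LPD hardware with
 * framestart_delay=1 it is programmed as pipeline_full = 20 - 1 - 1 = 18.
 */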

static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (DISPLAY_VER(display) >= 14)
		return VRR_CTL_FLIP_LINE_EN |
			XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
	else if (DISPLAY_VER(display) >= 13)
		return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
			XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
	else
		return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
			VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
			VRR_CTL_PIPELINE_FULL_OVERRIDE;
}

void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!HAS_VRR(display))
		return;

	/*
	 * This bit seems to have two meanings depending on the platform:
	 * TGL: generate VRR "safe window" for DSB vblank waits
	 * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
	 */
	if (IS_DISPLAY_VER(display, 12, 13))
		intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
			     0, PIPE_VBLANK_WITH_DELAY);

	if (!intel_vrr_possible(crtc_state)) {
		intel_de_write(display,
			       TRANS_VRR_CTL(display, cpu_transcoder), 0);
		return;
	}

	if (crtc_state->cmrr.enable) {
		intel_de_write(display, TRANS_CMRR_M_HI(display, cpu_transcoder),
			       upper_32_bits(crtc_state->cmrr.cmrr_m));
		intel_de_write(display, TRANS_CMRR_M_LO(display, cpu_transcoder),
			       lower_32_bits(crtc_state->cmrr.cmrr_m));
		intel_de_write(display, TRANS_CMRR_N_HI(display, cpu_transcoder),
			       upper_32_bits(crtc_state->cmrr.cmrr_n));
		intel_de_write(display, TRANS_CMRR_N_LO(display, cpu_transcoder),
			       lower_32_bits(crtc_state->cmrr.cmrr_n));
	}

	intel_vrr_set_fixed_rr_timings(crtc_state);

	if (!intel_vrr_always_use_vrr_tg(display))
		intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
			       trans_vrr_ctl(crtc_state));

	if (HAS_AS_SDP(display))
		intel_de_write(display,
			       TRANS_VRR_VSYNC(display, cpu_transcoder),
			       VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
			       VRR_VSYNC_START(crtc_state->vrr.vsync_start));

	/*
	 * For BMG and LNL+ the EMP_AS_SDP_TL register is used to program the
	 * double buffering point and transmission line for VRR packets for
	 * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
	 * Since we currently support VRR only for DP/eDP, this is programmed
	 * to send the Adaptive Sync SDP at vsync start.
	 */
	if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
		intel_de_write(display,
			       EMP_AS_SDP_TL(display, cpu_transcoder),
			       EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
}

void intel_vrr_send_push(struct intel_dsb *dsb,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->vrr.enable)
		return;

	if (dsb)
		intel_dsb_nonpost_start(dsb);

	intel_de_write_dsb(display, dsb,
			   TRANS_PUSH(display, cpu_transcoder),
			   TRANS_PUSH_EN | TRANS_PUSH_SEND);

	if (dsb)
		intel_dsb_nonpost_end(dsb);
}

void intel_vrr_check_push_sent(struct intel_dsb *dsb,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->vrr.enable)
		return;

	/*
	 * Make sure the push send bit has cleared. This should
	 * already be the case as long as the caller makes sure
	 * this is called after the delayed vblank has occurred.
	 */
	if (dsb) {
		int wait_us, count;

		wait_us = 2;
		count = 1;

		/*
		 * If the bit hasn't cleared the DSB will
		 * raise the poll error interrupt.
		 */
		intel_dsb_poll(dsb, TRANS_PUSH(display, cpu_transcoder),
			       TRANS_PUSH_SEND, 0, wait_us, count);
	} else {
		if (intel_vrr_is_push_sent(crtc_state))
			drm_err(display->drm, "[CRTC:%d:%s] VRR push send still pending\n",
				crtc->base.base.id, crtc->base.name);
	}
}

bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->vrr.enable)
		return false;

	return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}

bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
{
	if (!HAS_VRR(display))
		return false;

	if (DISPLAY_VER(display) >= 30)
		return true;

	return false;
}

static int intel_vrr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmin) -
		intel_vrr_vmin_flipline_offset(display);
}

static int intel_vrr_hw_vmax(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmax);
}

static int intel_vrr_hw_flipline(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_hw_value(crtc_state, crtc_state->vrr.flipline);
}

static void intel_vrr_set_vrr_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
		       intel_vrr_hw_vmin(crtc_state) - 1);
	intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
		       intel_vrr_hw_vmax(crtc_state) - 1);
	intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
		       intel_vrr_hw_flipline(crtc_state) - 1);
}

static void intel_vrr_tg_enable(const struct intel_crtc_state *crtc_state,
				bool cmrr_enable)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 vrr_ctl;

	intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN);

	vrr_ctl = VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state);

	/*
	 * FIXME this might be broken as bspec seems to imply that
	 * even VRR_CTL_CMRR_ENABLE is armed by TRANS_CMRR_N_HI
	 * when enabling CMRR (but not when disabling CMRR?).
	 */
	if (cmrr_enable)
		vrr_ctl |= VRR_CTL_CMRR_ENABLE;

	intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), vrr_ctl);
}

static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
		       trans_vrr_ctl(old_crtc_state));

	if (intel_de_wait_for_clear(display,
				    TRANS_VRR_STATUS(display, cpu_transcoder),
				    VRR_STATUS_VRR_EN_LIVE, 1000))
		drm_err(display->drm, "Timed out waiting for VRR live status to clear\n");

	intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}

void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!crtc_state->vrr.enable)
		return;

	intel_vrr_set_vrr_timings(crtc_state);

	if (!intel_vrr_always_use_vrr_tg(display))
		intel_vrr_tg_enable(crtc_state, crtc_state->cmrr.enable);
}

void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);

	if (!old_crtc_state->vrr.enable)
		return;

	if (!intel_vrr_always_use_vrr_tg(display))
		intel_vrr_tg_disable(old_crtc_state);

	intel_vrr_set_fixed_rr_timings(old_crtc_state);
}

void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!intel_vrr_possible(crtc_state))
		return;

	if (intel_vrr_always_use_vrr_tg(display))
		intel_vrr_tg_enable(crtc_state, false);
}

void intel_vrr_transcoder_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(old_crtc_state);

	if (!intel_vrr_possible(old_crtc_state))
		return;

	if (intel_vrr_always_use_vrr_tg(display))
		intel_vrr_tg_disable(old_crtc_state);
}

bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->vrr.flipline &&
	       crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
	       crtc_state->vrr.flipline == crtc_state->vrr.vmin;
}

void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 trans_vrr_ctl, trans_vrr_vsync;
	bool vrr_enable;

	trans_vrr_ctl = intel_de_read(display,
				      TRANS_VRR_CTL(display, cpu_transcoder));

	if (HAS_CMRR(display))
		crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);

	if (crtc_state->cmrr.enable) {
		crtc_state->cmrr.cmrr_n =
			intel_de_read64_2x32(display, TRANS_CMRR_N_LO(display, cpu_transcoder),
					     TRANS_CMRR_N_HI(display, cpu_transcoder));
		crtc_state->cmrr.cmrr_m =
			intel_de_read64_2x32(display, TRANS_CMRR_M_LO(display, cpu_transcoder),
					     TRANS_CMRR_M_HI(display, cpu_transcoder));
	}

	if (DISPLAY_VER(display) >= 13) {
		crtc_state->vrr.guardband =
			REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
	} else {
		if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) {
			crtc_state->vrr.pipeline_full =
				REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);

			crtc_state->vrr.guardband =
				intel_vrr_pipeline_full_to_guardband(crtc_state,
								     crtc_state->vrr.pipeline_full);
		}
	}

	if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
		crtc_state->vrr.flipline = intel_de_read(display,
							 TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1;
		crtc_state->vrr.vmax = intel_de_read(display,
						     TRANS_VRR_VMAX(display, cpu_transcoder)) + 1;
		crtc_state->vrr.vmin = intel_de_read(display,
						     TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;

		if (DISPLAY_VER(display) < 13) {
			/* undo what intel_vrr_hw_value() does when writing the values */
			crtc_state->vrr.flipline += crtc_state->set_context_latency;
			crtc_state->vrr.vmax += crtc_state->set_context_latency;
			crtc_state->vrr.vmin += crtc_state->set_context_latency;

			crtc_state->vrr.vmin += intel_vrr_vmin_flipline_offset(display);
		}

		/*
		 * For platforms that always use the VRR Timing Generator, the
		 * VTOTAL.Vtotal bits are not filled. Since for these platforms
		 * TRANS_VRR_VMIN is always filled with crtc_vtotal, use it to
		 * get the vtotal for adjusted_mode.
		 */
		if (intel_vrr_always_use_vrr_tg(display))
			crtc_state->hw.adjusted_mode.crtc_vtotal =
				intel_vrr_vmin_vtotal(crtc_state);

		if (HAS_AS_SDP(display)) {
			trans_vrr_vsync =
				intel_de_read(display,
					      TRANS_VRR_VSYNC(display, cpu_transcoder));
			crtc_state->vrr.vsync_start =
				REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync);
			crtc_state->vrr.vsync_end =
				REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync);
		}
	}

	vrr_enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;

	if (intel_vrr_always_use_vrr_tg(display))
		crtc_state->vrr.enable = vrr_enable && !intel_vrr_is_fixed_rr(crtc_state);
	else
		crtc_state->vrr.enable = vrr_enable;

	/*
	 * #TODO: For both VRR and CMRR the flag I915_MODE_FLAG_VRR is set in
	 * mode_flags. Since CMRR is currently disabled, set this flag for VRR
	 * for now. Need to keep this in mind while re-enabling CMRR.
	 */
	if (crtc_state->vrr.enable)
		crtc_state->mode_flags |= I915_MODE_FLAG_VRR;

	/*
	 * For platforms that always use the VRR timing generator, we overwrite
	 * crtc_vblank_start with vtotal - guardband to reflect the delayed
	 * vblank start. This works for both default and optimized guardband values.
	 * On other platforms, we keep the original value from
	 * intel_get_transcoder_timings() and apply adjustments only in VRR-specific
	 * paths as needed.
	 */
	if (intel_vrr_always_use_vrr_tg(display))
		crtc_state->hw.adjusted_mode.crtc_vblank_start =
			crtc_state->hw.adjusted_mode.crtc_vtotal -
			crtc_state->vrr.guardband;
}

int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (DISPLAY_VER(display) >= 30)
		return crtc_state->hw.adjusted_mode.crtc_vdisplay -
		       crtc_state->set_context_latency;
	else
		return crtc_state->hw.adjusted_mode.crtc_vdisplay;
}

int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state)
{
	return intel_vrr_vmin_vblank_start(crtc_state) -
	       crtc_state->set_context_latency;
}
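
/*
 * For illustration, with assumed example numbers (not taken from bspec):
 * crtc_vdisplay=2160, set_context_latency=2, vmin=2250 and guardband=30
 * give a safe window starting at scanline 2160 - 2 = 2158 on display
 * version 30+ (2160 on older platforms), with the vmin safe window
 * ending at (2250 - 30) - 2 = 2218.
 */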
901