1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
33 #include "intel_de.h"
34 #include "intel_display_types.h"
35 #include "intel_dp.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
43 
44 /**
45  * DOC: Panel Self Refresh (PSR/SRD)
46  *
47  * Since Haswell the display controller supports Panel Self-Refresh on display
48  * panels which have a remote frame buffer (RFB) implemented according to the
49  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
50  * standby states when the system is idle but the display is on, as it
51  * completely eliminates display refresh requests to DDR memory as long as the
52  * frame buffer for that display is unchanged.
53  *
54  * Panel Self Refresh must be supported by both Hardware (source) and
55  * Panel (sink).
56  *
57  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58  * to power down the link and memory controller. For DSI panels the same idea
59  * is called "manual mode".
60  *
61  * The implementation uses the hardware-based PSR support which automatically
62  * enters/exits self-refresh mode. The hardware takes care of sending the
63  * required DP aux message and could even retrain the link (that part isn't
64  * enabled yet though). The hardware also keeps track of any frontbuffer
65  * changes to know when to exit self-refresh mode again. Unfortunately that
66  * part doesn't work too well, hence why the i915 PSR support uses the
67  * software frontbuffer tracking to make sure it doesn't miss a screen
68  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69  * get called by the frontbuffer tracking code. Note that because of locking
70  * issues the self-refresh re-enable code is done from a work queue, which
71  * must be correctly synchronized/cancelled when shutting down the pipe.
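 *
 * As a rough sketch of that integration: a CPU write to a frontbuffer ends
 * up in intel_frontbuffer_invalidate(), which calls intel_psr_invalidate()
 * to force a PSR exit, and the matching intel_frontbuffer_flush() path
 * calls intel_psr_flush(), which schedules the work that re-activates PSR.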
72  *
73  * DC3CO (DC3 clock off)
74  *
75  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
76  * the clock off automatically during PSR2 idle state.
77  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
78  * entry/exit allows the HW to enter a low-power state even when page flipping
79  * periodically (for instance a 30fps video playback scenario).
80  *
81  * Every time a flip occurs PSR2 gets out of deep sleep state (if it was in
82  * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after
83  * 6 frames. If no other flip occurs and that work is executed, DC3CO is
84  * disabled and PSR2 is configured to enter deep sleep again, the cycle
85  * restarting on the next flip.
86  * Front buffer modifications do not trigger DC3CO activation on purpose, as
87  * it would bring a lot of complexity and most modern systems will only use
88  * page flips.
89  */
90 
91 /*
92  * Description of PSR mask bits:
93  *
94  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
95  *
96  *  When unmasked (nearly) all display register writes (eg. even
97  *  SWF) trigger a PSR exit. Some registers are excluded from this
98  *  and they have a more specific mask (described below). On icl+
99  *  this bit no longer exists and is effectively always set.
100  *
101  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
102  *
103  *  When unmasked (nearly) all pipe/plane register writes
104  *  trigger a PSR exit. Some plane registers are excluded from this
105  *  and they have a more specific mask (described below).
106  *
107  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
110  *
111  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112  *  SPR_SURF/CURBASE are not included in this and instead are
113  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
115  *
116  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
118  *
119  *  When unmasked PSR is blocked as long as the sprite
120  *  plane is enabled. skl+ with their universal planes no
121  *  longer have a mask bit like this, and no plane being
122  *  enabled blocks PSR.
123  *
124  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
126  *
127  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
128  *  this doesn't exist but CURPOS is included in the
129  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
130  *
131  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
133  *
134  *  When unmasked PSR is blocked as long as vblank and/or vsync
135  *  interrupt is unmasked in IMR *and* enabled in IER.
136  *
137  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
139  *
140  *  Selects whether PSR exit generates an extra vblank before
141  *  the first frame is transmitted. Also note the opposite polarity
142  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143  *  unmasked==do not generate the extra vblank).
144  *
145  *  With DC states enabled the extra vblank happens after link training,
146  *  with DC states disabled it happens immediately upon PSR exit trigger.
147  *  No idea as of now why there is a difference. HSW/BDW (which don't
148  *  even have DMC) always generate it after link training. Go figure.
149  *
150  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
151  *  and thus won't latch until the first vblank. So with DC states
152  *  enabled the register effectively uses the reset value during DC5
153  *  exit+PSR exit sequence, and thus the bit does nothing until
154  *  latched by the vblank that it was trying to prevent from being
155  *  generated in the first place. So we should probably call this
156  *  one a chicken/egg bit instead on skl+.
157  *
158  *  In standby mode (as opposed to link-off) this makes no difference
159  *  as the timing generator keeps running the whole time generating
160  *  normal periodic vblanks.
161  *
162  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163  *  and doing so makes the behaviour match the skl+ reset value.
164  *
165  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
167  *
168  *  On BDW without this bit no vblanks whatsoever are
169  *  generated after PSR exit. On HSW this has no apparent effect.
170  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
171  *
172  * The rest of the bits are more self-explanatory and/or
173  * irrelevant for normal operation.
174  */
175 
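/*
 * Both PSR and Panel Replay require support on the source (HW) side and on
 * the sink (panel) side; these helpers check that both are present.
 */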
176 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
177 			   (intel_dp)->psr.source_support)
178 
179 #define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
180 				    (intel_dp)->psr.source_panel_replay_support)
181 
182 bool intel_encoder_can_psr(struct intel_encoder *encoder)
183 {
184 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
185 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
186 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
187 	else
188 		return false;
189 }
190 
191 static bool psr_global_enabled(struct intel_dp *intel_dp)
192 {
193 	struct intel_connector *connector = intel_dp->attached_connector;
194 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
195 
196 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
197 	case I915_PSR_DEBUG_DEFAULT:
198 		if (i915->display.params.enable_psr == -1)
199 			return connector->panel.vbt.psr.enable;
200 		return i915->display.params.enable_psr;
201 	case I915_PSR_DEBUG_DISABLE:
202 		return false;
203 	default:
204 		return true;
205 	}
206 }
207 
208 static bool psr2_global_enabled(struct intel_dp *intel_dp)
209 {
210 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
211 
212 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
213 	case I915_PSR_DEBUG_DISABLE:
214 	case I915_PSR_DEBUG_FORCE_PSR1:
215 		return false;
216 	default:
217 		if (i915->display.params.enable_psr == 1)
218 			return false;
219 		return true;
220 	}
221 }
222 
223 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
224 {
225 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
226 
227 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
228 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
229 }
230 
231 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
232 {
233 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
234 
235 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
236 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
237 }
238 
239 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
240 {
241 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
242 
243 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
244 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
245 }
246 
247 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
248 {
249 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
250 
251 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
252 		EDP_PSR_MASK(intel_dp->psr.transcoder);
253 }
254 
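/*
 * HSW has a single set of SRD (PSR) registers, while BDW+ use the
 * per-transcoder EDP_PSR_* variants; these helpers pick the right one.
 */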
255 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
256 			      enum transcoder cpu_transcoder)
257 {
258 	if (DISPLAY_VER(dev_priv) >= 8)
259 		return EDP_PSR_CTL(cpu_transcoder);
260 	else
261 		return HSW_SRD_CTL;
262 }
263 
264 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
265 				enum transcoder cpu_transcoder)
266 {
267 	if (DISPLAY_VER(dev_priv) >= 8)
268 		return EDP_PSR_DEBUG(cpu_transcoder);
269 	else
270 		return HSW_SRD_DEBUG;
271 }
272 
273 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
274 				   enum transcoder cpu_transcoder)
275 {
276 	if (DISPLAY_VER(dev_priv) >= 8)
277 		return EDP_PSR_PERF_CNT(cpu_transcoder);
278 	else
279 		return HSW_SRD_PERF_CNT;
280 }
281 
282 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
283 				 enum transcoder cpu_transcoder)
284 {
285 	if (DISPLAY_VER(dev_priv) >= 8)
286 		return EDP_PSR_STATUS(cpu_transcoder);
287 	else
288 		return HSW_SRD_STATUS;
289 }
290 
291 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
292 			      enum transcoder cpu_transcoder)
293 {
294 	if (DISPLAY_VER(dev_priv) >= 12)
295 		return TRANS_PSR_IMR(cpu_transcoder);
296 	else
297 		return EDP_PSR_IMR;
298 }
299 
300 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
301 			      enum transcoder cpu_transcoder)
302 {
303 	if (DISPLAY_VER(dev_priv) >= 12)
304 		return TRANS_PSR_IIR(cpu_transcoder);
305 	else
306 		return EDP_PSR_IIR;
307 }
308 
309 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
310 				  enum transcoder cpu_transcoder)
311 {
312 	if (DISPLAY_VER(dev_priv) >= 8)
313 		return EDP_PSR_AUX_CTL(cpu_transcoder);
314 	else
315 		return HSW_SRD_AUX_CTL;
316 }
317 
318 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
319 				   enum transcoder cpu_transcoder, int i)
320 {
321 	if (DISPLAY_VER(dev_priv) >= 8)
322 		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
323 	else
324 		return HSW_SRD_AUX_DATA(i);
325 }
326 
327 static void psr_irq_control(struct intel_dp *intel_dp)
328 {
329 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
330 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
331 	u32 mask;
332 
333 	mask = psr_irq_psr_error_bit_get(intel_dp);
334 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
335 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
336 			psr_irq_pre_entry_bit_get(intel_dp);
337 
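	/*
	 * A bit set in the PSR IMR masks (disables) that interrupt, so only
	 * the bits selected above are left unmasked here.
	 */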
338 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
339 		     psr_irq_mask_get(intel_dp), ~mask);
340 }
341 
342 static void psr_event_print(struct drm_i915_private *i915,
343 			    u32 val, bool psr2_enabled)
344 {
345 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
346 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
347 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
348 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
349 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
350 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
351 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
352 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
353 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
354 	if (val & PSR_EVENT_GRAPHICS_RESET)
355 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
356 	if (val & PSR_EVENT_PCH_INTERRUPT)
357 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
358 	if (val & PSR_EVENT_MEMORY_UP)
359 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
360 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
361 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
362 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
363 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
364 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
365 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
366 	if (val & PSR_EVENT_REGISTER_UPDATE)
367 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
368 	if (val & PSR_EVENT_HDCP_ENABLE)
369 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
370 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
371 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
372 	if (val & PSR_EVENT_VBI_ENABLE)
373 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
374 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
375 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
376 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
377 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
378 }
379 
380 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
381 {
382 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
383 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
384 	ktime_t time_ns =  ktime_get();
385 
386 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
387 		intel_dp->psr.last_entry_attempt = time_ns;
388 		drm_dbg_kms(&dev_priv->drm,
389 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
390 			    transcoder_name(cpu_transcoder));
391 	}
392 
393 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
394 		intel_dp->psr.last_exit = time_ns;
395 		drm_dbg_kms(&dev_priv->drm,
396 			    "[transcoder %s] PSR exit completed\n",
397 			    transcoder_name(cpu_transcoder));
398 
399 		if (DISPLAY_VER(dev_priv) >= 9) {
400 			u32 val;
401 
402 			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
403 
404 			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
405 		}
406 	}
407 
408 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
409 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
410 			 transcoder_name(cpu_transcoder));
411 
412 		intel_dp->psr.irq_aux_error = true;
413 
414 		/*
415 		 * If this interrupt is not masked it will keep firing
416 		 * so fast that it prevents the scheduled work from
417 		 * running.
418 		 * Also, after a PSR error we don't want to arm PSR
419 		 * again, so we don't care about unmasking the interrupt
420 		 * or clearing irq_aux_error.
421 		 */
422 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
423 			     0, psr_irq_psr_error_bit_get(intel_dp));
424 
425 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
426 	}
427 }
428 
429 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
430 {
431 	u8 alpm_caps = 0;
432 
433 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
434 			      &alpm_caps) != 1)
435 		return false;
436 	return alpm_caps & DP_ALPM_CAP;
437 }
438 
439 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
440 {
441 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
442 	u8 val = 8; /* assume the worst if we can't read the value */
443 
444 	if (drm_dp_dpcd_readb(&intel_dp->aux,
445 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
446 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
447 	else
448 		drm_dbg_kms(&i915->drm,
449 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
450 	return val;
451 }
452 
453 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
454 {
455 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
456 	ssize_t r;
457 	u16 w;
458 	u8 y;
459 
460 	/* If the sink doesn't have specific granularity requirements, set legacy ones */
461 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
462 		/* As PSR2 HW sends full lines, we do not care about x granularity */
463 		w = 4;
464 		y = 4;
465 		goto exit;
466 	}
467 
468 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
469 	if (r != 2)
470 		drm_dbg_kms(&i915->drm,
471 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
472 	/*
473 	 * Spec says that if the value read is 0 the default granularity should
474 	 * be used instead.
475 	 */
476 	if (r != 2 || w == 0)
477 		w = 4;
478 
479 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
480 	if (r != 1) {
481 		drm_dbg_kms(&i915->drm,
482 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
483 		y = 4;
484 	}
485 	if (y == 0)
486 		y = 1;
487 
488 exit:
489 	intel_dp->psr.su_w_granularity = w;
490 	intel_dp->psr.su_y_granularity = y;
491 }
492 
493 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
494 {
495 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
496 	u8 pr_dpcd = 0;
497 
498 	intel_dp->psr.sink_panel_replay_support = false;
499 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
500 
501 	if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
502 		drm_dbg_kms(&i915->drm,
503 			    "Panel replay is not supported by panel\n");
504 		return;
505 	}
506 
507 	drm_dbg_kms(&i915->drm,
508 		    "Panel replay is supported by panel\n");
509 	intel_dp->psr.sink_panel_replay_support = true;
510 }
511 
512 static void _psr_init_dpcd(struct intel_dp *intel_dp)
513 {
514 	struct drm_i915_private *i915 =
515 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
516 
517 	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
518 		    intel_dp->psr_dpcd[0]);
519 
520 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
521 		drm_dbg_kms(&i915->drm,
522 			    "PSR support not currently available for this panel\n");
523 		return;
524 	}
525 
526 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
527 		drm_dbg_kms(&i915->drm,
528 			    "Panel lacks power state control, PSR cannot be enabled\n");
529 		return;
530 	}
531 
532 	intel_dp->psr.sink_support = true;
533 	intel_dp->psr.sink_sync_latency =
534 		intel_dp_get_sink_sync_latency(intel_dp);
535 
536 	if (DISPLAY_VER(i915) >= 9 &&
537 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
538 		bool y_req = intel_dp->psr_dpcd[1] &
539 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
540 		bool alpm = intel_dp_get_alpm_status(intel_dp);
541 
542 		/*
543 		 * All panels that support PSR version 03h (PSR2 +
544 		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
545 		 * are only sure it is going to be used when required by the
546 		 * panel. This way the panel is capable of doing selective
547 		 * updates without an aux frame sync.
548 		 *
549 		 * To support PSR version 02h, and PSR version 03h panels
550 		 * without the Y-coordinate requirement, we would need to
551 		 * enable GTC first.
552 		 */
553 		intel_dp->psr.sink_psr2_support = y_req && alpm;
554 		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
555 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
556 	}
557 }
558 
559 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
560 {
561 	_panel_replay_init_dpcd(intel_dp);
562 
563 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
564 			 sizeof(intel_dp->psr_dpcd));
565 
566 	if (intel_dp->psr_dpcd[0])
567 		_psr_init_dpcd(intel_dp);
568 
569 	if (intel_dp->psr.sink_psr2_support)
570 		intel_dp_get_su_granularity(intel_dp);
571 }
572 
573 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
574 {
575 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
576 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
577 	u32 aux_clock_divider, aux_ctl;
578 	/* write DP_SET_POWER=D0 */
579 	static const u8 aux_msg[] = {
580 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
581 		[1] = (DP_SET_POWER >> 8) & 0xff,
582 		[2] = DP_SET_POWER & 0xff,
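		/* AUX header byte 3: number of data bytes to write minus one */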
583 		[3] = 1 - 1,
584 		[4] = DP_SET_POWER_D0,
585 	};
586 	int i;
587 
588 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
589 	for (i = 0; i < sizeof(aux_msg); i += 4)
590 		intel_de_write(dev_priv,
591 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
592 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
593 
594 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
595 
596 	/* Start with bits set for DDI_AUX_CTL register */
597 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
598 					     aux_clock_divider);
599 
600 	/* Select only valid bits for SRD_AUX_CTL */
601 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
602 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
603 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
604 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
605 
606 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
607 		       aux_ctl);
608 }
609 
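/*
 * Early Transport for the selective update region: only considered on
 * display version 20+ and when the sink reports PSR2 with Y-coordinate
 * and Early Transport support (and it isn't disabled via the PSR debug mask).
 */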
610 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
611 {
612 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
613 
614 	if (DISPLAY_VER(i915) >= 20 &&
615 	    intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
616 	    !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
617 		return true;
618 
619 	return false;
620 }
621 
622 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
623 {
624 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
625 	u8 dpcd_val = DP_PSR_ENABLE;
626 
627 	if (intel_dp->psr.panel_replay_enabled)
628 		return;
629 
630 	if (intel_dp->psr.psr2_enabled) {
631 		/* Enable ALPM at sink for psr2 */
632 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
633 				   DP_ALPM_ENABLE |
634 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
635 
636 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
637 		if (psr2_su_region_et_valid(intel_dp))
638 			dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
639 	} else {
640 		if (intel_dp->psr.link_standby)
641 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
642 
643 		if (DISPLAY_VER(dev_priv) >= 8)
644 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
645 	}
646 
647 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
648 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
649 
650 	if (intel_dp->psr.entry_setup_frames > 0)
651 		dpcd_val |= DP_PSR_FRAME_CAPTURE;
652 
653 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
654 
655 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
656 }
657 
658 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
659 {
660 	struct intel_connector *connector = intel_dp->attached_connector;
661 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
662 	u32 val = 0;
663 
664 	if (DISPLAY_VER(dev_priv) >= 11)
665 		val |= EDP_PSR_TP4_TIME_0us;
666 
667 	if (dev_priv->display.params.psr_safest_params) {
668 		val |= EDP_PSR_TP1_TIME_2500us;
669 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
670 		goto check_tp3_sel;
671 	}
672 
673 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
674 		val |= EDP_PSR_TP1_TIME_0us;
675 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
676 		val |= EDP_PSR_TP1_TIME_100us;
677 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
678 		val |= EDP_PSR_TP1_TIME_500us;
679 	else
680 		val |= EDP_PSR_TP1_TIME_2500us;
681 
682 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
683 		val |= EDP_PSR_TP2_TP3_TIME_0us;
684 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
685 		val |= EDP_PSR_TP2_TP3_TIME_100us;
686 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
687 		val |= EDP_PSR_TP2_TP3_TIME_500us;
688 	else
689 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
690 
691 	/*
692 	 * WA 0479: hsw,bdw
693 	 * "Do not skip both TP1 and TP2/TP3"
694 	 */
695 	if (DISPLAY_VER(dev_priv) < 9 &&
696 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
697 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
698 		val |= EDP_PSR_TP2_TP3_TIME_100us;
699 
700 check_tp3_sel:
701 	if (intel_dp_source_supports_tps3(dev_priv) &&
702 	    drm_dp_tps3_supported(intel_dp->dpcd))
703 		val |= EDP_PSR_TP_TP1_TP3;
704 	else
705 		val |= EDP_PSR_TP_TP1_TP2;
706 
707 	return val;
708 }
709 
710 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
711 {
712 	struct intel_connector *connector = intel_dp->attached_connector;
713 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
714 	int idle_frames;
715 
716 	/* Let's use 6 as the minimum to cover all known cases including the
717 	 * off-by-one issue that HW has in some cases.
718 	 */
719 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
720 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
721 
722 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
723 		idle_frames = 0xf;
724 
725 	return idle_frames;
726 }
727 
728 static void hsw_activate_psr1(struct intel_dp *intel_dp)
729 {
730 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
732 	u32 max_sleep_time = 0x1f;
733 	u32 val = EDP_PSR_ENABLE;
734 
735 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
736 
737 	if (DISPLAY_VER(dev_priv) < 20)
738 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
739 
740 	if (IS_HASWELL(dev_priv))
741 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
742 
743 	if (intel_dp->psr.link_standby)
744 		val |= EDP_PSR_LINK_STANDBY;
745 
746 	val |= intel_psr1_get_tp_time(intel_dp);
747 
748 	if (DISPLAY_VER(dev_priv) >= 8)
749 		val |= EDP_PSR_CRC_ENABLE;
750 
751 	if (DISPLAY_VER(dev_priv) >= 20)
752 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
753 
754 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
755 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
756 }
757 
758 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
759 {
760 	struct intel_connector *connector = intel_dp->attached_connector;
761 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
762 	u32 val = 0;
763 
764 	if (dev_priv->display.params.psr_safest_params)
765 		return EDP_PSR2_TP2_TIME_2500us;
766 
767 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
768 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
769 		val |= EDP_PSR2_TP2_TIME_50us;
770 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
771 		val |= EDP_PSR2_TP2_TIME_100us;
772 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
773 		val |= EDP_PSR2_TP2_TIME_500us;
774 	else
775 		val |= EDP_PSR2_TP2_TIME_2500us;
776 
777 	return val;
778 }
779 
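/*
 * The PSR2 block count is expressed in blocks of 4 lines: wake line counts
 * below 9 fit into 8 lines (2 blocks), anything larger needs 12 lines
 * (3 blocks).
 */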
780 static int psr2_block_count_lines(struct intel_dp *intel_dp)
781 {
782 	return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
783 		intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
784 }
785 
786 static int psr2_block_count(struct intel_dp *intel_dp)
787 {
788 	return psr2_block_count_lines(intel_dp) / 4;
789 }
790 
791 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
792 {
793 	u8 frames_before_su_entry;
794 
795 	frames_before_su_entry = max_t(u8,
796 				       intel_dp->psr.sink_sync_latency + 1,
797 				       2);
798 
799 	/* Entry setup frames must be at least 1 less than frames before SU entry */
800 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
801 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
802 
803 	return frames_before_su_entry;
804 }
805 
806 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
807 {
808 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
809 
810 	intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
811 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
812 
813 	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
814 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
815 }
816 
817 static void hsw_activate_psr2(struct intel_dp *intel_dp)
818 {
819 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
820 	struct intel_psr *psr = &intel_dp->psr;
821 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
822 	u32 val = EDP_PSR2_ENABLE;
823 	u32 psr_val = 0;
824 
825 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
826 
827 	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
828 		val |= EDP_SU_TRACK_ENABLE;
829 
830 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
831 		val |= EDP_Y_COORDINATE_ENABLE;
832 
833 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
834 
835 	val |= intel_psr2_get_tp_time(intel_dp);
836 
837 	if (DISPLAY_VER(dev_priv) >= 12) {
838 		if (psr2_block_count(intel_dp) > 2)
839 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
840 		else
841 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
842 	}
843 
844 	/* Wa_22012278275:adl-p */
845 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
846 		static const u8 map[] = {
847 			2, /* 5 lines */
848 			1, /* 6 lines */
849 			0, /* 7 lines */
850 			3, /* 8 lines */
851 			6, /* 9 lines */
852 			5, /* 10 lines */
853 			4, /* 11 lines */
854 			7, /* 12 lines */
855 		};
856 		/*
857 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
858 		 * comments below for more information
859 		 */
860 		int tmp;
861 
862 		tmp = map[psr->alpm_parameters.io_wake_lines -
863 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
864 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
865 
866 		tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
867 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
868 	} else if (DISPLAY_VER(dev_priv) >= 12) {
869 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
870 		val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
871 	} else if (DISPLAY_VER(dev_priv) >= 9) {
872 		val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
873 		val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
874 	}
875 
876 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
877 		val |= EDP_PSR2_SU_SDP_SCANLINE;
878 
879 	if (DISPLAY_VER(dev_priv) >= 20)
880 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
881 
882 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
883 		u32 tmp;
884 
885 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
886 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
887 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
888 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
889 	}
890 
891 	if (psr2_su_region_et_valid(intel_dp))
892 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
893 
894 	/*
895 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
896 	 * recommends keeping this bit unset while PSR2 is enabled.
897 	 */
898 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
899 
900 	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
901 }
902 
903 static bool
904 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
905 {
906 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
907 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
908 	else if (DISPLAY_VER(dev_priv) >= 12)
909 		return cpu_transcoder == TRANSCODER_A;
910 	else if (DISPLAY_VER(dev_priv) >= 9)
911 		return cpu_transcoder == TRANSCODER_EDP;
912 	else
913 		return false;
914 }
915 
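/* Frame time of the current mode in microseconds, or 0 if the crtc is not active. */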
916 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
917 {
918 	if (!crtc_state->hw.active)
919 		return 0;
920 
921 	return DIV_ROUND_UP(1000 * 1000,
922 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
923 }
924 
925 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
926 				     u32 idle_frames)
927 {
928 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
929 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
930 
931 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
932 		     EDP_PSR2_IDLE_FRAMES_MASK,
933 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
934 }
935 
936 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
937 {
938 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
939 
940 	psr2_program_idle_frames(intel_dp, 0);
941 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
942 }
943 
944 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
945 {
946 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
947 
948 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
949 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
950 }
951 
952 static void tgl_dc3co_disable_work(struct work_struct *work)
953 {
954 	struct intel_dp *intel_dp =
955 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
956 
957 	mutex_lock(&intel_dp->psr.lock);
958 	/* If delayed work is pending, it is not idle */
959 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
960 		goto unlock;
961 
962 	tgl_psr2_disable_dc3co(intel_dp);
963 unlock:
964 	mutex_unlock(&intel_dp->psr.lock);
965 }
966 
967 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
968 {
969 	if (!intel_dp->psr.dc3co_exitline)
970 		return;
971 
972 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
973 	/* Before PSR2 exit disallow dc3co */
974 	tgl_psr2_disable_dc3co(intel_dp);
975 }
976 
977 static bool
978 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
979 			      struct intel_crtc_state *crtc_state)
980 {
981 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
982 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
983 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
984 	enum port port = dig_port->base.port;
985 
986 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
987 		return pipe <= PIPE_B && port <= PORT_B;
988 	else
989 		return pipe == PIPE_A && port == PORT_A;
990 }
991 
992 static void
993 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
994 				  struct intel_crtc_state *crtc_state)
995 {
996 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
997 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
998 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
999 	u32 exit_scanlines;
1000 
1001 	/*
1002 	 * FIXME: The DC3CO activation/deactivation sequence has changed; keep
1003 	 * DC3CO disabled until the new sequence is implemented here.
1004 	 * B.Specs:49196
1005 	 */
1006 	return;
1007 
1008 	/*
1009 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1010 	 * TODO: when the issue is addressed, this restriction should be removed.
1011 	 */
1012 	if (crtc_state->enable_psr2_sel_fetch)
1013 		return;
1014 
1015 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1016 		return;
1017 
1018 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1019 		return;
1020 
1021 	/* Wa_16011303918:adl-p */
1022 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1023 		return;
1024 
1025 	/*
1026 	 * DC3CO Exit time 200us B.Spec 49196
1027 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1028 	 */
1029 	exit_scanlines =
1030 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1031 
1032 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1033 		return;
1034 
1035 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1036 }
1037 
1038 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1039 					      struct intel_crtc_state *crtc_state)
1040 {
1041 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1042 
1043 	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1044 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1045 		drm_dbg_kms(&dev_priv->drm,
1046 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1047 		return false;
1048 	}
1049 
1050 	if (crtc_state->uapi.async_flip) {
1051 		drm_dbg_kms(&dev_priv->drm,
1052 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1053 		return false;
1054 	}
1055 
1056 	if (psr2_su_region_et_valid(intel_dp))
1057 		crtc_state->enable_psr2_su_region_et = true;
1058 
1059 	return crtc_state->enable_psr2_sel_fetch = true;
1060 }
1061 
1062 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1063 				   struct intel_crtc_state *crtc_state)
1064 {
1065 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1066 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1067 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1068 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1069 	u16 y_granularity = 0;
1070 
1071 	/* PSR2 HW only sends full lines so we only need to validate the width */
1072 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1073 		return false;
1074 
1075 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1076 		return false;
1077 
1078 	/* HW tracking is only aligned to 4 lines */
1079 	if (!crtc_state->enable_psr2_sel_fetch)
1080 		return intel_dp->psr.su_y_granularity == 4;
1081 
1082 	/*
1083 	 * adl_p and mtl platforms have 1 line granularity.
1084 	 * For other platforms with SW tracking we can adjust the y coordinates
1085 	 * to match the sink requirement if it is a multiple of 4.
1086 	 */
1087 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1088 		y_granularity = intel_dp->psr.su_y_granularity;
1089 	else if (intel_dp->psr.su_y_granularity <= 2)
1090 		y_granularity = 4;
1091 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1092 		y_granularity = intel_dp->psr.su_y_granularity;
1093 
1094 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1095 		return false;
1096 
1097 	if (crtc_state->dsc.compression_enable &&
1098 	    vdsc_cfg->slice_height % y_granularity)
1099 		return false;
1100 
1101 	crtc_state->su_y_granularity = y_granularity;
1102 	return true;
1103 }
1104 
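/*
 * Check whether the PSR2 VSC SDP fits into the hblank with enough margin;
 * if not, fall back to sending the SDP one scanline earlier, which per the
 * checks below needs display version 14+ and an eDP 1.4b+ sink.
 */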
1105 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1106 							struct intel_crtc_state *crtc_state)
1107 {
1108 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1109 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1110 	u32 hblank_total, hblank_ns, req_ns;
1111 
1112 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1113 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1114 
1115 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1116 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1117 
1118 	if ((hblank_ns - req_ns) > 100)
1119 		return true;
1120 
1121 	/* Not supported <13 / Wa_22012279113:adl-p */
1122 	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1123 		return false;
1124 
1125 	crtc_state->req_psr2_sdp_prior_scanline = true;
1126 	return true;
1127 }
1128 
1129 static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
1130 				     struct intel_crtc_state *crtc_state)
1131 {
1132 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1133 	int check_entry_lines;
1134 
1135 	if (DISPLAY_VER(i915) < 20)
1136 		return true;
1137 
1138 	/* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
1139 	check_entry_lines = 2 +
1140 		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
1141 
1142 	if (check_entry_lines > 15)
1143 		return false;
1144 
1145 	if (i915->display.params.psr_safest_params)
1146 		check_entry_lines = 15;
1147 
1148 	intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
1149 
1150 	return true;
1151 }
1152 
1153 static bool _compute_alpm_params(struct intel_dp *intel_dp,
1154 				 struct intel_crtc_state *crtc_state)
1155 {
1156 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1157 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1158 	u8 max_wake_lines;
1159 
1160 	if (DISPLAY_VER(i915) >= 12) {
1161 		io_wake_time = 42;
1162 		/*
1163 		 * According to Bspec it's 42us, but based on testing
1164 		 * it is not enough -> use 45 us.
1165 		 */
1166 		fast_wake_time = 45;
1167 
1168 		/* TODO: Check how we can use ALPM_CTL fast wake extended field */
1169 		max_wake_lines = 12;
1170 	} else {
1171 		io_wake_time = 50;
1172 		fast_wake_time = 32;
1173 		max_wake_lines = 8;
1174 	}
1175 
1176 	io_wake_lines = intel_usecs_to_scanlines(
1177 		&crtc_state->hw.adjusted_mode, io_wake_time);
1178 	fast_wake_lines = intel_usecs_to_scanlines(
1179 		&crtc_state->hw.adjusted_mode, fast_wake_time);
1180 
1181 	if (io_wake_lines > max_wake_lines ||
1182 	    fast_wake_lines > max_wake_lines)
1183 		return false;
1184 
1185 	if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
1186 		return false;
1187 
1188 	if (i915->display.params.psr_safest_params)
1189 		io_wake_lines = fast_wake_lines = max_wake_lines;
1190 
1191 	/* According to Bspec the lower limit should be set to 7 lines. */
1192 	intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
1193 	intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
1194 
1195 	return true;
1196 }
1197 
1198 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1199 					const struct drm_display_mode *adjusted_mode)
1200 {
1201 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1202 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1203 	int entry_setup_frames = 0;
1204 
1205 	if (psr_setup_time < 0) {
1206 		drm_dbg_kms(&i915->drm,
1207 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1208 			    intel_dp->psr_dpcd[1]);
1209 		return -ETIME;
1210 	}
1211 
1212 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1213 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1214 		if (DISPLAY_VER(i915) >= 20) {
1215 			/* setup entry frames can be up to 3 frames */
1216 			entry_setup_frames = 1;
1217 			drm_dbg_kms(&i915->drm,
1218 				    "PSR setup entry frames %d\n",
1219 				    entry_setup_frames);
1220 		} else {
1221 			drm_dbg_kms(&i915->drm,
1222 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1223 				    psr_setup_time);
1224 			return -ETIME;
1225 		}
1226 	}
1227 
1228 	return entry_setup_frames;
1229 }
1230 
1231 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1232 				    struct intel_crtc_state *crtc_state)
1233 {
1234 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1235 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1236 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1237 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1238 
1239 	if (!intel_dp->psr.sink_psr2_support)
1240 		return false;
1241 
1242 	/* JSL and EHL only support eDP 1.3 */
1243 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1244 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1245 		return false;
1246 	}
1247 
1248 	/* Wa_16011181250 */
1249 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1250 	    IS_DG2(dev_priv)) {
1251 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1252 		return false;
1253 	}
1254 
1255 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1256 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1257 		return false;
1258 	}
1259 
1260 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1261 		drm_dbg_kms(&dev_priv->drm,
1262 			    "PSR2 not supported in transcoder %s\n",
1263 			    transcoder_name(crtc_state->cpu_transcoder));
1264 		return false;
1265 	}
1266 
1267 	if (!psr2_global_enabled(intel_dp)) {
1268 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1269 		return false;
1270 	}
1271 
1272 	/*
1273 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1274 	 * resolution requires DSC to be enabled, priority is given to DSC
1275 	 * over PSR2.
1276 	 */
1277 	if (crtc_state->dsc.compression_enable &&
1278 	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1279 		drm_dbg_kms(&dev_priv->drm,
1280 			    "PSR2 cannot be enabled since DSC is enabled\n");
1281 		return false;
1282 	}
1283 
1284 	if (crtc_state->crc_enabled) {
1285 		drm_dbg_kms(&dev_priv->drm,
1286 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1287 		return false;
1288 	}
1289 
1290 	if (DISPLAY_VER(dev_priv) >= 12) {
1291 		psr_max_h = 5120;
1292 		psr_max_v = 3200;
1293 		max_bpp = 30;
1294 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1295 		psr_max_h = 4096;
1296 		psr_max_v = 2304;
1297 		max_bpp = 24;
1298 	} else if (DISPLAY_VER(dev_priv) == 9) {
1299 		psr_max_h = 3640;
1300 		psr_max_v = 2304;
1301 		max_bpp = 24;
1302 	}
1303 
1304 	if (crtc_state->pipe_bpp > max_bpp) {
1305 		drm_dbg_kms(&dev_priv->drm,
1306 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1307 			    crtc_state->pipe_bpp, max_bpp);
1308 		return false;
1309 	}
1310 
1311 	/* Wa_16011303918:adl-p */
1312 	if (crtc_state->vrr.enable &&
1313 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1314 		drm_dbg_kms(&dev_priv->drm,
1315 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1316 		return false;
1317 	}
1318 
1319 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1320 		drm_dbg_kms(&dev_priv->drm,
1321 			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1322 		return false;
1323 	}
1324 
1325 	if (!_compute_alpm_params(intel_dp, crtc_state)) {
1326 		drm_dbg_kms(&dev_priv->drm,
1327 			    "PSR2 not enabled, Unable to use long enough wake times\n");
1328 		return false;
1329 	}
1330 
1331 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1332 	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1333 	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
1334 	    psr2_block_count_lines(intel_dp)) {
1335 		drm_dbg_kms(&dev_priv->drm,
1336 			    "PSR2 not enabled, too short vblank time\n");
1337 		return false;
1338 	}
1339 
1340 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1341 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1342 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
1343 			drm_dbg_kms(&dev_priv->drm,
1344 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1345 			return false;
1346 		}
1347 	}
1348 
1349 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1350 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1351 		goto unsupported;
1352 	}
1353 
1354 	if (!crtc_state->enable_psr2_sel_fetch &&
1355 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1356 		drm_dbg_kms(&dev_priv->drm,
1357 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1358 			    crtc_hdisplay, crtc_vdisplay,
1359 			    psr_max_h, psr_max_v);
1360 		goto unsupported;
1361 	}
1362 
1363 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1364 	return true;
1365 
1366 unsupported:
1367 	crtc_state->enable_psr2_sel_fetch = false;
1368 	return false;
1369 }
1370 
1371 static bool _psr_compute_config(struct intel_dp *intel_dp,
1372 				struct intel_crtc_state *crtc_state)
1373 {
1374 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1375 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1376 	int entry_setup_frames;
1377 
1378 	/*
1379 	 * Current PSR panels don't work reliably with VRR enabled, so
1380 	 * if VRR is enabled, do not enable PSR.
1381 	 */
1382 	if (crtc_state->vrr.enable)
1383 		return false;
1384 
1385 	if (!CAN_PSR(intel_dp))
1386 		return false;
1387 
1388 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1389 
1390 	if (entry_setup_frames >= 0) {
1391 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1392 	} else {
1393 		drm_dbg_kms(&dev_priv->drm,
1394 			    "PSR condition failed: PSR setup timing not met\n");
1395 		return false;
1396 	}
1397 
1398 	return true;
1399 }
1400 
1401 void intel_psr_compute_config(struct intel_dp *intel_dp,
1402 			      struct intel_crtc_state *crtc_state,
1403 			      struct drm_connector_state *conn_state)
1404 {
1405 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1406 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1407 
1408 	if (!psr_global_enabled(intel_dp)) {
1409 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1410 		return;
1411 	}
1412 
1413 	if (intel_dp->psr.sink_not_reliable) {
1414 		drm_dbg_kms(&dev_priv->drm,
1415 			    "PSR sink implementation is not reliable\n");
1416 		return;
1417 	}
1418 
1419 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1420 		drm_dbg_kms(&dev_priv->drm,
1421 			    "PSR condition failed: Interlaced mode enabled\n");
1422 		return;
1423 	}
1424 
1425 	if (CAN_PANEL_REPLAY(intel_dp))
1426 		crtc_state->has_panel_replay = true;
1427 	else
1428 		crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1429 
1430 	if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1431 		return;
1432 
1433 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1434 }
1435 
1436 void intel_psr_get_config(struct intel_encoder *encoder,
1437 			  struct intel_crtc_state *pipe_config)
1438 {
1439 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1440 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1441 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1442 	struct intel_dp *intel_dp;
1443 	u32 val;
1444 
1445 	if (!dig_port)
1446 		return;
1447 
1448 	intel_dp = &dig_port->dp;
1449 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1450 		return;
1451 
1452 	mutex_lock(&intel_dp->psr.lock);
1453 	if (!intel_dp->psr.enabled)
1454 		goto unlock;
1455 
1456 	if (intel_dp->psr.panel_replay_enabled) {
1457 		pipe_config->has_panel_replay = true;
1458 	} else {
1459 		/*
1460 		 * It's not possible to rely on the EDP_PSR/PSR2_CTL registers here,
1461 		 * as PSR gets enabled/disabled by frontbuffer tracking and others.
1462 		 */
1463 		pipe_config->has_psr = true;
1464 	}
1465 
1466 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1467 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1468 
1469 	if (!intel_dp->psr.psr2_enabled)
1470 		goto unlock;
1471 
1472 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1473 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1474 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1475 			pipe_config->enable_psr2_sel_fetch = true;
1476 	}
1477 
1478 	if (DISPLAY_VER(dev_priv) >= 12) {
1479 		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1480 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1481 	}
1482 unlock:
1483 	mutex_unlock(&intel_dp->psr.lock);
1484 }
1485 
1486 static void intel_psr_activate(struct intel_dp *intel_dp)
1487 {
1488 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1489 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1490 
1491 	drm_WARN_ON(&dev_priv->drm,
1492 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1493 		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1494 
1495 	drm_WARN_ON(&dev_priv->drm,
1496 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1497 
1498 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1499 
1500 	lockdep_assert_held(&intel_dp->psr.lock);
1501 
1502 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1503 	if (intel_dp->psr.panel_replay_enabled)
1504 		dg2_activate_panel_replay(intel_dp);
1505 	else if (intel_dp->psr.psr2_enabled)
1506 		hsw_activate_psr2(intel_dp);
1507 	else
1508 		hsw_activate_psr1(intel_dp);
1509 
1510 	intel_dp->psr.active = true;
1511 }
1512 
1513 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1514 {
1515 	switch (intel_dp->psr.pipe) {
1516 	case PIPE_A:
1517 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1518 	case PIPE_B:
1519 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1520 	case PIPE_C:
1521 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1522 	case PIPE_D:
1523 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1524 	default:
1525 		MISSING_CASE(intel_dp->psr.pipe);
1526 		return 0;
1527 	}
1528 }
1529 
1530 /*
1531  * Wa_16013835468
1532  * Wa_14015648006
1533  */
1534 static void wm_optimization_wa(struct intel_dp *intel_dp,
1535 			       const struct intel_crtc_state *crtc_state)
1536 {
1537 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1538 	bool set_wa_bit = false;
1539 
1540 	/* Wa_14015648006 */
1541 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1542 		set_wa_bit |= crtc_state->wm_level_disabled;
1543 
1544 	/* Wa_16013835468 */
1545 	if (DISPLAY_VER(dev_priv) == 12)
1546 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1547 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1548 
1549 	if (set_wa_bit)
1550 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1551 			     0, wa_16013835468_bit_get(intel_dp));
1552 	else
1553 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1554 			     wa_16013835468_bit_get(intel_dp), 0);
1555 }
1556 
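/* Program the ALPM entry-check and extended fast wake parameters (LNL+ only). */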
1557 static void lnl_alpm_configure(struct intel_dp *intel_dp)
1558 {
1559 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1560 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1561 	struct intel_psr *psr = &intel_dp->psr;
1562 
1563 	if (DISPLAY_VER(dev_priv) < 20)
1564 		return;
1565 
1566 	intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
1567 		       ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
1568 		       ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
1569 		       ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
1570 }
1571 
1572 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1573 				    const struct intel_crtc_state *crtc_state)
1574 {
1575 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1576 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1577 	u32 mask;
1578 
1579 	/*
1580 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1581 	 * SKL+ use hardcoded values for PSR AUX transactions.
1582 	 */
1583 	if (DISPLAY_VER(dev_priv) < 9)
1584 		hsw_psr_setup_aux(intel_dp);
1585 
1586 	/*
1587 	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD.
1588 	 * Also mask LPSP to avoid a dependency on other drivers that might
1589 	 * block runtime_pm, besides preventing other HW tracking issues,
1590 	 * now that we can rely on frontbuffer tracking.
1591 	 */
1592 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1593 	       EDP_PSR_DEBUG_MASK_HPD;
1594 
1595 	/*
1596 	 * For some unknown reason on HSW non-ULT (or at least on
1597 	 * Dell Latitude E6540) external displays start to flicker
1598 	 * when PSR is enabled on the eDP. SR/PC6 residency is much
1599 	 * higher than should be possible with an external display.
1600 	 * As a workaround leave LPSP unmasked to prevent PSR entry
1601 	 * when external displays are active.
1602 	 */
1603 	if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1604 		mask |= EDP_PSR_DEBUG_MASK_LPSP;
1605 
1606 	if (DISPLAY_VER(dev_priv) < 20)
1607 		mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1608 
1609 	/*
1610 	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1611 	 * registers in order to keep the CURSURFLIVE tricks working :(
1612 	 */
1613 	if (IS_DISPLAY_VER(dev_priv, 9, 10))
1614 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1615 
1616 	/* allow PSR with sprite enabled */
1617 	if (IS_HASWELL(dev_priv))
1618 		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1619 
1620 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1621 
1622 	psr_irq_control(intel_dp);
1623 
1624 	/*
1625 	 * TODO: if future platforms support DC3CO in more than one
1626 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1627 	 */
1628 	if (intel_dp->psr.dc3co_exitline)
1629 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1630 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1631 
1632 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1633 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1634 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1635 			     IGNORE_PSR2_HW_TRACKING : 0);
1636 
1637 	lnl_alpm_configure(intel_dp);
1638 
1639 	/*
1640 	 * Wa_16013835468
1641 	 * Wa_14015648006
1642 	 */
1643 	wm_optimization_wa(intel_dp, crtc_state);
1644 
1645 	if (intel_dp->psr.psr2_enabled) {
1646 		if (DISPLAY_VER(dev_priv) == 9)
1647 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1648 				     PSR2_VSC_ENABLE_PROG_HEADER |
1649 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1650 
1651 		/*
1652 		 * Wa_16014451276:adlp,mtl[a0,b0]
1653 		 * All supported adlp panels have 1-based X granularity; this may
1654 		 * cause issues if non-supported panels are used.
1655 		 */
1656 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1657 		    IS_ALDERLAKE_P(dev_priv))
1658 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1659 				     0, ADLP_1_BASED_X_GRANULARITY);
1660 
1661 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1662 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1663 			intel_de_rmw(dev_priv,
1664 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1665 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1666 		else if (IS_ALDERLAKE_P(dev_priv))
1667 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1668 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1669 	}
1670 }
1671 
1672 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1673 {
1674 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1675 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1676 	u32 val;
1677 
1678 	/*
1679 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1680 	 * will still keep the error set even after the reset done in the
1681 	 * irq_preinstall and irq_uninstall hooks.
1682 	 * Enabling PSR in this situation causes the screen to freeze the
1683 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1684 	 * to avoid any rendering problems.
1685 	 */
1686 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1687 	val &= psr_irq_psr_error_bit_get(intel_dp);
1688 	if (val) {
1689 		intel_dp->psr.sink_not_reliable = true;
1690 		drm_dbg_kms(&dev_priv->drm,
1691 			    "PSR interruption error set, not enabling PSR\n");
1692 		return false;
1693 	}
1694 
1695 	return true;
1696 }
1697 
1698 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1699 				    const struct intel_crtc_state *crtc_state)
1700 {
1701 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1702 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1703 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1704 	u32 val;
1705 
1706 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1707 
1708 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1709 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1710 	intel_dp->psr.busy_frontbuffer_bits = 0;
1711 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1712 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1713 	/* DC5/DC6 requires at least 6 idle frames */
1714 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1715 	intel_dp->psr.dc3co_exit_delay = val;
1716 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1717 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1718 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1719 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1720 		crtc_state->req_psr2_sdp_prior_scanline;
1721 
1722 	if (!psr_interrupt_error_check(intel_dp))
1723 		return;
1724 
1725 	if (intel_dp->psr.panel_replay_enabled)
1726 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1727 	else
1728 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1729 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1730 
1731 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1732 	intel_psr_enable_sink(intel_dp);
1733 	intel_psr_enable_source(intel_dp, crtc_state);
1734 	intel_dp->psr.enabled = true;
1735 	intel_dp->psr.paused = false;
1736 
1737 	intel_psr_activate(intel_dp);
1738 }
1739 
1740 static void intel_psr_exit(struct intel_dp *intel_dp)
1741 {
1742 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1743 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1744 	u32 val;
1745 
1746 	if (!intel_dp->psr.active) {
1747 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1748 			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1749 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1750 		}
1751 
1752 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1753 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1754 
1755 		return;
1756 	}
1757 
1758 	if (intel_dp->psr.panel_replay_enabled) {
1759 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1760 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1761 	} else if (intel_dp->psr.psr2_enabled) {
1762 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1763 
1764 		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1765 				   EDP_PSR2_ENABLE, 0);
1766 
1767 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1768 	} else {
1769 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1770 				   EDP_PSR_ENABLE, 0);
1771 
1772 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1773 	}
1774 	intel_dp->psr.active = false;
1775 }
1776 
1777 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1778 {
1779 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1780 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1781 	i915_reg_t psr_status;
1782 	u32 psr_status_mask;
1783 
1784 	if (intel_dp->psr.psr2_enabled) {
1785 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1786 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1787 	} else {
1788 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1789 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1790 	}
1791 
1792 	/* Wait till PSR is idle */
1793 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1794 				    psr_status_mask, 2000))
1795 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1796 }
1797 
1798 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1799 {
1800 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1801 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1802 	enum phy phy = intel_port_to_phy(dev_priv,
1803 					 dp_to_dig_port(intel_dp)->base.port);
1804 
1805 	lockdep_assert_held(&intel_dp->psr.lock);
1806 
1807 	if (!intel_dp->psr.enabled)
1808 		return;
1809 
1810 	if (intel_dp->psr.panel_replay_enabled)
1811 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1812 	else
1813 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1814 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1815 
1816 	intel_psr_exit(intel_dp);
1817 	intel_psr_wait_exit_locked(intel_dp);
1818 
1819 	/*
1820 	 * Wa_16013835468
1821 	 * Wa_14015648006
1822 	 */
1823 	if (DISPLAY_VER(dev_priv) >= 11)
1824 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1825 			     wa_16013835468_bit_get(intel_dp), 0);
1826 
1827 	if (intel_dp->psr.psr2_enabled) {
1828 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1829 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1830 			intel_de_rmw(dev_priv,
1831 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1832 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1833 		else if (IS_ALDERLAKE_P(dev_priv))
1834 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1835 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1836 	}
1837 
1838 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1839 
1840 	/* Disable PSR on Sink */
1841 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1842 
1843 	if (intel_dp->psr.psr2_enabled)
1844 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1845 
1846 	intel_dp->psr.enabled = false;
1847 	intel_dp->psr.panel_replay_enabled = false;
1848 	intel_dp->psr.psr2_enabled = false;
1849 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1850 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1851 }
1852 
1853 /**
1854  * intel_psr_disable - Disable PSR
1855  * @intel_dp: Intel DP
1856  * @old_crtc_state: old CRTC state
1857  *
1858  * This function needs to be called before disabling pipe.
1859  */
1860 void intel_psr_disable(struct intel_dp *intel_dp,
1861 		       const struct intel_crtc_state *old_crtc_state)
1862 {
1863 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1864 
1865 	if (!old_crtc_state->has_psr)
1866 		return;
1867 
1868 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1869 		return;
1870 
1871 	mutex_lock(&intel_dp->psr.lock);
1872 
1873 	intel_psr_disable_locked(intel_dp);
1874 
1875 	mutex_unlock(&intel_dp->psr.lock);
1876 	cancel_work_sync(&intel_dp->psr.work);
1877 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1878 }
1879 
1880 /**
1881  * intel_psr_pause - Pause PSR
1882  * @intel_dp: Intel DP
1883  *
1884  * This function needs to be called after enabling PSR.
1885  */
1886 void intel_psr_pause(struct intel_dp *intel_dp)
1887 {
1888 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1889 	struct intel_psr *psr = &intel_dp->psr;
1890 
1891 	if (!CAN_PSR(intel_dp))
1892 		return;
1893 
1894 	mutex_lock(&psr->lock);
1895 
1896 	if (!psr->enabled) {
1897 		mutex_unlock(&psr->lock);
1898 		return;
1899 	}
1900 
1901 	/* If we ever hit this, we will need to add refcount to pause/resume */
1902 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1903 
1904 	intel_psr_exit(intel_dp);
1905 	intel_psr_wait_exit_locked(intel_dp);
1906 	psr->paused = true;
1907 
1908 	mutex_unlock(&psr->lock);
1909 
1910 	cancel_work_sync(&psr->work);
1911 	cancel_delayed_work_sync(&psr->dc3co_work);
1912 }
1913 
1914 /**
1915  * intel_psr_resume - Resume PSR
1916  * @intel_dp: Intel DP
1917  *
1918  * This function needs to be called after pausing PSR.
1919  */
1920 void intel_psr_resume(struct intel_dp *intel_dp)
1921 {
1922 	struct intel_psr *psr = &intel_dp->psr;
1923 
1924 	if (!CAN_PSR(intel_dp))
1925 		return;
1926 
1927 	mutex_lock(&psr->lock);
1928 
1929 	if (!psr->paused)
1930 		goto unlock;
1931 
1932 	psr->paused = false;
1933 	intel_psr_activate(intel_dp);
1934 
1935 unlock:
1936 	mutex_unlock(&psr->lock);
1937 }
1938 
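/*
 * The PSR2_MAN_TRK_CTL bit layout differs on ADL-P and display 14+ (no
 * explicit enable bit and different SF bit positions), so these helpers
 * return the right bits for the running platform.
 */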
1939 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1940 {
1941 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1942 		PSR2_MAN_TRK_CTL_ENABLE;
1943 }
1944 
1945 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1946 {
1947 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1948 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1949 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1950 }
1951 
1952 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1953 {
1954 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1955 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1956 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1957 }
1958 
1959 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1960 {
1961 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1962 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1963 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1964 }
1965 
1966 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1967 {
1968 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1969 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1970 
1971 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1972 		intel_de_write(dev_priv,
1973 			       PSR2_MAN_TRK_CTL(cpu_transcoder),
1974 			       man_trk_ctl_enable_bit_get(dev_priv) |
1975 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1976 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1977 			       man_trk_ctl_continuos_full_frame(dev_priv));
1978 
1979 	/*
1980 	 * Display WA #0884: skl+
1981 	 * This documented WA for bxt can be safely applied
1982 	 * broadly so we can force HW tracking to exit PSR
1983 	 * instead of disabling and re-enabling.
1984 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1985 	 * but it makes more sense to write to the currently active
1986 	 * pipe.
1987 	 *
1988 	 * This workaround does not exist for platforms with display 10 or
1989 	 * newer, but testing proved that it works up to display 13; for
1990 	 * anything newer more testing will be needed.
1991 	 */
1992 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1993 }
1994 
1995 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1996 {
1997 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1998 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1999 	struct intel_encoder *encoder;
2000 
2001 	if (!crtc_state->enable_psr2_sel_fetch)
2002 		return;
2003 
2004 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2005 					     crtc_state->uapi.encoder_mask) {
2006 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2007 
2008 		lockdep_assert_held(&intel_dp->psr.lock);
2009 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2010 			return;
2011 		break;
2012 	}
2013 
2014 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2015 		       crtc_state->psr2_man_track_ctl);
2016 }
2017 
2018 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2019 				  bool full_update)
2020 {
2021 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2022 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2023 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2024 
2025 	/* SF partial frame enable has to be set even on full update */
2026 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2027 
2028 	if (full_update) {
2029 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2030 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
2031 		goto exit;
2032 	}
2033 
2034 	if (crtc_state->psr2_su_area.y1 == -1)
2035 		goto exit;
2036 
2037 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2038 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2039 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2040 	} else {
2041 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2042 			    crtc_state->psr2_su_area.y1 % 4 ||
2043 			    crtc_state->psr2_su_area.y2 % 4);
2044 
2045 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2046 			crtc_state->psr2_su_area.y1 / 4 + 1);
2047 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2048 			crtc_state->psr2_su_area.y2 / 4 + 1);
2049 	}
2050 exit:
2051 	crtc_state->psr2_man_track_ctl = val;
2052 }
2053 
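/*
 * Extend overlap_damage_area to also cover damage_area, after clipping the
 * latter to the pipe source area. y1 == -1 marks an empty area.
 */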
2054 static void clip_area_update(struct drm_rect *overlap_damage_area,
2055 			     struct drm_rect *damage_area,
2056 			     struct drm_rect *pipe_src)
2057 {
2058 	if (!drm_rect_intersect(damage_area, pipe_src))
2059 		return;
2060 
2061 	if (overlap_damage_area->y1 == -1) {
2062 		overlap_damage_area->y1 = damage_area->y1;
2063 		overlap_damage_area->y2 = damage_area->y2;
2064 		return;
2065 	}
2066 
2067 	if (damage_area->y1 < overlap_damage_area->y1)
2068 		overlap_damage_area->y1 = damage_area->y1;
2069 
2070 	if (damage_area->y2 > overlap_damage_area->y2)
2071 		overlap_damage_area->y2 = damage_area->y2;
2072 }
2073 
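/*
 * Align the selective update area vertically to the required granularity:
 * the panel SU granularity, or the DSC slice height when compression is
 * enabled on ADL-P and display 14+.
 */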
2074 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2075 {
2076 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2077 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2078 	u16 y_alignment;
2079 
2080 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2081 	if (crtc_state->dsc.compression_enable &&
2082 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2083 		y_alignment = vdsc_cfg->slice_height;
2084 	else
2085 		y_alignment = crtc_state->su_y_granularity;
2086 
2087 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2088 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2089 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2090 						y_alignment) + 1) * y_alignment;
2091 }
2092 
2093 /*
2094  * When early transport is in use we need to extend SU area to cover
2095  * cursor fully when cursor is in SU area.
2096  */
2097 static void
2098 intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
2099 				  struct intel_plane_state *cursor_state)
2100 {
2101 	struct drm_rect inter;
2102 
2103 	if (!crtc_state->enable_psr2_su_region_et ||
2104 	    !cursor_state->uapi.visible)
2105 		return;
2106 
2107 	inter = crtc_state->psr2_su_area;
2108 	if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
2109 		return;
2110 
2111 	clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
2112 			 &crtc_state->pipe_src);
2113 }
2114 
2115 /*
2116  * TODO: Not clear how to handle planes with a negative position;
2117  * also, planes are not updated if they have a negative X
2118  * position, so for now do a full update in these cases.
2119  *
2120  * Plane scaling and rotation are not supported by selective fetch and both
2121  * properties can change without a modeset, so they need to be checked at
2122  * every atomic commit.
2123  */
2124 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2125 {
2126 	if (plane_state->uapi.dst.y1 < 0 ||
2127 	    plane_state->uapi.dst.x1 < 0 ||
2128 	    plane_state->scaler_id >= 0 ||
2129 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2130 		return false;
2131 
2132 	return true;
2133 }
2134 
2135 /*
2136  * Check for pipe properties that are not supported by selective fetch.
2137  *
2138  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2139  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2140  * enabled and going to the full update path.
2141  */
2142 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2143 {
2144 	if (crtc_state->scaler_state.scaler_id >= 0)
2145 		return false;
2146 
2147 	return true;
2148 }
2149 
2150 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2151 				struct intel_crtc *crtc)
2152 {
2153 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2154 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2155 	struct intel_plane_state *new_plane_state, *old_plane_state,
2156 		*cursor_plane_state = NULL;
2157 	struct intel_plane *plane;
2158 	bool full_update = false;
2159 	int i, ret;
2160 
2161 	if (!crtc_state->enable_psr2_sel_fetch)
2162 		return 0;
2163 
2164 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2165 		full_update = true;
2166 		goto skip_sel_fetch_set_loop;
2167 	}
2168 
2169 	crtc_state->psr2_su_area.x1 = 0;
2170 	crtc_state->psr2_su_area.y1 = -1;
2171 	crtc_state->psr2_su_area.x2 = INT_MAX;
2172 	crtc_state->psr2_su_area.y2 = -1;
2173 
2174 	/*
2175 	 * Calculate the minimal selective fetch area of each plane and the
2176 	 * pipe damaged area.
2177 	 * In the next loop the plane selective fetch area will actually be set
2178 	 * using the whole pipe damaged area.
2179 	 */
2180 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2181 					     new_plane_state, i) {
2182 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2183 						      .x2 = INT_MAX };
2184 
2185 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2186 			continue;
2187 
2188 		if (!new_plane_state->uapi.visible &&
2189 		    !old_plane_state->uapi.visible)
2190 			continue;
2191 
2192 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2193 			full_update = true;
2194 			break;
2195 		}
2196 
2197 		/*
2198 		 * If the visibility changed or the plane moved, mark the whole
2199 		 * plane area as damaged as it needs to be completely redrawn in
2200 		 * the new and old positions.
2201 		 */
2202 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2203 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2204 				     &old_plane_state->uapi.dst)) {
2205 			if (old_plane_state->uapi.visible) {
2206 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2207 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2208 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2209 						 &crtc_state->pipe_src);
2210 			}
2211 
2212 			if (new_plane_state->uapi.visible) {
2213 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2214 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2215 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2216 						 &crtc_state->pipe_src);
2217 			}
2218 			continue;
2219 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2220 			/* If alpha changed mark the whole plane area as damaged */
2221 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2222 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2223 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2224 					 &crtc_state->pipe_src);
2225 			continue;
2226 		}
2227 
2228 		src = drm_plane_state_src(&new_plane_state->uapi);
2229 		drm_rect_fp_to_int(&src, &src);
2230 
2231 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2232 						     &new_plane_state->uapi, &damaged_area))
2233 			continue;
2234 
2235 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2236 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2237 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2238 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2239 
2240 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2241 
2242 		/*
2243 		 * The new cursor plane state is stored so the SU area can later
2244 		 * be adjusted to cover the cursor fully.
2245 		 */
2246 		if (plane->id == PLANE_CURSOR)
2247 			cursor_plane_state = new_plane_state;
2248 	}
2249 
2250 	/*
2251 	 * TODO: For now we are just using full update in case
2252 	 * selective fetch area calculation fails. To optimize this we
2253 	 * should identify cases where this happens and fix the area
2254 	 * calculation for those.
2255 	 */
2256 	if (crtc_state->psr2_su_area.y1 == -1) {
2257 		drm_info_once(&dev_priv->drm,
2258 			      "Selective fetch area calculation failed in pipe %c\n",
2259 			      pipe_name(crtc->pipe));
2260 		full_update = true;
2261 	}
2262 
2263 	if (full_update)
2264 		goto skip_sel_fetch_set_loop;
2265 
2266 	/* Wa_14014971492 */
2267 	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2268 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2269 	    crtc_state->splitter.enable)
2270 		crtc_state->psr2_su_area.y1 = 0;
2271 
2272 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2273 	if (ret)
2274 		return ret;
2275 
2276 	/* Adjust su area to cover cursor fully as necessary */
2277 	if (cursor_plane_state)
2278 		intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
2279 
2280 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2281 
2282 	/*
2283 	 * Now that we have the pipe damaged area, check if it intersects with
2284 	 * each plane; if it does, set the plane selective fetch area.
2285 	 */
2286 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2287 					     new_plane_state, i) {
2288 		struct drm_rect *sel_fetch_area, inter;
2289 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2290 
2291 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2292 		    !new_plane_state->uapi.visible)
2293 			continue;
2294 
2295 		inter = crtc_state->psr2_su_area;
2296 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2297 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2298 			sel_fetch_area->y1 = -1;
2299 			sel_fetch_area->y2 = -1;
2300 			/*
2301 			 * if plane sel fetch was previously enabled ->
2302 			 * disable it
2303 			 */
2304 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2305 				crtc_state->update_planes |= BIT(plane->id);
2306 
2307 			continue;
2308 		}
2309 
2310 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2311 			full_update = true;
2312 			break;
2313 		}
2314 
2315 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2316 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2317 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2318 		crtc_state->update_planes |= BIT(plane->id);
2319 
2320 		/*
2321 		 * Sel_fetch_area is calculated for UV plane. Use
2322 		 * same area for Y plane as well.
2323 		 */
2324 		if (linked) {
2325 			struct intel_plane_state *linked_new_plane_state;
2326 			struct drm_rect *linked_sel_fetch_area;
2327 
2328 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2329 			if (IS_ERR(linked_new_plane_state))
2330 				return PTR_ERR(linked_new_plane_state);
2331 
2332 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2333 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2334 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2335 			crtc_state->update_planes |= BIT(linked->id);
2336 		}
2337 	}
2338 
2339 skip_sel_fetch_set_loop:
2340 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2341 	return 0;
2342 }
2343 
2344 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2345 				struct intel_crtc *crtc)
2346 {
2347 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2348 	const struct intel_crtc_state *old_crtc_state =
2349 		intel_atomic_get_old_crtc_state(state, crtc);
2350 	const struct intel_crtc_state *new_crtc_state =
2351 		intel_atomic_get_new_crtc_state(state, crtc);
2352 	struct intel_encoder *encoder;
2353 
2354 	if (!HAS_PSR(i915))
2355 		return;
2356 
2357 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2358 					     old_crtc_state->uapi.encoder_mask) {
2359 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2360 		struct intel_psr *psr = &intel_dp->psr;
2361 		bool needs_to_disable = false;
2362 
2363 		mutex_lock(&psr->lock);
2364 
2365 		/*
2366 		 * Reasons to disable:
2367 		 * - PSR disabled in new state
2368 		 * - All planes will go inactive
2369 		 * - Changing between PSR versions
2370 		 * - Display WA #1136: skl, bxt
2371 		 */
2372 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2373 		needs_to_disable |= !new_crtc_state->has_psr;
2374 		needs_to_disable |= !new_crtc_state->active_planes;
2375 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2376 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2377 			new_crtc_state->wm_level_disabled;
2378 
2379 		if (psr->enabled && needs_to_disable)
2380 			intel_psr_disable_locked(intel_dp);
2381 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2382 			/* Wa_14015648006 */
2383 			wm_optimization_wa(intel_dp, new_crtc_state);
2384 
2385 		mutex_unlock(&psr->lock);
2386 	}
2387 }
2388 
2389 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2390 				 struct intel_crtc *crtc)
2391 {
2392 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2393 	const struct intel_crtc_state *crtc_state =
2394 		intel_atomic_get_new_crtc_state(state, crtc);
2395 	struct intel_encoder *encoder;
2396 
2397 	if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2398 		return;
2399 
2400 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2401 					     crtc_state->uapi.encoder_mask) {
2402 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2403 		struct intel_psr *psr = &intel_dp->psr;
2404 		bool keep_disabled = false;
2405 
2406 		mutex_lock(&psr->lock);
2407 
2408 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2409 
2410 		keep_disabled |= psr->sink_not_reliable;
2411 		keep_disabled |= !crtc_state->active_planes;
2412 
2413 		/* Display WA #1136: skl, bxt */
2414 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2415 			crtc_state->wm_level_disabled;
2416 
2417 		if (!psr->enabled && !keep_disabled)
2418 			intel_psr_enable_locked(intel_dp, crtc_state);
2419 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2420 			/* Wa_14015648006 */
2421 			wm_optimization_wa(intel_dp, crtc_state);
2422 
2423 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2424 		if (crtc_state->crc_enabled && psr->enabled)
2425 			psr_force_hw_tracking_exit(intel_dp);
2426 
2427 		/*
2428 		 * Clear possible busy bits in case we have
2429 		 * invalidate -> flip -> flush sequence.
2430 		 */
2431 		intel_dp->psr.busy_frontbuffer_bits = 0;
2432 
2433 		mutex_unlock(&psr->lock);
2434 	}
2435 }
2436 
2437 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2438 {
2439 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2440 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2441 
2442 	/*
2443 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2444 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2445 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2446 	 */
2447 	return intel_de_wait_for_clear(dev_priv,
2448 				       EDP_PSR2_STATUS(cpu_transcoder),
2449 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2450 }
2451 
2452 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2453 {
2454 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2455 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2456 
2457 	/*
2458 	 * From bspec: Panel Self Refresh (BDW+)
2459 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2460 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2461 	 * defensive enough to cover everything.
2462 	 */
2463 	return intel_de_wait_for_clear(dev_priv,
2464 				       psr_status_reg(dev_priv, cpu_transcoder),
2465 				       EDP_PSR_STATUS_STATE_MASK, 50);
2466 }
2467 
2468 /**
2469  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2470  * @new_crtc_state: new CRTC state
2471  *
2472  * This function is expected to be called from pipe_update_start() where it is
2473  * not expected to race with PSR enable or disable.
2474  */
2475 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2476 {
2477 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2478 	struct intel_encoder *encoder;
2479 
2480 	if (!new_crtc_state->has_psr)
2481 		return;
2482 
2483 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2484 					     new_crtc_state->uapi.encoder_mask) {
2485 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2486 		int ret;
2487 
2488 		lockdep_assert_held(&intel_dp->psr.lock);
2489 
2490 		if (!intel_dp->psr.enabled)
2491 			continue;
2492 
2493 		if (intel_dp->psr.psr2_enabled)
2494 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2495 		else
2496 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2497 
2498 		if (ret)
2499 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2500 	}
2501 }
2502 
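/*
 * Wait for the PSR status to become idle with psr.lock temporarily dropped.
 * Returns true if PSR is still enabled (and thus should be re-activated)
 * once the lock has been re-acquired.
 */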
2503 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2504 {
2505 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2506 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2507 	i915_reg_t reg;
2508 	u32 mask;
2509 	int err;
2510 
2511 	if (!intel_dp->psr.enabled)
2512 		return false;
2513 
2514 	if (intel_dp->psr.psr2_enabled) {
2515 		reg = EDP_PSR2_STATUS(cpu_transcoder);
2516 		mask = EDP_PSR2_STATUS_STATE_MASK;
2517 	} else {
2518 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2519 		mask = EDP_PSR_STATUS_STATE_MASK;
2520 	}
2521 
2522 	mutex_unlock(&intel_dp->psr.lock);
2523 
2524 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2525 	if (err)
2526 		drm_err(&dev_priv->drm,
2527 			"Timed out waiting for PSR Idle for re-enable\n");
2528 
2529 	/* After the unlocked wait, verify that PSR is still wanted! */
2530 	mutex_lock(&intel_dp->psr.lock);
2531 	return err == 0 && intel_dp->psr.enabled;
2532 }
2533 
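/*
 * Force a modeset on all eDP connectors so that a PSR debug mode change is
 * applied through the regular atomic commit path.
 */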
2534 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2535 {
2536 	struct drm_connector_list_iter conn_iter;
2537 	struct drm_modeset_acquire_ctx ctx;
2538 	struct drm_atomic_state *state;
2539 	struct drm_connector *conn;
2540 	int err = 0;
2541 
2542 	state = drm_atomic_state_alloc(&dev_priv->drm);
2543 	if (!state)
2544 		return -ENOMEM;
2545 
2546 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2547 
2548 	state->acquire_ctx = &ctx;
2549 	to_intel_atomic_state(state)->internal = true;
2550 
2551 retry:
2552 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2553 	drm_for_each_connector_iter(conn, &conn_iter) {
2554 		struct drm_connector_state *conn_state;
2555 		struct drm_crtc_state *crtc_state;
2556 
2557 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2558 			continue;
2559 
2560 		conn_state = drm_atomic_get_connector_state(state, conn);
2561 		if (IS_ERR(conn_state)) {
2562 			err = PTR_ERR(conn_state);
2563 			break;
2564 		}
2565 
2566 		if (!conn_state->crtc)
2567 			continue;
2568 
2569 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2570 		if (IS_ERR(crtc_state)) {
2571 			err = PTR_ERR(crtc_state);
2572 			break;
2573 		}
2574 
2575 		/* Mark mode as changed to trigger a pipe->update() */
2576 		crtc_state->mode_changed = true;
2577 	}
2578 	drm_connector_list_iter_end(&conn_iter);
2579 
2580 	if (err == 0)
2581 		err = drm_atomic_commit(state);
2582 
2583 	if (err == -EDEADLK) {
2584 		drm_atomic_state_clear(state);
2585 		err = drm_modeset_backoff(&ctx);
2586 		if (!err)
2587 			goto retry;
2588 	}
2589 
2590 	drm_modeset_drop_locks(&ctx);
2591 	drm_modeset_acquire_fini(&ctx);
2592 	drm_atomic_state_put(state);
2593 
2594 	return err;
2595 }
2596 
2597 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2598 {
2599 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2600 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2601 	u32 old_mode;
2602 	int ret;
2603 
2604 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2605 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2606 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2607 		return -EINVAL;
2608 	}
2609 
2610 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2611 	if (ret)
2612 		return ret;
2613 
2614 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2615 	intel_dp->psr.debug = val;
2616 
2617 	/*
2618 	 * Do it right away if it's already enabled, otherwise it will be done
2619 	 * when enabling the source.
2620 	 */
2621 	if (intel_dp->psr.enabled)
2622 		psr_irq_control(intel_dp);
2623 
2624 	mutex_unlock(&intel_dp->psr.lock);
2625 
2626 	if (old_mode != mode)
2627 		ret = intel_psr_fastset_force(dev_priv);
2628 
2629 	return ret;
2630 }
2631 
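/*
 * A PSR AUX error was signalled via interrupt: disable PSR, mark the sink
 * as not reliable and make sure it is awake again.
 */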
2632 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2633 {
2634 	struct intel_psr *psr = &intel_dp->psr;
2635 
2636 	intel_psr_disable_locked(intel_dp);
2637 	psr->sink_not_reliable = true;
2638 	/* let's make sure that the sink is awake */
2639 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2640 }
2641 
2642 static void intel_psr_work(struct work_struct *work)
2643 {
2644 	struct intel_dp *intel_dp =
2645 		container_of(work, typeof(*intel_dp), psr.work);
2646 
2647 	mutex_lock(&intel_dp->psr.lock);
2648 
2649 	if (!intel_dp->psr.enabled)
2650 		goto unlock;
2651 
2652 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2653 		intel_psr_handle_irq(intel_dp);
2654 
2655 	/*
2656 	 * We have to make sure PSR is ready for re-enable,
2657 	 * otherwise it stays disabled until the next full enable/disable cycle.
2658 	 * PSR might take some time to get fully disabled
2659 	 * and be ready for re-enable.
2660 	 */
2661 	if (!__psr_wait_for_idle_locked(intel_dp))
2662 		goto unlock;
2663 
2664 	/*
2665 	 * The delayed work can race with an invalidate hence we need to
2666 	 * recheck. Since psr_flush first clears this and then reschedules we
2667 	 * won't ever miss a flush when bailing out here.
2668 	 */
2669 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2670 		goto unlock;
2671 
2672 	intel_psr_activate(intel_dp);
2673 unlock:
2674 	mutex_unlock(&intel_dp->psr.lock);
2675 }
2676 
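/*
 * Handle a frontbuffer invalidate: with selective fetch switch to continuous
 * full frame fetches while rendering is ongoing, otherwise simply exit PSR.
 */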
2677 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2678 {
2679 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2680 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2681 
2682 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2683 		u32 val;
2684 
2685 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2686 			/* Send one update, otherwise lag is observed on screen */
2687 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2688 			return;
2689 		}
2690 
2691 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2692 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2693 		      man_trk_ctl_continuos_full_frame(dev_priv);
2694 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2695 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2696 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2697 	} else {
2698 		intel_psr_exit(intel_dp);
2699 	}
2700 }
2701 
2702 /**
2703  * intel_psr_invalidate - Invalidate PSR
2704  * @dev_priv: i915 device
2705  * @frontbuffer_bits: frontbuffer plane tracking bits
2706  * @origin: which operation caused the invalidate
2707  *
2708  * Since the hardware frontbuffer tracking has gaps we need to integrate
2709  * with the software frontbuffer tracking. This function gets called every
2710  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2711  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2712  *
2713  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2714  */
2715 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2716 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2717 {
2718 	struct intel_encoder *encoder;
2719 
2720 	if (origin == ORIGIN_FLIP)
2721 		return;
2722 
2723 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2724 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2725 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2726 
2727 		mutex_lock(&intel_dp->psr.lock);
2728 		if (!intel_dp->psr.enabled) {
2729 			mutex_unlock(&intel_dp->psr.lock);
2730 			continue;
2731 		}
2732 
2733 		pipe_frontbuffer_bits &=
2734 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2735 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2736 
2737 		if (pipe_frontbuffer_bits)
2738 			_psr_invalidate_handle(intel_dp);
2739 
2740 		mutex_unlock(&intel_dp->psr.lock);
2741 	}
2742 }
2743 /*
2744  * Once we completely rely on PSR2 S/W tracking in the future,
2745  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
2746  * event as well; therefore tgl_dc3co_flush_locked() will need to be changed
2747  * accordingly.
2748  */
2749 static void
2750 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2751 		       enum fb_op_origin origin)
2752 {
2753 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2754 
2755 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2756 	    !intel_dp->psr.active)
2757 		return;
2758 
2759 	/*
2760 	 * At every frontbuffer flush/flip event, modify the delay of the delayed
2761 	 * work; when the delayed work finally runs the display has been idle.
2762 	 */
2763 	if (!(frontbuffer_bits &
2764 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2765 		return;
2766 
2767 	tgl_psr2_enable_dc3co(intel_dp);
2768 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2769 			 intel_dp->psr.dc3co_exit_delay);
2770 }
2771 
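/*
 * Handle a frontbuffer flush: return to selective updates once nothing is
 * busy any more, or force a single full frame via a HW tracking exit and,
 * without selective fetch, schedule the re-activation work.
 */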
2772 static void _psr_flush_handle(struct intel_dp *intel_dp)
2773 {
2774 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2775 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2776 
2777 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2778 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2779 			/* can we turn CFF off? */
2780 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2781 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2782 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2783 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2784 					man_trk_ctl_continuos_full_frame(dev_priv);
2785 
2786 				/*
2787 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2788 				 * updates. Still keep the CFF bit enabled as we don't have a proper
2789 				 * SU configuration in case an update is sent for any reason after
2790 				 * the SFF bit gets cleared by the HW on the next vblank.
2791 				 */
2792 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2793 					       val);
2794 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2795 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2796 			}
2797 		} else {
2798 			/*
2799 			 * continuous full frame is disabled, only a single full
2800 			 * frame is required
2801 			 */
2802 			psr_force_hw_tracking_exit(intel_dp);
2803 		}
2804 	} else {
2805 		psr_force_hw_tracking_exit(intel_dp);
2806 
2807 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2808 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2809 	}
2810 }
2811 
2812 /**
2813  * intel_psr_flush - Flush PSR
2814  * @dev_priv: i915 device
2815  * @frontbuffer_bits: frontbuffer plane tracking bits
2816  * @origin: which operation caused the flush
2817  *
2818  * Since the hardware frontbuffer tracking has gaps we need to integrate
2819  * with the software frontbuffer tracking. This function gets called every
2820  * time frontbuffer rendering has completed and flushed out to memory. PSR
2821  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2822  *
2823  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2824  */
2825 void intel_psr_flush(struct drm_i915_private *dev_priv,
2826 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2827 {
2828 	struct intel_encoder *encoder;
2829 
2830 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2831 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2832 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2833 
2834 		mutex_lock(&intel_dp->psr.lock);
2835 		if (!intel_dp->psr.enabled) {
2836 			mutex_unlock(&intel_dp->psr.lock);
2837 			continue;
2838 		}
2839 
2840 		pipe_frontbuffer_bits &=
2841 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2842 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2843 
2844 		/*
2845 		 * If PSR is paused by an explicit intel_psr_pause() call,
2846 		 * we have to ensure that the PSR is not activated until
2847 		 * intel_psr_resume() is called.
2848 		 */
2849 		if (intel_dp->psr.paused)
2850 			goto unlock;
2851 
2852 		if (origin == ORIGIN_FLIP ||
2853 		    (origin == ORIGIN_CURSOR_UPDATE &&
2854 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2855 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2856 			goto unlock;
2857 		}
2858 
2859 		if (pipe_frontbuffer_bits == 0)
2860 			goto unlock;
2861 
2862 		/* By definition flush = invalidate + flush */
2863 		_psr_flush_handle(intel_dp);
2864 unlock:
2865 		mutex_unlock(&intel_dp->psr.lock);
2866 	}
2867 }
2868 
2869 /**
2870  * intel_psr_init - Init basic PSR work and mutex.
2871  * @intel_dp: Intel DP
2872  *
2873  * This function is called after initializing the connector
2874  * (connector initialization handles the connector capabilities) and it
2875  * initializes the basic PSR state for each DP encoder.
2876  */
2877 void intel_psr_init(struct intel_dp *intel_dp)
2878 {
2879 	struct intel_connector *connector = intel_dp->attached_connector;
2880 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2881 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2882 
2883 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2884 		return;
2885 
2886 	/*
2887 	 * HSW spec explicitly says PSR is tied to port A.
2888 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2889 	 * BDW, GEN9 and GEN11 are not validated by the HW team on any transcoder
2890 	 * other than the eDP one.
2891 	 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
2892 	 * so let's keep it hardcoded to PORT_A for those.
2893 	 * GEN12, however, supports an instance of PSR registers per transcoder.
2894 	 */
2895 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2896 		drm_dbg_kms(&dev_priv->drm,
2897 			    "PSR condition failed: Port not supported\n");
2898 		return;
2899 	}
2900 
2901 	if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2902 		intel_dp->psr.source_panel_replay_support = true;
2903 	else
2904 		intel_dp->psr.source_support = true;
2905 
2906 	/* Disable early transport for now */
2907 	intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
2908 
2909 	/* Set link_standby x link_off defaults */
2910 	if (DISPLAY_VER(dev_priv) < 12)
2911 		/* For platforms up to TGL, let's respect the VBT again */
2912 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2913 
2914 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2915 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2916 	mutex_init(&intel_dp->psr.lock);
2917 }
2918 
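/*
 * Read the sink status and error status DPCD registers, using the Panel
 * Replay offsets when Panel Replay is enabled.
 */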
2919 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2920 					   u8 *status, u8 *error_status)
2921 {
2922 	struct drm_dp_aux *aux = &intel_dp->aux;
2923 	int ret;
2924 	unsigned int offset;
2925 
2926 	offset = intel_dp->psr.panel_replay_enabled ?
2927 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2928 
2929 	ret = drm_dp_dpcd_readb(aux, offset, status);
2930 	if (ret != 1)
2931 		return ret;
2932 
2933 	offset = intel_dp->psr.panel_replay_enabled ?
2934 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2935 
2936 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
2937 	if (ret != 1)
2938 		return ret;
2939 
2940 	*status = *status & DP_PSR_SINK_STATE_MASK;
2941 
2942 	return 0;
2943 }
2944 
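/* Disable PSR if the sink reports an ALPM lock timeout error. */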
2945 static void psr_alpm_check(struct intel_dp *intel_dp)
2946 {
2947 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2948 	struct drm_dp_aux *aux = &intel_dp->aux;
2949 	struct intel_psr *psr = &intel_dp->psr;
2950 	u8 val;
2951 	int r;
2952 
2953 	if (!psr->psr2_enabled)
2954 		return;
2955 
2956 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2957 	if (r != 1) {
2958 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2959 		return;
2960 	}
2961 
2962 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2963 		intel_psr_disable_locked(intel_dp);
2964 		psr->sink_not_reliable = true;
2965 		drm_dbg_kms(&dev_priv->drm,
2966 			    "ALPM lock timeout error, disabling PSR\n");
2967 
2968 		/* Clearing error */
2969 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2970 	}
2971 }
2972 
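/* Disable PSR if the sink signals that its PSR capabilities have changed. */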
2973 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2974 {
2975 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2976 	struct intel_psr *psr = &intel_dp->psr;
2977 	u8 val;
2978 	int r;
2979 
2980 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2981 	if (r != 1) {
2982 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2983 		return;
2984 	}
2985 
2986 	if (val & DP_PSR_CAPS_CHANGE) {
2987 		intel_psr_disable_locked(intel_dp);
2988 		psr->sink_not_reliable = true;
2989 		drm_dbg_kms(&dev_priv->drm,
2990 			    "Sink PSR capability changed, disabling PSR\n");
2991 
2992 		/* Clearing it */
2993 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2994 	}
2995 }
2996 
2997 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2998 {
2999 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3000 	struct intel_psr *psr = &intel_dp->psr;
3001 	u8 status, error_status;
3002 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3003 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3004 			  DP_PSR_LINK_CRC_ERROR;
3005 
3006 	if (!CAN_PSR(intel_dp))
3007 		return;
3008 
3009 	mutex_lock(&psr->lock);
3010 
3011 	if (!psr->enabled)
3012 		goto exit;
3013 
3014 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3015 		drm_err(&dev_priv->drm,
3016 			"Error reading PSR status or error status\n");
3017 		goto exit;
3018 	}
3019 
3020 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
3021 		intel_psr_disable_locked(intel_dp);
3022 		psr->sink_not_reliable = true;
3023 	}
3024 
3025 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
3026 		drm_dbg_kms(&dev_priv->drm,
3027 			    "PSR sink internal error, disabling PSR\n");
3028 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3029 		drm_dbg_kms(&dev_priv->drm,
3030 			    "PSR RFB storage error, disabling PSR\n");
3031 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3032 		drm_dbg_kms(&dev_priv->drm,
3033 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3034 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3035 		drm_dbg_kms(&dev_priv->drm,
3036 			    "PSR Link CRC error, disabling PSR\n");
3037 
3038 	if (error_status & ~errors)
3039 		drm_err(&dev_priv->drm,
3040 			"PSR_ERROR_STATUS unhandled errors %x\n",
3041 			error_status & ~errors);
3042 	/* clear status register */
3043 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3044 
3045 	psr_alpm_check(intel_dp);
3046 	psr_capability_changed_check(intel_dp);
3047 
3048 exit:
3049 	mutex_unlock(&psr->lock);
3050 }
3051 
3052 bool intel_psr_enabled(struct intel_dp *intel_dp)
3053 {
3054 	bool ret;
3055 
3056 	if (!CAN_PSR(intel_dp))
3057 		return false;
3058 
3059 	mutex_lock(&intel_dp->psr.lock);
3060 	ret = intel_dp->psr.enabled;
3061 	mutex_unlock(&intel_dp->psr.lock);
3062 
3063 	return ret;
3064 }
3065 
3066 /**
3067  * intel_psr_lock - grab PSR lock
3068  * @crtc_state: the crtc state
3069  *
3070  * This is initially meant to be used around the CRTC update, when
3071  * vblank-sensitive registers are updated and we need to grab the lock
3072  * before that to avoid vblank evasion.
3073  */
3074 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3075 {
3076 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3077 	struct intel_encoder *encoder;
3078 
3079 	if (!crtc_state->has_psr)
3080 		return;
3081 
3082 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3083 					     crtc_state->uapi.encoder_mask) {
3084 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3085 
3086 		mutex_lock(&intel_dp->psr.lock);
3087 		break;
3088 	}
3089 }
3090 
3091 /**
3092  * intel_psr_unlock - release PSR lock
3093  * @crtc_state: the crtc state
3094  *
3095  * Release the PSR lock that was held during pipe update.
3096  */
3097 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3098 {
3099 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3100 	struct intel_encoder *encoder;
3101 
3102 	if (!crtc_state->has_psr)
3103 		return;
3104 
3105 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3106 					     crtc_state->uapi.encoder_mask) {
3107 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3108 
3109 		mutex_unlock(&intel_dp->psr.lock);
3110 		break;
3111 	}
3112 }
3113 
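/* Decode the live source PSR state from the status register for debugfs. */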
3114 static void
3115 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3116 {
3117 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3118 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3119 	const char *status = "unknown";
3120 	u32 val, status_val;
3121 
3122 	if (intel_dp->psr.psr2_enabled) {
3123 		static const char * const live_status[] = {
3124 			"IDLE",
3125 			"CAPTURE",
3126 			"CAPTURE_FS",
3127 			"SLEEP",
3128 			"BUFON_FW",
3129 			"ML_UP",
3130 			"SU_STANDBY",
3131 			"FAST_SLEEP",
3132 			"DEEP_SLEEP",
3133 			"BUF_ON",
3134 			"TG_ON"
3135 		};
3136 		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3137 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3138 		if (status_val < ARRAY_SIZE(live_status))
3139 			status = live_status[status_val];
3140 	} else {
3141 		static const char * const live_status[] = {
3142 			"IDLE",
3143 			"SRDONACK",
3144 			"SRDENT",
3145 			"BUFOFF",
3146 			"BUFON",
3147 			"AUXACK",
3148 			"SRDOFFACK",
3149 			"SRDENT_ON",
3150 		};
3151 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3152 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3153 		if (status_val < ARRAY_SIZE(live_status))
3154 			status = live_status[status_val];
3155 	}
3156 
3157 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3158 }
3159 
3160 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3161 {
3162 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3163 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3164 	struct intel_psr *psr = &intel_dp->psr;
3165 	intel_wakeref_t wakeref;
3166 	const char *status;
3167 	bool enabled;
3168 	u32 val;
3169 
3170 	seq_printf(m, "Sink support: PSR = %s",
3171 		   str_yes_no(psr->sink_support));
3172 
3173 	if (psr->sink_support)
3174 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3175 	seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3176 
3177 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3178 		return 0;
3179 
3180 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3181 	mutex_lock(&psr->lock);
3182 
3183 	if (psr->panel_replay_enabled)
3184 		status = "Panel Replay Enabled";
3185 	else if (psr->enabled)
3186 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3187 	else
3188 		status = "disabled";
3189 	seq_printf(m, "PSR mode: %s\n", status);
3190 
3191 	if (!psr->enabled) {
3192 		seq_printf(m, "PSR sink not reliable: %s\n",
3193 			   str_yes_no(psr->sink_not_reliable));
3194 
3195 		goto unlock;
3196 	}
3197 
3198 	if (psr->panel_replay_enabled) {
3199 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3200 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3201 	} else if (psr->psr2_enabled) {
3202 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3203 		enabled = val & EDP_PSR2_ENABLE;
3204 	} else {
3205 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3206 		enabled = val & EDP_PSR_ENABLE;
3207 	}
3208 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3209 		   str_enabled_disabled(enabled), val);
3210 	psr_source_status(intel_dp, m);
3211 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3212 		   psr->busy_frontbuffer_bits);
3213 
3214 	/*
3215 	 * SKL+ perf counter is reset to 0 every time a DC state is entered
3216 	 */
3217 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3218 	seq_printf(m, "Performance counter: %u\n",
3219 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3220 
3221 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3222 		seq_printf(m, "Last attempted entry at: %lld\n",
3223 			   psr->last_entry_attempt);
3224 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3225 	}
3226 
3227 	if (psr->psr2_enabled) {
3228 		u32 su_frames_val[3];
3229 		int frame;
3230 
3231 		/*
3232 		 * Read all 3 registers beforehand to minimize the chance of
3233 		 * crossing a frame boundary between the register reads
3234 		 */
3235 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3236 			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3237 			su_frames_val[frame / 3] = val;
3238 		}
3239 
3240 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3241 
3242 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3243 			u32 su_blocks;
3244 
3245 			su_blocks = su_frames_val[frame / 3] &
3246 				    PSR2_SU_STATUS_MASK(frame);
3247 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3248 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3249 		}
3250 
3251 		seq_printf(m, "PSR2 selective fetch: %s\n",
3252 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3253 	}
3254 
3255 unlock:
3256 	mutex_unlock(&psr->lock);
3257 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3258 
3259 	return 0;
3260 }
3261 
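/*
 * Device level debugfs entry: dump the PSR state of the first PSR-capable
 * encoder via intel_psr_status().
 */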
3262 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3263 {
3264 	struct drm_i915_private *dev_priv = m->private;
3265 	struct intel_dp *intel_dp = NULL;
3266 	struct intel_encoder *encoder;
3267 
3268 	if (!HAS_PSR(dev_priv))
3269 		return -ENODEV;
3270 
3271 	/* Find the first eDP encoder that supports PSR */
3272 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3273 		intel_dp = enc_to_intel_dp(encoder);
3274 		break;
3275 	}
3276 
3277 	if (!intel_dp)
3278 		return -ENODEV;
3279 
3280 	return intel_psr_status(m, intel_dp);
3281 }
3282 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3283 
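/*
 * Write handler for the i915_edp_psr_debug file: apply the new debug value to
 * every PSR-capable encoder via intel_psr_debug_set(), holding a runtime PM
 * reference across the update.
 */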
3284 static int
3285 i915_edp_psr_debug_set(void *data, u64 val)
3286 {
3287 	struct drm_i915_private *dev_priv = data;
3288 	struct intel_encoder *encoder;
3289 	intel_wakeref_t wakeref;
3290 	int ret = -ENODEV;
3291 
3292 	if (!HAS_PSR(dev_priv))
3293 		return ret;
3294 
3295 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3296 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3297 
3298 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3299 
3300 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3301 
3302 		// TODO: split into per-transcoder PSR debug state
3303 		ret = intel_psr_debug_set(intel_dp, val);
3304 
3305 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3306 	}
3307 
3308 	return ret;
3309 }
3310 
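/*
 * Read handler for the i915_edp_psr_debug file: report the debug value of the
 * first PSR-capable encoder only (see the TODO below about making this per
 * transcoder).
 */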
3311 static int
3312 i915_edp_psr_debug_get(void *data, u64 *val)
3313 {
3314 	struct drm_i915_private *dev_priv = data;
3315 	struct intel_encoder *encoder;
3316 
3317 	if (!HAS_PSR(dev_priv))
3318 		return -ENODEV;
3319 
3320 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3321 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3322 
3323 		// TODO: split into per-transcoder PSR debug state
3324 		*val = READ_ONCE(intel_dp->psr.debug);
3325 		return 0;
3326 	}
3327 
3328 	return -ENODEV;
3329 }
3330 
3331 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3332 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3333 			"%llu\n");
3334 
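/**
 * intel_psr_debugfs_register - create the device level PSR debugfs files
 * @i915: i915 device instance
 *
 * Creates the i915_edp_psr_debug (read/write) and i915_edp_psr_status
 * (read only) files under the primary DRM minor's debugfs root, typically
 * /sys/kernel/debug/dri/0/. For example (illustrative, the exact path
 * depends on the card index):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *
 * When the debug value written to i915_edp_psr_debug has I915_PSR_DEBUG_IRQ
 * set, the status dump also reports the last PSR entry attempt and exit
 * timestamps.
 */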
3335 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3336 {
3337 	struct drm_minor *minor = i915->drm.primary;
3338 
3339 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3340 			    i915, &i915_edp_psr_debug_fops);
3341 
3342 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3343 			    i915, &i915_edp_psr_status_fops);
3344 }
3345 
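/* Human readable name of the currently enabled mode, for the dumps below. */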
3346 static const char *psr_mode_str(struct intel_dp *intel_dp)
3347 {
3348 	if (intel_dp->psr.panel_replay_enabled)
3349 		return "PANEL-REPLAY";
3350 	else if (intel_dp->psr.enabled)
3351 		return "PSR";
3352 
3353 	return "unknown";
3354 }
3355 
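/*
 * Per-connector debugfs entry: query the sink's PSR/Panel Replay status and
 * error status DPCD registers and decode them into human readable form.
 */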
3356 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3357 {
3358 	struct intel_connector *connector = m->private;
3359 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3360 	static const char * const sink_status[] = {
3361 		"inactive",
3362 		"transition to active, capture and display",
3363 		"active, display from RFB",
3364 		"active, capture and display on sink device timings",
3365 		"transition to inactive, capture and display, timing re-sync",
3366 		"reserved",
3367 		"reserved",
3368 		"sink internal error",
3369 	};
3370 	static const char * const panel_replay_status[] = {
3371 		"Sink device frame is locked to the Source device",
3372 		"Sink device is coasting, using the VTotal target",
3373 		"Sink device is governing the frame rate (frame rate unlock is granted)",
3374 		"Sink device in the process of re-locking with the Source device",
3375 	};
3376 	const char *str;
3377 	int ret;
3378 	u8 status, error_status;
3379 	u32 idx;
3380 
3381 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3382 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3383 		return -ENODEV;
3384 	}
3385 
3386 	if (connector->base.status != connector_status_connected)
3387 		return -ENODEV;
3388 
3389 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3390 	if (ret)
3391 		return ret;
3392 
3393 	str = "unknown";
3394 	if (intel_dp->psr.panel_replay_enabled) {
3395 		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3396 		if (idx < ARRAY_SIZE(panel_replay_status))
3397 			str = panel_replay_status[idx];
3398 	} else if (intel_dp->psr.enabled) {
3399 		idx = status & DP_PSR_SINK_STATE_MASK;
3400 		if (idx < ARRAY_SIZE(sink_status))
3401 			str = sink_status[idx];
3402 	}
3403 
3404 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3405 
3406 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3407 
3408 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3409 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3410 			    DP_PSR_LINK_CRC_ERROR))
3411 		seq_puts(m, ":\n");
3412 	else
3413 		seq_puts(m, "\n");
3414 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3415 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3416 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3417 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3418 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3419 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3420 
3421 	return ret;
3422 }
3423 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3424 
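/* Per-connector variant of the source side PSR status dump. */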
3425 static int i915_psr_status_show(struct seq_file *m, void *data)
3426 {
3427 	struct intel_connector *connector = m->private;
3428 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3429 
3430 	return intel_psr_status(m, intel_dp);
3431 }
3432 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3433 
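/**
 * intel_psr_connector_debugfs_add - create the per-connector PSR debugfs files
 * @connector: intel connector
 *
 * Creates i915_psr_sink_status and, when the platform has PSR or DP 2.0
 * support, i915_psr_status in the connector's debugfs directory (typically
 * /sys/kernel/debug/dri/0/<connector name>/). Only non-MST eDP and
 * DisplayPort connectors get the files for now.
 */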
3434 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3435 {
3436 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3437 	struct dentry *root = connector->base.debugfs_entry;
3438 
3439 	/* TODO: Add support for MST connectors as well. */
3440 	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3441 	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3442 	    connector->mst_port)
3443 		return;
3444 
3445 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3446 			    connector, &i915_psr_sink_status_fops);
3447 
3448 	if (HAS_PSR(i915) || HAS_DP20(i915))
3449 		debugfs_create_file("i915_psr_status", 0444, root,
3450 				    connector, &i915_psr_status_fops);
3451 }
3452