xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 569d7db70e5dcf13fbf072f10e9096577ac1e565)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_alpm.h"
31 #include "intel_atomic.h"
32 #include "intel_crtc.h"
33 #include "intel_cursor_regs.h"
34 #include "intel_ddi.h"
35 #include "intel_de.h"
36 #include "intel_display_types.h"
37 #include "intel_dp.h"
38 #include "intel_dp_aux.h"
39 #include "intel_frontbuffer.h"
40 #include "intel_hdmi.h"
41 #include "intel_psr.h"
42 #include "intel_psr_regs.h"
43 #include "intel_snps_phy.h"
44 #include "skl_universal_plane.h"
45 
46 /**
47  * DOC: Panel Self Refresh (PSR/SRD)
48  *
49  * Since Haswell the display controller supports Panel Self-Refresh on
50  * display panels which have a remote frame buffer (RFB) implemented
51  * according to the PSR spec in eDP 1.3. The PSR feature allows the display
52  * to go to lower standby states when the system is idle but the display is
53  * on, as it completely eliminates display refresh requests to DDR memory
54  * as long as the frame buffer for that display is unchanged.
55  *
56  * Panel Self Refresh must be supported by both Hardware (source) and
57  * Panel (sink).
58  *
59  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
60  * to power down the link and memory controller. For DSI panels the same idea
61  * is called "manual mode".
62  *
63  * The implementation uses the hardware-based PSR support which automatically
64  * enters/exits self-refresh mode. The hardware takes care of sending the
65  * required DP aux message and could even retrain the link (that part isn't
66  * enabled yet though). The hardware also keeps track of any frontbuffer
67  * changes to know when to exit self-refresh mode again. Unfortunately that
68  * part doesn't work too well, which is why the i915 PSR support uses
69  * software frontbuffer tracking to make sure it doesn't miss a screen
70  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
71  * get called by the frontbuffer tracking code. Note that because of locking
72  * issues the self-refresh re-enable code is done from a work queue, which
73  * must be correctly synchronized/cancelled when shutting down the pipe.
74  *
75  * DC3CO (DC3 clock off)
76  *
77  * On top of PSR2, GEN12 adds an intermediate power saving state that turns
78  * the clock off automatically during PSR2 idle state.
79  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
80  * entry/exit allows the HW to enter a low-power state even when page flipping
81  * periodically (for instance a 30fps video playback scenario).
82  *
83  * Every time a flip occurs PSR2 gets out of deep sleep state (if it was in
84  * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after
85  * 6 frames. If no other flip occurs and that function is executed, DC3CO is
86  * disabled and PSR2 is configured to enter deep sleep, resetting again in
87  * case of another flip.
88  * Front buffer modifications do not trigger DC3CO activation on purpose, as
89  * it would bring a lot of complexity and most modern systems will only use
90  * page flips.
91  */
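
/*
 * Illustrative sketch (not part of this file): the frontbuffer tracking
 * integration described above means a CPU rendering path is expected to
 * bracket its writes roughly like below, which eventually funnels into
 * intel_psr_invalidate()/intel_psr_flush(). The helper cpu_write_pixels()
 * is hypothetical and only stands in for the actual drawing code.
 *
 *	intel_frontbuffer_invalidate(front, ORIGIN_CPU);
 *	cpu_write_pixels(vaddr);
 *	intel_frontbuffer_flush(front, ORIGIN_CPU);
 *
 * PSR exits on the invalidate and is re-armed from the work queue after the
 * flush, as noted in the locking remark above.
 */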
92 
93 /*
94  * Description of PSR mask bits:
95  *
96  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
97  *
98  *  When unmasked (nearly) all display register writes (eg. even
99  *  SWF) trigger a PSR exit. Some registers are excluded from this
100  *  and they have a more specific mask (described below). On icl+
101  *  this bit no longer exists and is effectively always set.
102  *
103  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
104  *
105  *  When unmasked (nearly) all pipe/plane register writes
106  *  trigger a PSR exit. Some plane registers are excluded from this
107  *  and they have a more specific mask (described below).
108  *
109  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
110  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
111  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
112  *
113  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
114  *  SPR_SURF/CURBASE are not included in this and instead are
115  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
116  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
117  *
118  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
119  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
120  *
121  *  When unmasked PSR is blocked as long as the sprite
122  *  plane is enabled. skl+ with their universal planes no
123  *  longer have a mask bit like this, and no plane being
124  *  enabled blocks PSR.
125  *
126  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
127  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
128  *
129  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
130  *  this doesn't exist but CURPOS is included in the
131  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
132  *
133  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
134  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
135  *
136  *  When unmasked PSR is blocked as long as vblank and/or vsync
137  *  interrupt is unmasked in IMR *and* enabled in IER.
138  *
139  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
140  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
141  *
142  *  Selects whether PSR exit generates an extra vblank before
143  *  the first frame is transmitted. Also note the opposite polarity
144  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
145  *  unmasked==do not generate the extra vblank).
146  *
147  *  With DC states enabled the extra vblank happens after link training,
148  *  with DC states disabled it happens immediately upon PSR exit trigger.
149  *  No idea as of now why there is a difference. HSW/BDW (which don't
150  *  even have DMC) always generate it after link training. Go figure.
151  *
152  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
153  *  and thus won't latch until the first vblank. So with DC states
154  *  enabled the register effectively uses the reset value during DC5
155  *  exit+PSR exit sequence, and thus the bit does nothing until
156  *  latched by the vblank that it was trying to prevent from being
157  *  generated in the first place. So we should probably call this
158  *  one a chicken/egg bit instead on skl+.
159  *
160  *  In standby mode (as opposed to link-off) this makes no difference
161  *  as the timing generator keeps running the whole time generating
162  *  normal periodic vblanks.
163  *
164  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
165  *  and doing so makes the behaviour match the skl+ reset value.
166  *
167  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
168  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
169  *
170  *  On BDW without this bit set no vblanks whatsoever are
171  *  generated after PSR exit. On HSW this has no apparent effect.
172  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
173  *
174  * The rest of the bits are more self-explanatory and/or
175  * irrelevant for normal operation.
176  *
177  * Description of the intel_crtc_state variables has_psr, has_panel_replay and
178  * has_sel_update:
179  *
180  *  has_psr (alone):					PSR1
181  *  has_psr + has_sel_update:				PSR2
182  *  has_psr + has_panel_replay:				Panel Replay
183  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
184  *
185  * Description of some intel_psr variables: enabled, panel_replay_enabled,
186  * sel_update_enabled:
187  *
188  *  enabled (alone):						PSR1
189  *  enabled + sel_update_enabled:				PSR2
190  *  enabled + panel_replay_enabled:				Panel Replay
191  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
192  */
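
/*
 * Illustrative sketch (not part of this file) restating the two tables above
 * in code form; the helper psr_mode_name() is hypothetical:
 *
 *	static const char *psr_mode_name(const struct intel_crtc_state *s)
 *	{
 *		if (!s->has_psr)
 *			return "none";
 *		if (s->has_panel_replay)
 *			return s->has_sel_update ? "Panel Replay SU" : "Panel Replay";
 *		return s->has_sel_update ? "PSR2" : "PSR1";
 *	}
 */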
193 
194 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
195 			   (intel_dp)->psr.source_support)
196 
197 bool intel_encoder_can_psr(struct intel_encoder *encoder)
198 {
199 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
200 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
201 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
202 	else
203 		return false;
204 }
205 
206 static bool psr_global_enabled(struct intel_dp *intel_dp)
207 {
208 	struct intel_connector *connector = intel_dp->attached_connector;
209 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
210 
211 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
212 	case I915_PSR_DEBUG_DEFAULT:
213 		if (i915->display.params.enable_psr == -1)
214 			return connector->panel.vbt.psr.enable;
215 		return i915->display.params.enable_psr;
216 	case I915_PSR_DEBUG_DISABLE:
217 		return false;
218 	default:
219 		return true;
220 	}
221 }
222 
223 static bool psr2_global_enabled(struct intel_dp *intel_dp)
224 {
225 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
226 
227 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
228 	case I915_PSR_DEBUG_DISABLE:
229 	case I915_PSR_DEBUG_FORCE_PSR1:
230 		return false;
231 	default:
232 		if (i915->display.params.enable_psr == 1)
233 			return false;
234 		return true;
235 	}
236 }
237 
238 static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
239 {
240 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
241 
242 	if (i915->display.params.enable_psr != -1)
243 		return false;
244 
245 	return true;
246 }
247 
248 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
249 {
250 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
251 
252 	if ((i915->display.params.enable_psr != -1) ||
253 	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
254 		return false;
255 	return true;
256 }
257 
258 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
259 {
260 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
261 
262 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
263 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
264 }
265 
266 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
267 {
268 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
269 
270 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
271 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
272 }
273 
274 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
275 {
276 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
277 
278 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
279 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
280 }
281 
282 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
283 {
284 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
285 
286 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
287 		EDP_PSR_MASK(intel_dp->psr.transcoder);
288 }
289 
290 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
291 			      enum transcoder cpu_transcoder)
292 {
293 	if (DISPLAY_VER(dev_priv) >= 8)
294 		return EDP_PSR_CTL(dev_priv, cpu_transcoder);
295 	else
296 		return HSW_SRD_CTL;
297 }
298 
299 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
300 				enum transcoder cpu_transcoder)
301 {
302 	if (DISPLAY_VER(dev_priv) >= 8)
303 		return EDP_PSR_DEBUG(dev_priv, cpu_transcoder);
304 	else
305 		return HSW_SRD_DEBUG;
306 }
307 
308 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
309 				   enum transcoder cpu_transcoder)
310 {
311 	if (DISPLAY_VER(dev_priv) >= 8)
312 		return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder);
313 	else
314 		return HSW_SRD_PERF_CNT;
315 }
316 
317 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
318 				 enum transcoder cpu_transcoder)
319 {
320 	if (DISPLAY_VER(dev_priv) >= 8)
321 		return EDP_PSR_STATUS(dev_priv, cpu_transcoder);
322 	else
323 		return HSW_SRD_STATUS;
324 }
325 
326 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
327 			      enum transcoder cpu_transcoder)
328 {
329 	if (DISPLAY_VER(dev_priv) >= 12)
330 		return TRANS_PSR_IMR(dev_priv, cpu_transcoder);
331 	else
332 		return EDP_PSR_IMR;
333 }
334 
335 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
336 			      enum transcoder cpu_transcoder)
337 {
338 	if (DISPLAY_VER(dev_priv) >= 12)
339 		return TRANS_PSR_IIR(dev_priv, cpu_transcoder);
340 	else
341 		return EDP_PSR_IIR;
342 }
343 
344 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
345 				  enum transcoder cpu_transcoder)
346 {
347 	if (DISPLAY_VER(dev_priv) >= 8)
348 		return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder);
349 	else
350 		return HSW_SRD_AUX_CTL;
351 }
352 
353 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
354 				   enum transcoder cpu_transcoder, int i)
355 {
356 	if (DISPLAY_VER(dev_priv) >= 8)
357 		return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i);
358 	else
359 		return HSW_SRD_AUX_DATA(i);
360 }
361 
362 static void psr_irq_control(struct intel_dp *intel_dp)
363 {
364 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
365 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
366 	u32 mask;
367 
368 	if (intel_dp->psr.panel_replay_enabled)
369 		return;
370 
371 	mask = psr_irq_psr_error_bit_get(intel_dp);
372 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
373 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
374 			psr_irq_pre_entry_bit_get(intel_dp);
375 
376 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
377 		     psr_irq_mask_get(intel_dp), ~mask);
378 }
379 
380 static void psr_event_print(struct drm_i915_private *i915,
381 			    u32 val, bool sel_update_enabled)
382 {
383 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
384 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
385 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
386 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
387 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
388 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
389 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
390 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
391 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
392 	if (val & PSR_EVENT_GRAPHICS_RESET)
393 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
394 	if (val & PSR_EVENT_PCH_INTERRUPT)
395 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
396 	if (val & PSR_EVENT_MEMORY_UP)
397 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
398 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
399 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
400 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
401 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
402 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
403 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
404 	if (val & PSR_EVENT_REGISTER_UPDATE)
405 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
406 	if (val & PSR_EVENT_HDCP_ENABLE)
407 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
408 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
409 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
410 	if (val & PSR_EVENT_VBI_ENABLE)
411 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
412 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
413 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
414 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
415 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
416 }
417 
418 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
419 {
420 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
421 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
422 	ktime_t time_ns =  ktime_get();
423 
424 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
425 		intel_dp->psr.last_entry_attempt = time_ns;
426 		drm_dbg_kms(&dev_priv->drm,
427 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
428 			    transcoder_name(cpu_transcoder));
429 	}
430 
431 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
432 		intel_dp->psr.last_exit = time_ns;
433 		drm_dbg_kms(&dev_priv->drm,
434 			    "[transcoder %s] PSR exit completed\n",
435 			    transcoder_name(cpu_transcoder));
436 
437 		if (DISPLAY_VER(dev_priv) >= 9) {
438 			u32 val;
439 
440 			val = intel_de_rmw(dev_priv,
441 					   PSR_EVENT(dev_priv, cpu_transcoder),
442 					   0, 0);
443 
444 			psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled);
445 		}
446 	}
447 
448 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
449 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
450 			 transcoder_name(cpu_transcoder));
451 
452 		intel_dp->psr.irq_aux_error = true;
453 
454 		/*
455 		 * If this interrupt is not masked it will keep firing
456 		 * so fast that it prevents the scheduled work from
457 		 * running.
458 		 * Also, after a PSR error we don't want to arm PSR
459 		 * again, so we don't care about unmasking the interrupt
460 		 * or clearing irq_aux_error.
461 		 */
462 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
463 			     0, psr_irq_psr_error_bit_get(intel_dp));
464 
465 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
466 	}
467 }
468 
469 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
470 {
471 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
472 	u8 val = 8; /* assume the worst if we can't read the value */
473 
474 	if (drm_dp_dpcd_readb(&intel_dp->aux,
475 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
476 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
477 	else
478 		drm_dbg_kms(&i915->drm,
479 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
480 	return val;
481 }
482 
483 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
484 {
485 	u8 su_capability = 0;
486 
487 	if (intel_dp->psr.sink_panel_replay_su_support)
488 		drm_dp_dpcd_readb(&intel_dp->aux,
489 				  DP_PANEL_PANEL_REPLAY_CAPABILITY,
490 				  &su_capability);
491 	else
492 		su_capability = intel_dp->psr_dpcd[1];
493 
494 	return su_capability;
495 }
496 
497 static unsigned int
498 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
499 {
500 	return intel_dp->psr.sink_panel_replay_su_support ?
501 		DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
502 		DP_PSR2_SU_X_GRANULARITY;
503 }
504 
505 static unsigned int
506 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
507 {
508 	return intel_dp->psr.sink_panel_replay_su_support ?
509 		DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
510 		DP_PSR2_SU_Y_GRANULARITY;
511 }
512 
513 /*
514  * Note: Bits related to granularity are the same in the panel replay and PSR
515  * registers. Rely on the PSR definitions for these "common" bits.
516  */
517 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
518 {
519 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
520 	ssize_t r;
521 	u16 w;
522 	u8 y;
523 
524 	/*
525 	 * TODO: Do we need to take into account panels supporting both PSR and
526 	 * Panel replay?
527 	 */
528 
529 	/*
530 	 * If the sink doesn't have specific granularity requirements, set the
531 	 * legacy ones.
532 	 */
533 	if (!(intel_dp_get_su_capability(intel_dp) &
534 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
535 		/* As PSR2 HW sends full lines, we do not care about x granularity */
536 		w = 4;
537 		y = 4;
538 		goto exit;
539 	}
540 
541 	r = drm_dp_dpcd_read(&intel_dp->aux,
542 			     intel_dp_get_su_x_granularity_offset(intel_dp),
543 			     &w, 2);
544 	if (r != 2)
545 		drm_dbg_kms(&i915->drm,
546 			    "Unable to read selective update x granularity\n");
547 	/*
548 	 * Spec says that if the value read is 0 the default granularity should
549 	 * be used instead.
550 	 */
551 	if (r != 2 || w == 0)
552 		w = 4;
553 
554 	r = drm_dp_dpcd_read(&intel_dp->aux,
555 			     intel_dp_get_su_y_granularity_offset(intel_dp),
556 			     &y, 1);
557 	if (r != 1) {
558 		drm_dbg_kms(&i915->drm,
559 			    "Unable to read selective update y granularity\n");
560 		y = 4;
561 	}
562 	if (y == 0)
563 		y = 1;
564 
565 exit:
566 	intel_dp->psr.su_w_granularity = w;
567 	intel_dp->psr.su_y_granularity = y;
568 }
569 
570 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
571 {
572 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
573 
574 	intel_dp->psr.sink_panel_replay_support = true;
575 
576 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
577 		intel_dp->psr.sink_panel_replay_su_support = true;
578 
579 	drm_dbg_kms(&i915->drm,
580 		    "Panel replay %sis supported by panel\n",
581 		    intel_dp->psr.sink_panel_replay_su_support ?
582 		    "selective_update " : "");
583 }
584 
585 static void _psr_init_dpcd(struct intel_dp *intel_dp)
586 {
587 	struct drm_i915_private *i915 =
588 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
589 
590 	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
591 		    intel_dp->psr_dpcd[0]);
592 
593 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
594 		drm_dbg_kms(&i915->drm,
595 			    "PSR support not currently available for this panel\n");
596 		return;
597 	}
598 
599 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
600 		drm_dbg_kms(&i915->drm,
601 			    "Panel lacks power state control, PSR cannot be enabled\n");
602 		return;
603 	}
604 
605 	intel_dp->psr.sink_support = true;
606 	intel_dp->psr.sink_sync_latency =
607 		intel_dp_get_sink_sync_latency(intel_dp);
608 
609 	if (DISPLAY_VER(i915) >= 9 &&
610 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
611 		bool y_req = intel_dp->psr_dpcd[1] &
612 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
613 
614 		/*
615 		 * All panels that support PSR version 03h (PSR2 +
616 		 * Y-coordinate) can handle Y-coordinates in the VSC but we
617 		 * are only sure that it is going to be used when required by
618 		 * the panel. This way the panel is capable of doing selective
619 		 * updates without an aux frame sync.
620 		 *
621 		 * To support panels with PSR version 02h, or PSR version 03h
622 		 * without the Y-coordinate requirement, we would need to
623 		 * enable GTC first.
624 		 */
625 		intel_dp->psr.sink_psr2_support = y_req &&
626 			intel_alpm_aux_wake_supported(intel_dp);
627 		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
628 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
629 	}
630 }
631 
632 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
633 {
634 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
635 			 sizeof(intel_dp->psr_dpcd));
636 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
637 			  &intel_dp->pr_dpcd);
638 
639 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
640 		_panel_replay_init_dpcd(intel_dp);
641 
642 	if (intel_dp->psr_dpcd[0])
643 		_psr_init_dpcd(intel_dp);
644 
645 	if (intel_dp->psr.sink_psr2_support ||
646 	    intel_dp->psr.sink_panel_replay_su_support)
647 		intel_dp_get_su_granularity(intel_dp);
648 }
649 
650 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
651 {
652 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
653 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
654 	u32 aux_clock_divider, aux_ctl;
655 	/* write DP_SET_POWER=D0 */
656 	static const u8 aux_msg[] = {
657 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
658 		[1] = (DP_SET_POWER >> 8) & 0xff,
659 		[2] = DP_SET_POWER & 0xff,
660 		[3] = 1 - 1,
661 		[4] = DP_SET_POWER_D0,
662 	};
663 	int i;
664 
665 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
666 	for (i = 0; i < sizeof(aux_msg); i += 4)
667 		intel_de_write(dev_priv,
668 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
669 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
670 
671 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
672 
673 	/* Start with bits set for DDI_AUX_CTL register */
674 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
675 					     aux_clock_divider);
676 
677 	/* Select only valid bits for SRD_AUX_CTL */
678 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
679 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
680 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
681 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
682 
683 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
684 		       aux_ctl);
685 }
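
/*
 * Worked example for the loop above (illustrative only): aux_msg[] is 5
 * bytes, so the first iteration packs bytes 0-3 into AUX_DATA register 0 and
 * the second iteration packs the single remaining byte (DP_SET_POWER_D0)
 * into register 1, mirroring how a normal DDI AUX transfer lays out its
 * message.
 */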
686 
687 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
688 {
689 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
690 
691 	if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) ||
692 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
693 		return false;
694 
695 	return panel_replay ?
696 		intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
697 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
698 		psr2_su_region_et_global_enabled(intel_dp);
699 }
700 
701 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
702 				      const struct intel_crtc_state *crtc_state)
703 {
704 	u8 val = DP_PANEL_REPLAY_ENABLE |
705 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
706 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
707 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
708 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
709 
710 	if (crtc_state->has_sel_update)
711 		val |= DP_PANEL_REPLAY_SU_ENABLE;
712 
713 	if (crtc_state->enable_psr2_su_region_et)
714 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
715 
716 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
717 }
718 
719 static void _psr_enable_sink(struct intel_dp *intel_dp,
720 			     const struct intel_crtc_state *crtc_state)
721 {
722 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
723 	u8 val = DP_PSR_ENABLE;
724 
725 	if (crtc_state->has_sel_update) {
726 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
727 	} else {
728 		if (intel_dp->psr.link_standby)
729 			val |= DP_PSR_MAIN_LINK_ACTIVE;
730 
731 		if (DISPLAY_VER(i915) >= 8)
732 			val |= DP_PSR_CRC_VERIFICATION;
733 	}
734 
735 	if (crtc_state->enable_psr2_su_region_et)
736 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
737 
738 	if (intel_dp->psr.entry_setup_frames > 0)
739 		val |= DP_PSR_FRAME_CAPTURE;
740 
741 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
742 }
743 
744 void intel_psr_enable_sink(struct intel_dp *intel_dp,
745 			   const struct intel_crtc_state *crtc_state)
746 {
747 	/* Enable ALPM at sink for psr2 */
748 	if (!crtc_state->has_panel_replay && crtc_state->has_sel_update)
749 		drm_dp_dpcd_writeb(&intel_dp->aux,
750 				   DP_RECEIVER_ALPM_CONFIG,
751 				   DP_ALPM_ENABLE |
752 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
753 
754 	crtc_state->has_panel_replay ?
755 		_panel_replay_enable_sink(intel_dp, crtc_state) :
756 		_psr_enable_sink(intel_dp, crtc_state);
757 
758 	if (intel_dp_is_edp(intel_dp))
759 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
760 }
761 
762 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
763 {
764 	struct intel_connector *connector = intel_dp->attached_connector;
765 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
766 	u32 val = 0;
767 
768 	if (DISPLAY_VER(dev_priv) >= 11)
769 		val |= EDP_PSR_TP4_TIME_0us;
770 
771 	if (dev_priv->display.params.psr_safest_params) {
772 		val |= EDP_PSR_TP1_TIME_2500us;
773 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
774 		goto check_tp3_sel;
775 	}
776 
777 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
778 		val |= EDP_PSR_TP1_TIME_0us;
779 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
780 		val |= EDP_PSR_TP1_TIME_100us;
781 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
782 		val |= EDP_PSR_TP1_TIME_500us;
783 	else
784 		val |= EDP_PSR_TP1_TIME_2500us;
785 
786 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
787 		val |= EDP_PSR_TP2_TP3_TIME_0us;
788 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
789 		val |= EDP_PSR_TP2_TP3_TIME_100us;
790 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
791 		val |= EDP_PSR_TP2_TP3_TIME_500us;
792 	else
793 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
794 
795 	/*
796 	 * WA 0479: hsw,bdw
797 	 * "Do not skip both TP1 and TP2/TP3"
798 	 */
799 	if (DISPLAY_VER(dev_priv) < 9 &&
800 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
801 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
802 		val |= EDP_PSR_TP2_TP3_TIME_100us;
803 
804 check_tp3_sel:
805 	if (intel_dp_source_supports_tps3(dev_priv) &&
806 	    drm_dp_tps3_supported(intel_dp->dpcd))
807 		val |= EDP_PSR_TP_TP1_TP3;
808 	else
809 		val |= EDP_PSR_TP_TP1_TP2;
810 
811 	return val;
812 }
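
/*
 * Worked example for the function above (illustrative only): VBT wake-up
 * times of tp1_wakeup_time_us = 50 and tp2_tp3_wakeup_time_us = 300 fall
 * into the 100 us and 500 us buckets respectively, so the returned value
 * selects EDP_PSR_TP1_TIME_100us | EDP_PSR_TP2_TP3_TIME_500us (plus the
 * TP1-vs-TP3 selection based on sink TPS3 support).
 */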
813 
814 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
815 {
816 	struct intel_connector *connector = intel_dp->attached_connector;
817 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
818 	int idle_frames;
819 
820 	/* Let's use 6 as the minimum to cover all known cases including the
821 	 * off-by-one issue that HW has in some cases.
822 	 */
823 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
824 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
825 
826 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
827 		idle_frames = 0xf;
828 
829 	return idle_frames;
830 }
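
/*
 * Worked example for the function above (illustrative only): with a VBT idle
 * frame count of 2 and a sink sync latency of 8 frames, the first max()
 * raises the value to 6 and the second to 8 + 1 = 9, which still fits in the
 * 4-bit IDLE_FRAMES field.
 */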
831 
832 static void hsw_activate_psr1(struct intel_dp *intel_dp)
833 {
834 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
835 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
836 	u32 max_sleep_time = 0x1f;
837 	u32 val = EDP_PSR_ENABLE;
838 
839 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
840 
841 	if (DISPLAY_VER(dev_priv) < 20)
842 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
843 
844 	if (IS_HASWELL(dev_priv))
845 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
846 
847 	if (intel_dp->psr.link_standby)
848 		val |= EDP_PSR_LINK_STANDBY;
849 
850 	val |= intel_psr1_get_tp_time(intel_dp);
851 
852 	if (DISPLAY_VER(dev_priv) >= 8)
853 		val |= EDP_PSR_CRC_ENABLE;
854 
855 	if (DISPLAY_VER(dev_priv) >= 20)
856 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
857 
858 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
859 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
860 }
861 
862 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
863 {
864 	struct intel_connector *connector = intel_dp->attached_connector;
865 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
866 	u32 val = 0;
867 
868 	if (dev_priv->display.params.psr_safest_params)
869 		return EDP_PSR2_TP2_TIME_2500us;
870 
871 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
872 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
873 		val |= EDP_PSR2_TP2_TIME_50us;
874 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
875 		val |= EDP_PSR2_TP2_TIME_100us;
876 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
877 		val |= EDP_PSR2_TP2_TIME_500us;
878 	else
879 		val |= EDP_PSR2_TP2_TIME_2500us;
880 
881 	return val;
882 }
883 
884 static int psr2_block_count_lines(struct intel_dp *intel_dp)
885 {
886 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
887 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
888 }
889 
890 static int psr2_block_count(struct intel_dp *intel_dp)
891 {
892 	return psr2_block_count_lines(intel_dp) / 4;
893 }
894 
895 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
896 {
897 	u8 frames_before_su_entry;
898 
899 	frames_before_su_entry = max_t(u8,
900 				       intel_dp->psr.sink_sync_latency + 1,
901 				       2);
902 
903 	/* Entry setup frames must be at least 1 less than frames before SU entry */
904 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
905 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
906 
907 	return frames_before_su_entry;
908 }
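
/*
 * Worked example for the function above (illustrative only):
 * sink_sync_latency = 1 gives frames_before_su_entry = max(1 + 1, 2) = 2;
 * if entry_setup_frames is also 2, the value is bumped to 2 + 1 = 3 so that
 * the setup frames stay at least one frame below the SU entry point.
 */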
909 
910 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
911 {
912 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
913 
914 	intel_de_rmw(dev_priv,
915 		     PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
916 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
917 
918 	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
919 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
920 }
921 
922 static void hsw_activate_psr2(struct intel_dp *intel_dp)
923 {
924 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
925 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
926 	u32 val = EDP_PSR2_ENABLE;
927 	u32 psr_val = 0;
928 
929 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
930 
931 	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
932 		val |= EDP_SU_TRACK_ENABLE;
933 
934 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
935 		val |= EDP_Y_COORDINATE_ENABLE;
936 
937 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
938 
939 	val |= intel_psr2_get_tp_time(intel_dp);
940 
941 	if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) {
942 		if (psr2_block_count(intel_dp) > 2)
943 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
944 		else
945 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
946 	}
947 
948 	/* Wa_22012278275:adl-p */
949 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
950 		static const u8 map[] = {
951 			2, /* 5 lines */
952 			1, /* 6 lines */
953 			0, /* 7 lines */
954 			3, /* 8 lines */
955 			6, /* 9 lines */
956 			5, /* 10 lines */
957 			4, /* 11 lines */
958 			7, /* 12 lines */
959 		};
960 		/*
961 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
962 		 * comments bellow for more information
963 		 */
964 		int tmp;
965 
966 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
967 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
968 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
969 
970 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
971 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
972 	} else if (DISPLAY_VER(dev_priv) >= 20) {
973 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
974 	} else if (DISPLAY_VER(dev_priv) >= 12) {
975 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
976 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
977 	} else if (DISPLAY_VER(dev_priv) >= 9) {
978 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
979 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
980 	}
981 
982 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
983 		val |= EDP_PSR2_SU_SDP_SCANLINE;
984 
985 	if (DISPLAY_VER(dev_priv) >= 20)
986 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
987 
988 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
989 		u32 tmp;
990 
991 		tmp = intel_de_read(dev_priv,
992 				    PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
993 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
994 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
995 		intel_de_write(dev_priv,
996 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0);
997 	}
998 
999 	if (intel_dp->psr.su_region_et_enabled)
1000 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1001 
1002 	/*
1003 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and the BSpec
1004 	 * recommends keeping this bit unset while PSR2 is enabled.
1005 	 */
1006 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
1007 
1008 	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val);
1009 }
1010 
1011 static bool
1012 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
1013 {
1014 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1015 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1016 	else if (DISPLAY_VER(dev_priv) >= 12)
1017 		return cpu_transcoder == TRANSCODER_A;
1018 	else if (DISPLAY_VER(dev_priv) >= 9)
1019 		return cpu_transcoder == TRANSCODER_EDP;
1020 	else
1021 		return false;
1022 }
1023 
1024 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1025 {
1026 	if (!crtc_state->hw.active)
1027 		return 0;
1028 
1029 	return DIV_ROUND_UP(1000 * 1000,
1030 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1031 }
1032 
1033 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1034 				     u32 idle_frames)
1035 {
1036 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1037 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1038 
1039 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
1040 		     EDP_PSR2_IDLE_FRAMES_MASK,
1041 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1042 }
1043 
1044 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1045 {
1046 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1047 
1048 	psr2_program_idle_frames(intel_dp, 0);
1049 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
1050 }
1051 
1052 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1053 {
1054 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1055 
1056 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1057 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1058 }
1059 
1060 static void tgl_dc3co_disable_work(struct work_struct *work)
1061 {
1062 	struct intel_dp *intel_dp =
1063 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1064 
1065 	mutex_lock(&intel_dp->psr.lock);
1066 	/* If delayed work is pending, it is not idle */
1067 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1068 		goto unlock;
1069 
1070 	tgl_psr2_disable_dc3co(intel_dp);
1071 unlock:
1072 	mutex_unlock(&intel_dp->psr.lock);
1073 }
1074 
1075 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1076 {
1077 	if (!intel_dp->psr.dc3co_exitline)
1078 		return;
1079 
1080 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1081 	/* Before PSR2 exit disallow dc3co */
1082 	tgl_psr2_disable_dc3co(intel_dp);
1083 }
1084 
1085 static bool
1086 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1087 			      struct intel_crtc_state *crtc_state)
1088 {
1089 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1090 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1091 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1092 	enum port port = dig_port->base.port;
1093 
1094 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1095 		return pipe <= PIPE_B && port <= PORT_B;
1096 	else
1097 		return pipe == PIPE_A && port == PORT_A;
1098 }
1099 
1100 static void
1101 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1102 				  struct intel_crtc_state *crtc_state)
1103 {
1104 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1105 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1106 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1107 	u32 exit_scanlines;
1108 
1109 	/*
1110 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1111 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1112 	 * is applied. B.Specs:49196
1113 	 */
1114 	return;
1115 
1116 	/*
1117 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
1118 	 * TODO: when the issue is addressed, this restriction should be removed.
1119 	 */
1120 	if (crtc_state->enable_psr2_sel_fetch)
1121 		return;
1122 
1123 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1124 		return;
1125 
1126 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1127 		return;
1128 
1129 	/* Wa_16011303918:adl-p */
1130 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1131 		return;
1132 
1133 	/*
1134 	 * DC3CO Exit time 200us B.Spec 49196
1135 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1136 	 */
1137 	exit_scanlines =
1138 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1139 
1140 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1141 		return;
1142 
1143 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1144 }
1145 
1146 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1147 					      struct intel_crtc_state *crtc_state)
1148 {
1149 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1150 
1151 	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1152 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1153 		drm_dbg_kms(&dev_priv->drm,
1154 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1155 		return false;
1156 	}
1157 
1158 	if (crtc_state->uapi.async_flip) {
1159 		drm_dbg_kms(&dev_priv->drm,
1160 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1161 		return false;
1162 	}
1163 
1164 	return crtc_state->enable_psr2_sel_fetch = true;
1165 }
1166 
1167 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1168 				   struct intel_crtc_state *crtc_state)
1169 {
1170 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1171 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1172 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1173 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1174 	u16 y_granularity = 0;
1175 
1176 	/* PSR2 HW only sends full lines so we only need to validate the width */
1177 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1178 		return false;
1179 
1180 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1181 		return false;
1182 
1183 	/* HW tracking is only aligned to 4 lines */
1184 	if (!crtc_state->enable_psr2_sel_fetch)
1185 		return intel_dp->psr.su_y_granularity == 4;
1186 
1187 	/*
1188 	 * adl_p and mtl platforms have 1 line granularity.
1189 	 * For other platforms with SW tracking we can adjust the y coordinates
1190 	 * to match the sink requirement if it is a multiple of 4.
1191 	 */
1192 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1193 		y_granularity = intel_dp->psr.su_y_granularity;
1194 	else if (intel_dp->psr.su_y_granularity <= 2)
1195 		y_granularity = 4;
1196 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1197 		y_granularity = intel_dp->psr.su_y_granularity;
1198 
1199 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1200 		return false;
1201 
1202 	if (crtc_state->dsc.compression_enable &&
1203 	    vdsc_cfg->slice_height % y_granularity)
1204 		return false;
1205 
1206 	crtc_state->su_y_granularity = y_granularity;
1207 	return true;
1208 }
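
/*
 * Worked example for the function above (illustrative only, assuming
 * crtc_vdisplay divides evenly): with selective fetch on a pre-adl_p/mtl
 * platform, a sink y granularity of 8 is kept as-is (multiple of 4), a
 * granularity of 2 is rounded up to 4, and a granularity of 6 matches
 * neither case, so the check fails.
 */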
1209 
1210 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1211 							struct intel_crtc_state *crtc_state)
1212 {
1213 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1214 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1215 	u32 hblank_total, hblank_ns, req_ns;
1216 
1217 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1218 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1219 
1220 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1221 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1222 
1223 	if ((hblank_ns - req_ns) > 100)
1224 		return true;
1225 
1226 	/* Not supported <13 / Wa_22012279113:adl-p */
1227 	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1228 		return false;
1229 
1230 	crtc_state->req_psr2_sdp_prior_scanline = true;
1231 	return true;
1232 }
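
/*
 * Worked example for the function above (illustrative only): a 300 MHz pixel
 * clock (crtc_clock = 300000) with a 160 pixel hblank gives
 * hblank_ns = 1000000 * 160 / 300000 = 533; four lanes at HBR2
 * (port_clock = 540000) give req_ns = ((60 / 4) + 11) * 1000 / 540 = 48, so
 * the 485 ns margin is well above 100 ns and no "SDP prior scanline"
 * indication is needed.
 */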
1233 
1234 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1235 					const struct drm_display_mode *adjusted_mode)
1236 {
1237 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1238 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1239 	int entry_setup_frames = 0;
1240 
1241 	if (psr_setup_time < 0) {
1242 		drm_dbg_kms(&i915->drm,
1243 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1244 			    intel_dp->psr_dpcd[1]);
1245 		return -ETIME;
1246 	}
1247 
1248 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1249 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1250 		if (DISPLAY_VER(i915) >= 20) {
1251 			/* setup entry frames can be up to 3 frames */
1252 			entry_setup_frames = 1;
1253 			drm_dbg_kms(&i915->drm,
1254 				    "PSR setup entry frames %d\n",
1255 				    entry_setup_frames);
1256 		} else {
1257 			drm_dbg_kms(&i915->drm,
1258 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1259 				    psr_setup_time);
1260 			return -ETIME;
1261 		}
1262 	}
1263 
1264 	return entry_setup_frames;
1265 }
1266 
1267 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1268 				       const struct intel_crtc_state *crtc_state)
1269 {
1270 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1271 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1272 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1273 	int wake_lines;
1274 
1275 	if (crtc_state->has_panel_replay)
1276 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1277 	else
1278 		wake_lines = DISPLAY_VER(i915) < 20 ?
1279 			psr2_block_count_lines(intel_dp) :
1280 			intel_dp->alpm_parameters.io_wake_lines;
1281 
1282 	if (crtc_state->req_psr2_sdp_prior_scanline)
1283 		vblank -= 1;
1284 
1285 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1286 	if (vblank < wake_lines)
1287 		return false;
1288 
1289 	return true;
1290 }
1291 
1292 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1293 				    struct intel_crtc_state *crtc_state)
1294 {
1295 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1296 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1297 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1298 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1299 
1300 	if (!intel_dp->psr.sink_psr2_support)
1301 		return false;
1302 
1303 	/* JSL and EHL only support eDP 1.3 */
1304 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1305 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1306 		return false;
1307 	}
1308 
1309 	/* Wa_16011181250 */
1310 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1311 	    IS_DG2(dev_priv)) {
1312 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1313 		return false;
1314 	}
1315 
1316 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1317 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1318 		return false;
1319 	}
1320 
1321 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1322 		drm_dbg_kms(&dev_priv->drm,
1323 			    "PSR2 not supported in transcoder %s\n",
1324 			    transcoder_name(crtc_state->cpu_transcoder));
1325 		return false;
1326 	}
1327 
1328 	/*
1329 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1330 	 * resolution requires DSC to be enabled, priority is given to DSC
1331 	 * over PSR2.
1332 	 */
1333 	if (crtc_state->dsc.compression_enable &&
1334 	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1335 		drm_dbg_kms(&dev_priv->drm,
1336 			    "PSR2 cannot be enabled since DSC is enabled\n");
1337 		return false;
1338 	}
1339 
1340 	if (DISPLAY_VER(dev_priv) >= 12) {
1341 		psr_max_h = 5120;
1342 		psr_max_v = 3200;
1343 		max_bpp = 30;
1344 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1345 		psr_max_h = 4096;
1346 		psr_max_v = 2304;
1347 		max_bpp = 24;
1348 	} else if (DISPLAY_VER(dev_priv) == 9) {
1349 		psr_max_h = 3640;
1350 		psr_max_v = 2304;
1351 		max_bpp = 24;
1352 	}
1353 
1354 	if (crtc_state->pipe_bpp > max_bpp) {
1355 		drm_dbg_kms(&dev_priv->drm,
1356 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1357 			    crtc_state->pipe_bpp, max_bpp);
1358 		return false;
1359 	}
1360 
1361 	/* Wa_16011303918:adl-p */
1362 	if (crtc_state->vrr.enable &&
1363 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1364 		drm_dbg_kms(&dev_priv->drm,
1365 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1366 		return false;
1367 	}
1368 
1369 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1370 		drm_dbg_kms(&dev_priv->drm,
1371 			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1372 		return false;
1373 	}
1374 
1375 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1376 		drm_dbg_kms(&dev_priv->drm,
1377 			    "PSR2 not enabled, Unable to use long enough wake times\n");
1378 		return false;
1379 	}
1380 
1381 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1382 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state)) {
1383 		drm_dbg_kms(&dev_priv->drm,
1384 			    "PSR2 not enabled, too short vblank time\n");
1385 		return false;
1386 	}
1387 
1388 	if (!crtc_state->enable_psr2_sel_fetch &&
1389 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1390 		drm_dbg_kms(&dev_priv->drm,
1391 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1392 			    crtc_hdisplay, crtc_vdisplay,
1393 			    psr_max_h, psr_max_v);
1394 		return false;
1395 	}
1396 
1397 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1398 
1399 	if (psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay))
1400 		crtc_state->enable_psr2_su_region_et = true;
1401 
1402 	return true;
1403 }
1404 
1405 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1406 					  struct intel_crtc_state *crtc_state)
1407 {
1408 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1409 
1410 	if (HAS_PSR2_SEL_FETCH(dev_priv) &&
1411 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1412 	    !HAS_PSR_HW_TRACKING(dev_priv)) {
1413 		drm_dbg_kms(&dev_priv->drm,
1414 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1415 		goto unsupported;
1416 	}
1417 
1418 	if (!psr2_global_enabled(intel_dp)) {
1419 		drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n");
1420 		goto unsupported;
1421 	}
1422 
1423 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1424 		goto unsupported;
1425 
1426 	if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
1427 					     !intel_dp->psr.sink_panel_replay_su_support))
1428 		goto unsupported;
1429 
1430 	if (crtc_state->crc_enabled) {
1431 		drm_dbg_kms(&dev_priv->drm,
1432 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1433 		goto unsupported;
1434 	}
1435 
1436 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1437 		drm_dbg_kms(&dev_priv->drm,
1438 			    "Selective update not enabled, SU granularity not compatible\n");
1439 		goto unsupported;
1440 	}
1441 
1442 	return true;
1443 
1444 unsupported:
1445 	crtc_state->enable_psr2_sel_fetch = false;
1446 	return false;
1447 }
1448 
1449 static bool _psr_compute_config(struct intel_dp *intel_dp,
1450 				struct intel_crtc_state *crtc_state)
1451 {
1452 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1453 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1454 	int entry_setup_frames;
1455 
1456 	/*
1457 	 * Current PSR panels don't work reliably with VRR enabled, so if
1458 	 * VRR is enabled, do not enable PSR.
1459 	 */
1460 	if (crtc_state->vrr.enable)
1461 		return false;
1462 
1463 	if (!CAN_PSR(intel_dp))
1464 		return false;
1465 
1466 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1467 
1468 	if (entry_setup_frames >= 0) {
1469 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1470 	} else {
1471 		drm_dbg_kms(&dev_priv->drm,
1472 			    "PSR condition failed: PSR setup timing not met\n");
1473 		return false;
1474 	}
1475 
1476 	return true;
1477 }
1478 
1479 static bool _panel_replay_compute_config(struct intel_dp *intel_dp)
1480 {
1481 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1482 
1483 	if (!CAN_PANEL_REPLAY(intel_dp))
1484 		return false;
1485 
1486 	if (!panel_replay_global_enabled(intel_dp)) {
1487 		drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n");
1488 		return false;
1489 	}
1490 
1491 	return true;
1492 }
1493 
1494 void intel_psr_compute_config(struct intel_dp *intel_dp,
1495 			      struct intel_crtc_state *crtc_state,
1496 			      struct drm_connector_state *conn_state)
1497 {
1498 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1499 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1500 
1501 	if (!psr_global_enabled(intel_dp)) {
1502 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1503 		return;
1504 	}
1505 
1506 	if (intel_dp->psr.sink_not_reliable) {
1507 		drm_dbg_kms(&dev_priv->drm,
1508 			    "PSR sink implementation is not reliable\n");
1509 		return;
1510 	}
1511 
1512 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1513 		drm_dbg_kms(&dev_priv->drm,
1514 			    "PSR condition failed: Interlaced mode enabled\n");
1515 		return;
1516 	}
1517 
1518 	/*
1519 	 * FIXME figure out what is wrong with PSR+joiner and
1520 	 * fix it. Presumably something related to the fact that
1521 	 * PSR is a transcoder level feature.
1522 	 */
1523 	if (crtc_state->joiner_pipes) {
1524 		drm_dbg_kms(&dev_priv->drm,
1525 			    "PSR disabled due to joiner\n");
1526 		return;
1527 	}
1528 
1529 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp);
1530 
1531 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1532 		_psr_compute_config(intel_dp, crtc_state);
1533 
1534 	if (!crtc_state->has_psr)
1535 		return;
1536 
1537 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1538 }
1539 
1540 void intel_psr_get_config(struct intel_encoder *encoder,
1541 			  struct intel_crtc_state *pipe_config)
1542 {
1543 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1544 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1545 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1546 	struct intel_dp *intel_dp;
1547 	u32 val;
1548 
1549 	if (!dig_port)
1550 		return;
1551 
1552 	intel_dp = &dig_port->dp;
1553 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1554 		return;
1555 
1556 	mutex_lock(&intel_dp->psr.lock);
1557 	if (!intel_dp->psr.enabled)
1558 		goto unlock;
1559 
1560 	if (intel_dp->psr.panel_replay_enabled) {
1561 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1562 	} else {
1563 		/*
1564 		 * Not possible to read the EDP_PSR/PSR2_CTL registers as they
1565 		 * get enabled/disabled because of frontbuffer tracking and others.
1566 		 */
1567 		pipe_config->has_psr = true;
1568 	}
1569 
1570 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1571 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1572 
1573 	if (!intel_dp->psr.sel_update_enabled)
1574 		goto unlock;
1575 
1576 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1577 		val = intel_de_read(dev_priv,
1578 				    PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
1579 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1580 			pipe_config->enable_psr2_sel_fetch = true;
1581 	}
1582 
1583 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1584 
1585 	if (DISPLAY_VER(dev_priv) >= 12) {
1586 		val = intel_de_read(dev_priv,
1587 				    TRANS_EXITLINE(dev_priv, cpu_transcoder));
1588 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1589 	}
1590 unlock:
1591 	mutex_unlock(&intel_dp->psr.lock);
1592 }
1593 
1594 static void intel_psr_activate(struct intel_dp *intel_dp)
1595 {
1596 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1597 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1598 
1599 	drm_WARN_ON(&dev_priv->drm,
1600 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1601 		    intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE);
1602 
1603 	drm_WARN_ON(&dev_priv->drm,
1604 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1605 
1606 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1607 
1608 	lockdep_assert_held(&intel_dp->psr.lock);
1609 
1610 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1611 	if (intel_dp->psr.panel_replay_enabled)
1612 		dg2_activate_panel_replay(intel_dp);
1613 	else if (intel_dp->psr.sel_update_enabled)
1614 		hsw_activate_psr2(intel_dp);
1615 	else
1616 		hsw_activate_psr1(intel_dp);
1617 
1618 	intel_dp->psr.active = true;
1619 }
1620 
1621 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1622 {
1623 	switch (intel_dp->psr.pipe) {
1624 	case PIPE_A:
1625 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1626 	case PIPE_B:
1627 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1628 	case PIPE_C:
1629 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1630 	case PIPE_D:
1631 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1632 	default:
1633 		MISSING_CASE(intel_dp->psr.pipe);
1634 		return 0;
1635 	}
1636 }
1637 
1638 /*
1639  * Wa_16013835468
1640  * Wa_14015648006
1641  */
1642 static void wm_optimization_wa(struct intel_dp *intel_dp,
1643 			       const struct intel_crtc_state *crtc_state)
1644 {
1645 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1646 	bool set_wa_bit = false;
1647 
1648 	/* Wa_14015648006 */
1649 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1650 		set_wa_bit |= crtc_state->wm_level_disabled;
1651 
1652 	/* Wa_16013835468 */
1653 	if (DISPLAY_VER(dev_priv) == 12)
1654 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1655 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1656 
1657 	if (set_wa_bit)
1658 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1659 			     0, wa_16013835468_bit_get(intel_dp));
1660 	else
1661 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1662 			     wa_16013835468_bit_get(intel_dp), 0);
1663 }
1664 
1665 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1666 				    const struct intel_crtc_state *crtc_state)
1667 {
1668 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1669 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1670 	u32 mask = 0;
1671 
1672 	/*
1673 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1674 	 * SKL+ use hardcoded values for PSR AUX transactions.
1675 	 */
1676 	if (DISPLAY_VER(dev_priv) < 9)
1677 		hsw_psr_setup_aux(intel_dp);
1678 
1679 	/*
1680 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1681 	 * mask LPSP to avoid a dependency on other drivers that might block
1682 	 * runtime_pm, besides preventing other hw tracking issues, now that we
1683 	 * can rely on frontbuffer tracking.
1684 	 *
1685 	 * From bspec prior to LunarLake:
1686 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1687 	 * panel replay mode.
1688 	 *
1689 	 * From bspec beyond LunarLake:
1690 	 * Panel Replay on DP: No bits are applicable
1691 	 * Panel Replay on eDP: All bits are applicable
1692 	 */
1693 	if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
1694 		mask = EDP_PSR_DEBUG_MASK_HPD;
1695 
1696 	if (intel_dp_is_edp(intel_dp)) {
1697 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1698 
1699 		/*
1700 		 * For some unknown reason on HSW non-ULT (or at least on
1701 		 * Dell Latitude E6540) external displays start to flicker
1702 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1703 		 * higher than should be possible with an external display.
1704 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1705 		 * when external displays are active.
1706 		 */
1707 		if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1708 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1709 
1710 		if (DISPLAY_VER(dev_priv) < 20)
1711 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1712 
1713 		/*
1714 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1715 		 * registers in order to keep the CURSURFLIVE tricks working :(
1716 		 */
1717 		if (IS_DISPLAY_VER(dev_priv, 9, 10))
1718 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1719 
1720 		/* allow PSR with sprite enabled */
1721 		if (IS_HASWELL(dev_priv))
1722 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1723 	}
1724 
1725 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1726 
1727 	psr_irq_control(intel_dp);
1728 
1729 	/*
1730 	 * TODO: if future platforms support DC3CO in more than one
1731 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1732 	 */
1733 	if (intel_dp->psr.dc3co_exitline)
1734 		intel_de_rmw(dev_priv,
1735 			     TRANS_EXITLINE(dev_priv, cpu_transcoder),
1736 			     EXITLINE_MASK,
1737 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1738 
1739 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1740 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1741 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1742 			     IGNORE_PSR2_HW_TRACKING : 0);
1743 
1744 	if (intel_dp_is_edp(intel_dp))
1745 		intel_alpm_configure(intel_dp, crtc_state);
1746 
1747 	/*
1748 	 * Wa_16013835468
1749 	 * Wa_14015648006
1750 	 */
1751 	wm_optimization_wa(intel_dp, crtc_state);
1752 
1753 	if (intel_dp->psr.sel_update_enabled) {
1754 		if (DISPLAY_VER(dev_priv) == 9)
1755 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1756 				     PSR2_VSC_ENABLE_PROG_HEADER |
1757 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1758 
1759 		/*
1760 		 * Wa_16014451276:adlp,mtl[a0,b0]
1761 		 * All supported adlp panels have 1-based X granularity; this may
1762 		 * cause issues if non-supported panels are used.
1763 		 */
1764 		if (!intel_dp->psr.panel_replay_enabled &&
1765 		    (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1766 		     IS_ALDERLAKE_P(dev_priv)))
1767 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1768 				     0, ADLP_1_BASED_X_GRANULARITY);
1769 
1770 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1771 		if (!intel_dp->psr.panel_replay_enabled &&
1772 		    IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1773 			intel_de_rmw(dev_priv,
1774 				     MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
1775 				     0,
1776 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1777 		else if (IS_ALDERLAKE_P(dev_priv))
1778 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1779 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1780 	}
1781 }
1782 
1783 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1784 {
1785 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1786 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1787 	u32 val;
1788 
1789 	if (intel_dp->psr.panel_replay_enabled)
1790 		goto no_err;
1791 
1792 	/*
1793 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1794 	 * will still keep the error set even after the reset done in the
1795 	 * irq_preinstall and irq_uninstall hooks.
1796 	 * Enabling PSR in this situation causes the screen to freeze the
1797 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1798 	 * to avoid any rendering problems.
1799 	 */
1800 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1801 	val &= psr_irq_psr_error_bit_get(intel_dp);
1802 	if (val) {
1803 		intel_dp->psr.sink_not_reliable = true;
1804 		drm_dbg_kms(&dev_priv->drm,
1805 			    "PSR interruption error set, not enabling PSR\n");
1806 		return false;
1807 	}
1808 
1809 no_err:
1810 	return true;
1811 }
1812 
1813 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1814 				    const struct intel_crtc_state *crtc_state)
1815 {
1816 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1817 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1818 	u32 val;
1819 
1820 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1821 
1822 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
1823 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1824 	intel_dp->psr.busy_frontbuffer_bits = 0;
1825 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1826 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1827 	/* DC5/DC6 requires at least 6 idle frames */
1828 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1829 	intel_dp->psr.dc3co_exit_delay = val;
1830 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1831 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1832 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
1833 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1834 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1835 		crtc_state->req_psr2_sdp_prior_scanline;
1836 
1837 	if (!psr_interrupt_error_check(intel_dp))
1838 		return;
1839 
1840 	if (intel_dp->psr.panel_replay_enabled) {
1841 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1842 	} else {
1843 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1844 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
1845 
1846 		/*
1847 		 * Panel replay has to be enabled in the sink before link training,
1848 		 * so only do the sink enabling for PSR here.
1849 		 */
1850 		intel_psr_enable_sink(intel_dp, crtc_state);
1851 	}
1852 
1853 	if (intel_dp_is_edp(intel_dp))
1854 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
1855 
1856 	intel_psr_enable_source(intel_dp, crtc_state);
1857 	intel_dp->psr.enabled = true;
1858 	intel_dp->psr.paused = false;
1859 
1860 	intel_psr_activate(intel_dp);
1861 }
1862 
1863 static void intel_psr_exit(struct intel_dp *intel_dp)
1864 {
1865 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1866 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1867 	u32 val;
1868 
1869 	if (!intel_dp->psr.active) {
1870 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1871 			val = intel_de_read(dev_priv,
1872 					    EDP_PSR2_CTL(dev_priv, cpu_transcoder));
1873 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1874 		}
1875 
1876 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1877 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1878 
1879 		return;
1880 	}
1881 
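	/* Disarm whichever mode is currently active: Panel Replay, PSR2 or PSR1. */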
1882 	if (intel_dp->psr.panel_replay_enabled) {
1883 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1884 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1885 	} else if (intel_dp->psr.sel_update_enabled) {
1886 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1887 
1888 		val = intel_de_rmw(dev_priv,
1889 				   EDP_PSR2_CTL(dev_priv, cpu_transcoder),
1890 				   EDP_PSR2_ENABLE, 0);
1891 
1892 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1893 	} else {
1894 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1895 				   EDP_PSR_ENABLE, 0);
1896 
1897 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1898 	}
1899 	intel_dp->psr.active = false;
1900 }
1901 
1902 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1903 {
1904 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1905 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1906 	i915_reg_t psr_status;
1907 	u32 psr_status_mask;
1908 
1909 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
1910 					  intel_dp->psr.panel_replay_enabled)) {
1911 		psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
1912 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1913 	} else {
1914 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1915 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1916 	}
1917 
1918 	/* Wait till PSR is idle */
1919 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1920 				    psr_status_mask, 2000))
1921 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1922 }
1923 
1924 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1925 {
1926 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1927 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1928 
1929 	lockdep_assert_held(&intel_dp->psr.lock);
1930 
1931 	if (!intel_dp->psr.enabled)
1932 		return;
1933 
1934 	if (intel_dp->psr.panel_replay_enabled)
1935 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1936 	else
1937 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1938 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
1939 
1940 	intel_psr_exit(intel_dp);
1941 	intel_psr_wait_exit_locked(intel_dp);
1942 
1943 	/*
1944 	 * Wa_16013835468
1945 	 * Wa_14015648006
1946 	 */
1947 	if (DISPLAY_VER(dev_priv) >= 11)
1948 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1949 			     wa_16013835468_bit_get(intel_dp), 0);
1950 
1951 	if (intel_dp->psr.sel_update_enabled) {
1952 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1953 		if (!intel_dp->psr.panel_replay_enabled &&
1954 		    IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1955 			intel_de_rmw(dev_priv,
1956 				     MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
1957 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1958 		else if (IS_ALDERLAKE_P(dev_priv))
1959 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1960 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1961 	}
1962 
1963 	if (intel_dp_is_edp(intel_dp))
1964 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
1965 
1966 	/* Panel Replay on eDP is always using ALPM aux less. */
1967 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
1968 		intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder),
1969 			     ALPM_CTL_ALPM_ENABLE |
1970 			     ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
1971 
1972 		intel_de_rmw(dev_priv,
1973 			     PORT_ALPM_CTL(dev_priv, cpu_transcoder),
1974 			     PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
1975 	}
1976 
1977 	/* Disable PSR on Sink */
1978 	if (!intel_dp->psr.panel_replay_enabled) {
1979 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1980 
1981 		if (intel_dp->psr.sel_update_enabled)
1982 			drm_dp_dpcd_writeb(&intel_dp->aux,
1983 					   DP_RECEIVER_ALPM_CONFIG, 0);
1984 	}
1985 
1986 	intel_dp->psr.enabled = false;
1987 	intel_dp->psr.panel_replay_enabled = false;
1988 	intel_dp->psr.sel_update_enabled = false;
1989 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1990 	intel_dp->psr.su_region_et_enabled = false;
1991 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1992 }
1993 
1994 /**
1995  * intel_psr_disable - Disable PSR
1996  * @intel_dp: Intel DP
1997  * @old_crtc_state: old CRTC state
1998  *
1999  * This function needs to be called before disabling pipe.
2000  */
2001 void intel_psr_disable(struct intel_dp *intel_dp,
2002 		       const struct intel_crtc_state *old_crtc_state)
2003 {
2004 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2005 
2006 	if (!old_crtc_state->has_psr)
2007 		return;
2008 
2009 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
2010 		return;
2011 
2012 	mutex_lock(&intel_dp->psr.lock);
2013 
2014 	intel_psr_disable_locked(intel_dp);
2015 
2016 	mutex_unlock(&intel_dp->psr.lock);
2017 	cancel_work_sync(&intel_dp->psr.work);
2018 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2019 }
2020 
2021 /**
2022  * intel_psr_pause - Pause PSR
2023  * @intel_dp: Intel DP
2024  *
2025  * This function needs to be called after enabling PSR.
2026  */
2027 void intel_psr_pause(struct intel_dp *intel_dp)
2028 {
2029 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2030 	struct intel_psr *psr = &intel_dp->psr;
2031 
2032 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2033 		return;
2034 
2035 	mutex_lock(&psr->lock);
2036 
2037 	if (!psr->enabled) {
2038 		mutex_unlock(&psr->lock);
2039 		return;
2040 	}
2041 
2042 	/* If we ever hit this, we will need to add refcount to pause/resume */
2043 	drm_WARN_ON(&dev_priv->drm, psr->paused);
2044 
2045 	intel_psr_exit(intel_dp);
2046 	intel_psr_wait_exit_locked(intel_dp);
2047 	psr->paused = true;
2048 
2049 	mutex_unlock(&psr->lock);
2050 
2051 	cancel_work_sync(&psr->work);
2052 	cancel_delayed_work_sync(&psr->dc3co_work);
2053 }
2054 
2055 /**
2056  * intel_psr_resume - Resume PSR
2057  * @intel_dp: Intel DP
2058  *
2059  * This function needs to be called after pausing PSR.
2060  */
2061 void intel_psr_resume(struct intel_dp *intel_dp)
2062 {
2063 	struct intel_psr *psr = &intel_dp->psr;
2064 
2065 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2066 		return;
2067 
2068 	mutex_lock(&psr->lock);
2069 
2070 	if (!psr->paused)
2071 		goto unlock;
2072 
2073 	psr->paused = false;
2074 	intel_psr_activate(intel_dp);
2075 
2076 unlock:
2077 	mutex_unlock(&psr->lock);
2078 }
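
/*
 * Note: intel_psr_pause() and intel_psr_resume() are meant to be used as a
 * pair by callers that need PSR quiesced around direct hardware access.
 */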
2079 
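/*
 * ADL-P and display 14+ use a different PSR2_MAN_TRK_CTL bit layout, hence
 * these helpers return the platform specific bit (or 0 where the bit does
 * not exist, as for the enable bit on those platforms).
 */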
2080 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
2081 {
2082 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
2083 		PSR2_MAN_TRK_CTL_ENABLE;
2084 }
2085 
2086 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
2087 {
2088 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2089 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2090 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2091 }
2092 
2093 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
2094 {
2095 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2096 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2097 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2098 }
2099 
2100 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
2101 {
2102 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2103 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2104 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2105 }
2106 
2107 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2108 {
2109 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2110 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2111 
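	/*
	 * With selective fetch enabled, request a single + continuous full
	 * frame fetch so the next frame is refetched from the source in full.
	 */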
2112 	if (intel_dp->psr.psr2_sel_fetch_enabled)
2113 		intel_de_write(dev_priv,
2114 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2115 			       man_trk_ctl_enable_bit_get(dev_priv) |
2116 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2117 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2118 			       man_trk_ctl_continuos_full_frame(dev_priv));
2119 
2120 	/*
2121 	 * Display WA #0884: skl+
2122 	 * This documented WA for bxt can be safely applied
2123 	 * broadly so we can force HW tracking to exit PSR
2124 	 * instead of disabling and re-enabling.
2125 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2126 	 * but it makes more sense to write to the currently active
2127 	 * pipe.
2128 	 *
2129 	 * This workaround is not documented for platforms with display 10 or
2130 	 * newer, but testing proved that it works up to display 13; newer
2131 	 * platforms will need testing.
2132 	 */
2133 	intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
2134 }
2135 
2136 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2137 {
2138 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2139 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2140 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2141 	struct intel_encoder *encoder;
2142 
2143 	if (!crtc_state->enable_psr2_sel_fetch)
2144 		return;
2145 
2146 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2147 					     crtc_state->uapi.encoder_mask) {
2148 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2149 
2150 		lockdep_assert_held(&intel_dp->psr.lock);
2151 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2152 			return;
2153 		break;
2154 	}
2155 
2156 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2157 		       crtc_state->psr2_man_track_ctl);
2158 
2159 	if (!crtc_state->enable_psr2_su_region_et)
2160 		return;
2161 
2162 	intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2163 		       crtc_state->pipe_srcsz_early_tpt);
2164 }
2165 
2166 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2167 				  bool full_update)
2168 {
2169 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2170 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2171 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2172 
2173 	/* SF partial frame enable has to be set even on full update */
2174 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2175 
2176 	if (full_update) {
2177 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2178 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
2179 		goto exit;
2180 	}
2181 
2182 	if (crtc_state->psr2_su_area.y1 == -1)
2183 		goto exit;
2184 
2185 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2186 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2187 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2188 	} else {
2189 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2190 			    crtc_state->psr2_su_area.y1 % 4 ||
2191 			    crtc_state->psr2_su_area.y2 % 4);
2192 
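		/* Pre-ADLP hardware programs the SU region in 4-line units, 1-based. */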
2193 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2194 			crtc_state->psr2_su_area.y1 / 4 + 1);
2195 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2196 			crtc_state->psr2_su_area.y2 / 4 + 1);
2197 	}
2198 exit:
2199 	crtc_state->psr2_man_track_ctl = val;
2200 }
2201 
2202 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2203 					  bool full_update)
2204 {
2205 	int width, height;
2206 
2207 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2208 		return 0;
2209 
2210 	width = drm_rect_width(&crtc_state->psr2_su_area);
2211 	height = drm_rect_height(&crtc_state->psr2_su_area);
2212 
2213 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2214 }
2215 
2216 static void clip_area_update(struct drm_rect *overlap_damage_area,
2217 			     struct drm_rect *damage_area,
2218 			     struct drm_rect *pipe_src)
2219 {
2220 	if (!drm_rect_intersect(damage_area, pipe_src))
2221 		return;
2222 
2223 	if (overlap_damage_area->y1 == -1) {
2224 		overlap_damage_area->y1 = damage_area->y1;
2225 		overlap_damage_area->y2 = damage_area->y2;
2226 		return;
2227 	}
2228 
2229 	if (damage_area->y1 < overlap_damage_area->y1)
2230 		overlap_damage_area->y1 = damage_area->y1;
2231 
2232 	if (damage_area->y2 > overlap_damage_area->y2)
2233 		overlap_damage_area->y2 = damage_area->y2;
2234 }
2235 
2236 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2237 {
2238 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2239 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2240 	u16 y_alignment;
2241 
2242 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2243 	if (crtc_state->dsc.compression_enable &&
2244 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2245 		y_alignment = vdsc_cfg->slice_height;
2246 	else
2247 		y_alignment = crtc_state->su_y_granularity;
2248 
2249 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2250 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2251 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2252 						y_alignment) + 1) * y_alignment;
2253 }
2254 
2255 /*
2256  * When early transport is in use we need to extend the SU area to fully
2257  * cover the cursor when the cursor is in the SU area.
2258  */
2259 static void
2260 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2261 				  struct intel_crtc *crtc,
2262 				  bool *cursor_in_su_area)
2263 {
2264 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2265 	struct intel_plane_state *new_plane_state;
2266 	struct intel_plane *plane;
2267 	int i;
2268 
2269 	if (!crtc_state->enable_psr2_su_region_et)
2270 		return;
2271 
2272 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2273 		struct drm_rect inter;
2274 
2275 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2276 			continue;
2277 
2278 		if (plane->id != PLANE_CURSOR)
2279 			continue;
2280 
2281 		if (!new_plane_state->uapi.visible)
2282 			continue;
2283 
2284 		inter = crtc_state->psr2_su_area;
2285 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2286 			continue;
2287 
2288 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2289 				 &crtc_state->pipe_src);
2290 		*cursor_in_su_area = true;
2291 	}
2292 }
2293 
2294 /*
2295  * TODO: Not clear how to handle planes with a negative position;
2296  * also, planes are not updated if they have a negative X
2297  * position, so for now do a full update in these cases.
2298  *
2299  * Plane scaling and rotation are not supported by selective fetch and both
2300  * properties can change without a modeset, so they need to be checked at
2301  * every atomic commit.
2302  */
2303 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2304 {
2305 	if (plane_state->uapi.dst.y1 < 0 ||
2306 	    plane_state->uapi.dst.x1 < 0 ||
2307 	    plane_state->scaler_id >= 0 ||
2308 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2309 		return false;
2310 
2311 	return true;
2312 }
2313 
2314 /*
2315  * Check for pipe properties that are not supported by selective fetch.
2316  *
2317  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2318  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2319  * enabled and going to the full update path.
2320  */
2321 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2322 {
2323 	if (crtc_state->scaler_state.scaler_id >= 0)
2324 		return false;
2325 
2326 	return true;
2327 }
2328 
2329 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2330 				struct intel_crtc *crtc)
2331 {
2332 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2333 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2334 	struct intel_plane_state *new_plane_state, *old_plane_state;
2335 	struct intel_plane *plane;
2336 	bool full_update = false, cursor_in_su_area = false;
2337 	int i, ret;
2338 
2339 	if (!crtc_state->enable_psr2_sel_fetch)
2340 		return 0;
2341 
2342 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2343 		full_update = true;
2344 		goto skip_sel_fetch_set_loop;
2345 	}
2346 
2347 	crtc_state->psr2_su_area.x1 = 0;
2348 	crtc_state->psr2_su_area.y1 = -1;
2349 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2350 	crtc_state->psr2_su_area.y2 = -1;
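	/* y1/y2 == -1 means no damage accumulated yet; clip_area_update() fills it in. */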
2351 
2352 	/*
2353 	 * Calculate the minimal selective fetch area of each plane and the
2354 	 * pipe damaged area.
2355 	 * In the next loop the plane selective fetch area will actually be set
2356 	 * using the whole pipe damaged area.
2357 	 */
2358 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2359 					     new_plane_state, i) {
2360 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2361 						      .x2 = INT_MAX };
2362 
2363 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2364 			continue;
2365 
2366 		if (!new_plane_state->uapi.visible &&
2367 		    !old_plane_state->uapi.visible)
2368 			continue;
2369 
2370 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2371 			full_update = true;
2372 			break;
2373 		}
2374 
2375 		/*
2376 		 * If the visibility changed or the plane moved, mark the whole
2377 		 * plane area as damaged as it needs a complete redraw in both the
2378 		 * new and old positions.
2379 		 */
2380 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2381 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2382 				     &old_plane_state->uapi.dst)) {
2383 			if (old_plane_state->uapi.visible) {
2384 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2385 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2386 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2387 						 &crtc_state->pipe_src);
2388 			}
2389 
2390 			if (new_plane_state->uapi.visible) {
2391 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2392 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2393 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2394 						 &crtc_state->pipe_src);
2395 			}
2396 			continue;
2397 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2398 			/* If alpha changed mark the whole plane area as damaged */
2399 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2400 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2401 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2402 					 &crtc_state->pipe_src);
2403 			continue;
2404 		}
2405 
2406 		src = drm_plane_state_src(&new_plane_state->uapi);
2407 		drm_rect_fp_to_int(&src, &src);
2408 
2409 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2410 						     &new_plane_state->uapi, &damaged_area))
2411 			continue;
2412 
2413 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2414 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2415 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2416 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2417 
2418 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2419 	}
2420 
2421 	/*
2422 	 * TODO: For now we are just using a full update in case
2423 	 * selective fetch area calculation fails. To optimize this we
2424 	 * should identify cases where this happens and fix the area
2425 	 * calculation for those.
2426 	 */
2427 	if (crtc_state->psr2_su_area.y1 == -1) {
2428 		drm_info_once(&dev_priv->drm,
2429 			      "Selective fetch area calculation failed in pipe %c\n",
2430 			      pipe_name(crtc->pipe));
2431 		full_update = true;
2432 	}
2433 
2434 	if (full_update)
2435 		goto skip_sel_fetch_set_loop;
2436 
2437 	/* Wa_14014971492 */
2438 	if (!crtc_state->has_panel_replay &&
2439 	    ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2440 	      IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
2441 	    crtc_state->splitter.enable)
2442 		crtc_state->psr2_su_area.y1 = 0;
2443 
2444 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2445 	if (ret)
2446 		return ret;
2447 
2448 	/*
2449 	 * Adjust the SU area to cover the cursor fully as necessary (early
2450 	 * transport). This needs to be done after
2451 	 * drm_atomic_add_affected_planes() to ensure a visible cursor is added
2452 	 * to the affected planes even when the cursor itself is not updated.
2453 	 */
2454 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2455 
2456 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2457 
2458 	/*
2459 	 * Now that we have the pipe damaged area, check if it intersects with
2460 	 * each plane; if it does, set the plane selective fetch area.
2461 	 */
2462 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2463 					     new_plane_state, i) {
2464 		struct drm_rect *sel_fetch_area, inter;
2465 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2466 
2467 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2468 		    !new_plane_state->uapi.visible)
2469 			continue;
2470 
2471 		inter = crtc_state->psr2_su_area;
2472 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2473 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2474 			sel_fetch_area->y1 = -1;
2475 			sel_fetch_area->y2 = -1;
2476 			/*
2477 			 * if plane sel fetch was previously enabled ->
2478 			 * disable it
2479 			 */
2480 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2481 				crtc_state->update_planes |= BIT(plane->id);
2482 
2483 			continue;
2484 		}
2485 
2486 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2487 			full_update = true;
2488 			break;
2489 		}
2490 
2491 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2492 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2493 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2494 		crtc_state->update_planes |= BIT(plane->id);
2495 
2496 		/*
2497 		 * Sel_fetch_area is calculated for the UV plane. Use the
2498 		 * same area for the Y plane as well.
2499 		 */
2500 		if (linked) {
2501 			struct intel_plane_state *linked_new_plane_state;
2502 			struct drm_rect *linked_sel_fetch_area;
2503 
2504 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2505 			if (IS_ERR(linked_new_plane_state))
2506 				return PTR_ERR(linked_new_plane_state);
2507 
2508 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2509 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2510 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2511 			crtc_state->update_planes |= BIT(linked->id);
2512 		}
2513 	}
2514 
2515 skip_sel_fetch_set_loop:
2516 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2517 	crtc_state->pipe_srcsz_early_tpt =
2518 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2519 	return 0;
2520 }
2521 
2522 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2523 				struct intel_crtc *crtc)
2524 {
2525 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2526 	const struct intel_crtc_state *old_crtc_state =
2527 		intel_atomic_get_old_crtc_state(state, crtc);
2528 	const struct intel_crtc_state *new_crtc_state =
2529 		intel_atomic_get_new_crtc_state(state, crtc);
2530 	struct intel_encoder *encoder;
2531 
2532 	if (!HAS_PSR(i915))
2533 		return;
2534 
2535 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2536 					     old_crtc_state->uapi.encoder_mask) {
2537 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2538 		struct intel_psr *psr = &intel_dp->psr;
2539 		bool needs_to_disable = false;
2540 
2541 		mutex_lock(&psr->lock);
2542 
2543 		/*
2544 		 * Reasons to disable:
2545 		 * - PSR disabled in new state
2546 		 * - All planes will go inactive
2547 		 * - Changing between PSR versions
2548 		 * - Region Early Transport changing
2549 		 * - Display WA #1136: skl, bxt
2550 		 */
2551 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2552 		needs_to_disable |= !new_crtc_state->has_psr;
2553 		needs_to_disable |= !new_crtc_state->active_planes;
2554 		needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
2555 		needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
2556 			psr->su_region_et_enabled;
2557 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2558 			new_crtc_state->wm_level_disabled;
2559 
2560 		if (psr->enabled && needs_to_disable)
2561 			intel_psr_disable_locked(intel_dp);
2562 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2563 			/* Wa_14015648006 */
2564 			wm_optimization_wa(intel_dp, new_crtc_state);
2565 
2566 		mutex_unlock(&psr->lock);
2567 	}
2568 }
2569 
2570 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2571 				 struct intel_crtc *crtc)
2572 {
2573 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2574 	const struct intel_crtc_state *crtc_state =
2575 		intel_atomic_get_new_crtc_state(state, crtc);
2576 	struct intel_encoder *encoder;
2577 
2578 	if (!crtc_state->has_psr)
2579 		return;
2580 
2581 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2582 					     crtc_state->uapi.encoder_mask) {
2583 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2584 		struct intel_psr *psr = &intel_dp->psr;
2585 		bool keep_disabled = false;
2586 
2587 		mutex_lock(&psr->lock);
2588 
2589 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2590 
2591 		keep_disabled |= psr->sink_not_reliable;
2592 		keep_disabled |= !crtc_state->active_planes;
2593 
2594 		/* Display WA #1136: skl, bxt */
2595 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2596 			crtc_state->wm_level_disabled;
2597 
2598 		if (!psr->enabled && !keep_disabled)
2599 			intel_psr_enable_locked(intel_dp, crtc_state);
2600 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2601 			/* Wa_14015648006 */
2602 			wm_optimization_wa(intel_dp, crtc_state);
2603 
2604 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2605 		if (crtc_state->crc_enabled && psr->enabled)
2606 			psr_force_hw_tracking_exit(intel_dp);
2607 
2608 		/*
2609 		 * Clear possible busy bits in case we have an
2610 		 * invalidate -> flip -> flush sequence.
2611 		 */
2612 		intel_dp->psr.busy_frontbuffer_bits = 0;
2613 
2614 		mutex_unlock(&psr->lock);
2615 	}
2616 }
2617 
2618 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2619 {
2620 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2621 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2622 
2623 	/*
2624 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2625 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2626 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2627 	 */
2628 	return intel_de_wait_for_clear(dev_priv,
2629 				       EDP_PSR2_STATUS(dev_priv, cpu_transcoder),
2630 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2631 }
2632 
2633 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2634 {
2635 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2636 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2637 
2638 	/*
2639 	 * From bspec: Panel Self Refresh (BDW+)
2640 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2641 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2642 	 * defensive enough to cover everything.
2643 	 */
2644 	return intel_de_wait_for_clear(dev_priv,
2645 				       psr_status_reg(dev_priv, cpu_transcoder),
2646 				       EDP_PSR_STATUS_STATE_MASK, 50);
2647 }
2648 
2649 static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2650 {
2651 	return intel_dp_is_edp(intel_dp) ?
2652 		_psr2_ready_for_pipe_update_locked(intel_dp) :
2653 		_psr1_ready_for_pipe_update_locked(intel_dp);
2654 }
2655 
2656 /**
2657  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2658  * @new_crtc_state: new CRTC state
2659  *
2660  * This function is expected to be called from pipe_update_start() where it is
2661  * not expected to race with PSR enable or disable.
2662  */
2663 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2664 {
2665 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2666 	struct intel_encoder *encoder;
2667 
2668 	if (!new_crtc_state->has_psr)
2669 		return;
2670 
2671 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2672 					     new_crtc_state->uapi.encoder_mask) {
2673 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2674 		int ret;
2675 
2676 		lockdep_assert_held(&intel_dp->psr.lock);
2677 
2678 		if (!intel_dp->psr.enabled)
2679 			continue;
2680 
2681 		if (intel_dp->psr.panel_replay_enabled)
2682 			ret = _panel_replay_ready_for_pipe_update_locked(intel_dp);
2683 		else if (intel_dp->psr.sel_update_enabled)
2684 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2685 		else
2686 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2687 
2688 		if (ret)
2689 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2690 	}
2691 }
2692 
2693 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2694 {
2695 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2696 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2697 	i915_reg_t reg;
2698 	u32 mask;
2699 	int err;
2700 
2701 	if (!intel_dp->psr.enabled)
2702 		return false;
2703 
2704 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2705 					  intel_dp->psr.panel_replay_enabled)) {
2706 		reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
2707 		mask = EDP_PSR2_STATUS_STATE_MASK;
2708 	} else {
2709 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2710 		mask = EDP_PSR_STATUS_STATE_MASK;
2711 	}
2712 
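	/* Drop the PSR lock while waiting so invalidate/flush are not blocked. */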
2713 	mutex_unlock(&intel_dp->psr.lock);
2714 
2715 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2716 	if (err)
2717 		drm_err(&dev_priv->drm,
2718 			"Timed out waiting for PSR Idle for re-enable\n");
2719 
2720 	/* After the unlocked wait, verify that PSR is still wanted! */
2721 	mutex_lock(&intel_dp->psr.lock);
2722 	return err == 0 && intel_dp->psr.enabled;
2723 }
2724 
2725 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2726 {
2727 	struct drm_connector_list_iter conn_iter;
2728 	struct drm_modeset_acquire_ctx ctx;
2729 	struct drm_atomic_state *state;
2730 	struct drm_connector *conn;
2731 	int err = 0;
2732 
2733 	state = drm_atomic_state_alloc(&dev_priv->drm);
2734 	if (!state)
2735 		return -ENOMEM;
2736 
2737 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2738 
2739 	state->acquire_ctx = &ctx;
2740 	to_intel_atomic_state(state)->internal = true;
2741 
2742 retry:
2743 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2744 	drm_for_each_connector_iter(conn, &conn_iter) {
2745 		struct drm_connector_state *conn_state;
2746 		struct drm_crtc_state *crtc_state;
2747 
2748 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2749 			continue;
2750 
2751 		conn_state = drm_atomic_get_connector_state(state, conn);
2752 		if (IS_ERR(conn_state)) {
2753 			err = PTR_ERR(conn_state);
2754 			break;
2755 		}
2756 
2757 		if (!conn_state->crtc)
2758 			continue;
2759 
2760 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2761 		if (IS_ERR(crtc_state)) {
2762 			err = PTR_ERR(crtc_state);
2763 			break;
2764 		}
2765 
2766 		/* Mark mode as changed to trigger a pipe->update() */
2767 		crtc_state->mode_changed = true;
2768 	}
2769 	drm_connector_list_iter_end(&conn_iter);
2770 
2771 	if (err == 0)
2772 		err = drm_atomic_commit(state);
2773 
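	/* Standard modeset backoff: on deadlock, drop the locks and retry. */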
2774 	if (err == -EDEADLK) {
2775 		drm_atomic_state_clear(state);
2776 		err = drm_modeset_backoff(&ctx);
2777 		if (!err)
2778 			goto retry;
2779 	}
2780 
2781 	drm_modeset_drop_locks(&ctx);
2782 	drm_modeset_acquire_fini(&ctx);
2783 	drm_atomic_state_put(state);
2784 
2785 	return err;
2786 }
2787 
2788 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2789 {
2790 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2791 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2792 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2793 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
2794 	u32 old_mode, old_disable_bits;
2795 	int ret;
2796 
2797 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2798 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
2799 		    I915_PSR_DEBUG_MODE_MASK) ||
2800 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2801 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2802 		return -EINVAL;
2803 	}
2804 
2805 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2806 	if (ret)
2807 		return ret;
2808 
2809 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2810 	old_disable_bits = intel_dp->psr.debug &
2811 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2812 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
2813 
2814 	intel_dp->psr.debug = val;
2815 
2816 	/*
2817 	 * Do it right away if it's already enabled, otherwise it will be done
2818 	 * when enabling the source.
2819 	 */
2820 	if (intel_dp->psr.enabled)
2821 		psr_irq_control(intel_dp);
2822 
2823 	mutex_unlock(&intel_dp->psr.lock);
2824 
2825 	if (old_mode != mode || old_disable_bits != disable_bits)
2826 		ret = intel_psr_fastset_force(dev_priv);
2827 
2828 	return ret;
2829 }
2830 
2831 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2832 {
2833 	struct intel_psr *psr = &intel_dp->psr;
2834 
2835 	intel_psr_disable_locked(intel_dp);
2836 	psr->sink_not_reliable = true;
2837 	/* let's make sure that the sink is awake */
2838 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2839 }
2840 
2841 static void intel_psr_work(struct work_struct *work)
2842 {
2843 	struct intel_dp *intel_dp =
2844 		container_of(work, typeof(*intel_dp), psr.work);
2845 
2846 	mutex_lock(&intel_dp->psr.lock);
2847 
2848 	if (!intel_dp->psr.enabled)
2849 		goto unlock;
2850 
2851 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2852 		intel_psr_handle_irq(intel_dp);
2853 
2854 	/*
2855 	 * We have to make sure PSR is ready for re-enable,
2856 	 * otherwise it stays disabled until the next full enable/disable cycle.
2857 	 * PSR might take some time to get fully disabled
2858 	 * and be ready for re-enable.
2859 	 */
2860 	if (!__psr_wait_for_idle_locked(intel_dp))
2861 		goto unlock;
2862 
2863 	/*
2864 	 * The delayed work can race with an invalidate hence we need to
2865 	 * recheck. Since psr_flush first clears this and then reschedules we
2866 	 * won't ever miss a flush when bailing out here.
2867 	 */
2868 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2869 		goto unlock;
2870 
2871 	intel_psr_activate(intel_dp);
2872 unlock:
2873 	mutex_unlock(&intel_dp->psr.lock);
2874 }
2875 
2876 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2877 {
2878 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2879 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2880 
2881 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2882 		u32 val;
2883 
2884 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2885 			/* Send one update, otherwise lag is observed on screen */
2886 			intel_de_write(dev_priv,
2887 				       CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
2888 				       0);
2889 			return;
2890 		}
2891 
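		/*
		 * Switch to continuous full frame fetch; _psr_flush_handle()
		 * restores selective updates once the frontbuffer is flushed.
		 */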
2892 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2893 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2894 		      man_trk_ctl_continuos_full_frame(dev_priv);
2895 		intel_de_write(dev_priv,
2896 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2897 			       val);
2898 		intel_de_write(dev_priv,
2899 			       CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
2900 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2901 	} else {
2902 		intel_psr_exit(intel_dp);
2903 	}
2904 }
2905 
2906 /**
2907  * intel_psr_invalidate - Invalidate PSR
2908  * @dev_priv: i915 device
2909  * @frontbuffer_bits: frontbuffer plane tracking bits
2910  * @origin: which operation caused the invalidate
2911  *
2912  * Since the hardware frontbuffer tracking has gaps we need to integrate
2913  * with the software frontbuffer tracking. This function gets called every
2914  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2915  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2916  *
2917  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2918  */
2919 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2920 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2921 {
2922 	struct intel_encoder *encoder;
2923 
2924 	if (origin == ORIGIN_FLIP)
2925 		return;
2926 
2927 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2928 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2929 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2930 
2931 		mutex_lock(&intel_dp->psr.lock);
2932 		if (!intel_dp->psr.enabled) {
2933 			mutex_unlock(&intel_dp->psr.lock);
2934 			continue;
2935 		}
2936 
2937 		pipe_frontbuffer_bits &=
2938 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2939 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2940 
2941 		if (pipe_frontbuffer_bits)
2942 			_psr_invalidate_handle(intel_dp);
2943 
2944 		mutex_unlock(&intel_dp->psr.lock);
2945 	}
2946 }
2947 /*
2948  * When we completely rely on PSR2 S/W tracking in the future,
2949  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
2950  * event as well, therefore tgl_dc3co_flush_locked() will need to be changed
2951  * accordingly.
2952  */
2953 static void
2954 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2955 		       enum fb_op_origin origin)
2956 {
2957 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2958 
2959 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
2960 	    !intel_dp->psr.active)
2961 		return;
2962 
2963 	/*
2964 	 * Every frontbuffer flush/flip event pushes back the delayed work;
2965 	 * when the delayed work finally runs, the display has been idle.
2966 	 */
2967 	if (!(frontbuffer_bits &
2968 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2969 		return;
2970 
2971 	tgl_psr2_enable_dc3co(intel_dp);
2972 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2973 			 intel_dp->psr.dc3co_exit_delay);
2974 }
2975 
2976 static void _psr_flush_handle(struct intel_dp *intel_dp)
2977 {
2978 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2979 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2980 
2981 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2982 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2983 			/* can we turn CFF off? */
2984 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2985 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2986 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2987 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2988 					man_trk_ctl_continuos_full_frame(dev_priv);
2989 
2990 				/*
2991 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2992 				 * updates. Still keep the CFF bit enabled as we don't have a proper
2993 				 * SU configuration in case an update is sent for any reason after
2994 				 * the SFF bit gets cleared by the HW on the next vblank.
2995 				 */
2996 				intel_de_write(dev_priv,
2997 					       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2998 					       val);
2999 				intel_de_write(dev_priv,
3000 					       CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
3001 					       0);
3002 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3003 			}
3004 		} else {
3005 			/*
3006 			 * continuous full frame is disabled, only a single full
3007 			 * frame is required
3008 			 */
3009 			psr_force_hw_tracking_exit(intel_dp);
3010 		}
3011 	} else {
3012 		psr_force_hw_tracking_exit(intel_dp);
3013 
3014 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3015 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3016 	}
3017 }
3018 
3019 /**
3020  * intel_psr_flush - Flush PSR
3021  * @dev_priv: i915 device
3022  * @frontbuffer_bits: frontbuffer plane tracking bits
3023  * @origin: which operation caused the flush
3024  *
3025  * Since the hardware frontbuffer tracking has gaps we need to integrate
3026  * with the software frontbuffer tracking. This function gets called every
3027  * time frontbuffer rendering has completed and flushed out to memory. PSR
3028  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3029  *
3030  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3031  */
3032 void intel_psr_flush(struct drm_i915_private *dev_priv,
3033 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3034 {
3035 	struct intel_encoder *encoder;
3036 
3037 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3038 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3039 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3040 
3041 		mutex_lock(&intel_dp->psr.lock);
3042 		if (!intel_dp->psr.enabled) {
3043 			mutex_unlock(&intel_dp->psr.lock);
3044 			continue;
3045 		}
3046 
3047 		pipe_frontbuffer_bits &=
3048 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3049 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3050 
3051 		/*
3052 		 * If PSR is paused by an explicit intel_psr_pause() call,
3053 		 * we have to ensure that the PSR is not activated until
3054 		 * intel_psr_resume() is called.
3055 		 */
3056 		if (intel_dp->psr.paused)
3057 			goto unlock;
3058 
3059 		if (origin == ORIGIN_FLIP ||
3060 		    (origin == ORIGIN_CURSOR_UPDATE &&
3061 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3062 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3063 			goto unlock;
3064 		}
3065 
3066 		if (pipe_frontbuffer_bits == 0)
3067 			goto unlock;
3068 
3069 		/* By definition flush = invalidate + flush */
3070 		_psr_flush_handle(intel_dp);
3071 unlock:
3072 		mutex_unlock(&intel_dp->psr.lock);
3073 	}
3074 }
3075 
3076 /**
3077  * intel_psr_init - Init basic PSR work and mutex.
3078  * @intel_dp: Intel DP
3079  *
3080  * This function is called after initializing the connector
3081  * (connector initialization handles the connector capabilities)
3082  * and it initializes the basic PSR stuff for each DP encoder.
3083  */
3084 void intel_psr_init(struct intel_dp *intel_dp)
3085 {
3086 	struct intel_connector *connector = intel_dp->attached_connector;
3087 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3088 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3089 
3090 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
3091 		return;
3092 
3093 	/*
3094 	 * HSW spec explicitly says PSR is tied to port A.
3095 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
3096 	 * BDW, GEN9 and GEN11 are not validated by the HW team on transcoders
3097 	 * other than the eDP one.
3098 	 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
3099 	 * so let's keep it hardcoded to PORT_A for those.
3100 	 * GEN12 onwards supports an instance of PSR registers per transcoder.
3101 	 */
3102 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
3103 		drm_dbg_kms(&dev_priv->drm,
3104 			    "PSR condition failed: Port not supported\n");
3105 		return;
3106 	}
3107 
3108 	if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
3109 		intel_dp->psr.source_panel_replay_support = true;
3110 	else
3111 		intel_dp->psr.source_support = true;
3112 
3113 	/* Set link_standby x link_off defaults */
3114 	if (DISPLAY_VER(dev_priv) < 12)
3115 		/* For platforms up to TGL let's respect the VBT again */
3116 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3117 
3118 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3119 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3120 	mutex_init(&intel_dp->psr.lock);
3121 }
3122 
3123 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3124 					   u8 *status, u8 *error_status)
3125 {
3126 	struct drm_dp_aux *aux = &intel_dp->aux;
3127 	int ret;
3128 	unsigned int offset;
3129 
3130 	offset = intel_dp->psr.panel_replay_enabled ?
3131 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3132 
3133 	ret = drm_dp_dpcd_readb(aux, offset, status);
3134 	if (ret != 1)
3135 		return ret;
3136 
3137 	offset = intel_dp->psr.panel_replay_enabled ?
3138 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3139 
3140 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3141 	if (ret != 1)
3142 		return ret;
3143 
3144 	*status = *status & DP_PSR_SINK_STATE_MASK;
3145 
3146 	return 0;
3147 }
3148 
3149 static void psr_alpm_check(struct intel_dp *intel_dp)
3150 {
3151 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3152 	struct drm_dp_aux *aux = &intel_dp->aux;
3153 	struct intel_psr *psr = &intel_dp->psr;
3154 	u8 val;
3155 	int r;
3156 
3157 	if (!psr->sel_update_enabled)
3158 		return;
3159 
3160 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3161 	if (r != 1) {
3162 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
3163 		return;
3164 	}
3165 
3166 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3167 		intel_psr_disable_locked(intel_dp);
3168 		psr->sink_not_reliable = true;
3169 		drm_dbg_kms(&dev_priv->drm,
3170 			    "ALPM lock timeout error, disabling PSR\n");
3171 
3172 		/* Clearing error */
3173 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3174 	}
3175 }
3176 
3177 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3178 {
3179 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3180 	struct intel_psr *psr = &intel_dp->psr;
3181 	u8 val;
3182 	int r;
3183 
3184 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3185 	if (r != 1) {
3186 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3187 		return;
3188 	}
3189 
3190 	if (val & DP_PSR_CAPS_CHANGE) {
3191 		intel_psr_disable_locked(intel_dp);
3192 		psr->sink_not_reliable = true;
3193 		drm_dbg_kms(&dev_priv->drm,
3194 			    "Sink PSR capability changed, disabling PSR\n");
3195 
3196 		/* Clearing it */
3197 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3198 	}
3199 }
3200 
3201 /*
3202  * The following error bits have the same values for PSR and Panel Replay:
3203  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3204  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3205  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3206  * so this function relies on the PSR definitions for both modes.
3207  */
3208 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3209 {
3210 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3211 	struct intel_psr *psr = &intel_dp->psr;
3212 	u8 status, error_status;
3213 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3214 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3215 			  DP_PSR_LINK_CRC_ERROR;
3216 
3217 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3218 		return;
3219 
3220 	mutex_lock(&psr->lock);
3221 
3222 	if (!psr->enabled)
3223 		goto exit;
3224 
3225 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3226 		drm_err(&dev_priv->drm,
3227 			"Error reading PSR status or error status\n");
3228 		goto exit;
3229 	}
3230 
3231 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3232 	    (error_status & errors)) {
3233 		intel_psr_disable_locked(intel_dp);
3234 		psr->sink_not_reliable = true;
3235 	}
3236 
3237 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3238 	    !error_status)
3239 		drm_dbg_kms(&dev_priv->drm,
3240 			    "PSR sink internal error, disabling PSR\n");
3241 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3242 		drm_dbg_kms(&dev_priv->drm,
3243 			    "PSR RFB storage error, disabling PSR\n");
3244 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3245 		drm_dbg_kms(&dev_priv->drm,
3246 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3247 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3248 		drm_dbg_kms(&dev_priv->drm,
3249 			    "PSR Link CRC error, disabling PSR\n");
3250 
3251 	if (error_status & ~errors)
3252 		drm_err(&dev_priv->drm,
3253 			"PSR_ERROR_STATUS unhandled errors %x\n",
3254 			error_status & ~errors);
3255 	/* clear status register */
3256 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3257 
3258 	if (!psr->panel_replay_enabled) {
3259 		psr_alpm_check(intel_dp);
3260 		psr_capability_changed_check(intel_dp);
3261 	}
3262 
3263 exit:
3264 	mutex_unlock(&psr->lock);
3265 }
3266 
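/**
 * intel_psr_enabled - check if PSR is currently enabled
 * @intel_dp: Intel DP
 *
 * Returns: true if PSR is currently enabled on this port, reading the state
 * under the PSR lock.
 */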
3267 bool intel_psr_enabled(struct intel_dp *intel_dp)
3268 {
3269 	bool ret;
3270 
3271 	if (!CAN_PSR(intel_dp))
3272 		return false;
3273 
3274 	mutex_lock(&intel_dp->psr.lock);
3275 	ret = intel_dp->psr.enabled;
3276 	mutex_unlock(&intel_dp->psr.lock);
3277 
3278 	return ret;
3279 }
3280 
3281 /**
3282  * intel_psr_lock - grab PSR lock
3283  * @crtc_state: the crtc state
3284  *
3285  * This is initially meant to be used around the CRTC update, when
3286  * vblank-sensitive registers are updated and we need to grab the lock
3287  * before that to avoid vblank evasion.
3288  */
3289 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3290 {
3291 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3292 	struct intel_encoder *encoder;
3293 
3294 	if (!crtc_state->has_psr)
3295 		return;
3296 
3297 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3298 					     crtc_state->uapi.encoder_mask) {
3299 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3300 
3301 		mutex_lock(&intel_dp->psr.lock);
3302 		break;
3303 	}
3304 }
3305 
3306 /**
3307  * intel_psr_unlock - release PSR lock
3308  * @crtc_state: the crtc state
3309  *
3310  * Release the PSR lock that was held during pipe update.
3311  */
3312 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3313 {
3314 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3315 	struct intel_encoder *encoder;
3316 
3317 	if (!crtc_state->has_psr)
3318 		return;
3319 
3320 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3321 					     crtc_state->uapi.encoder_mask) {
3322 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3323 
3324 		mutex_unlock(&intel_dp->psr.lock);
3325 		break;
3326 	}
3327 }
3328 
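/*
 * Print the source hardware's live PSR state for debugfs, decoding the state
 * field from EDP_PSR2_STATUS when an eDP panel has selective update or Panel
 * Replay enabled, and from the PSR1 status register otherwise.
 */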
3329 static void
3330 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3331 {
3332 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3333 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3334 	const char *status = "unknown";
3335 	u32 val, status_val;
3336 
3337 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3338 					  intel_dp->psr.panel_replay_enabled)) {
3339 		static const char * const live_status[] = {
3340 			"IDLE",
3341 			"CAPTURE",
3342 			"CAPTURE_FS",
3343 			"SLEEP",
3344 			"BUFON_FW",
3345 			"ML_UP",
3346 			"SU_STANDBY",
3347 			"FAST_SLEEP",
3348 			"DEEP_SLEEP",
3349 			"BUF_ON",
3350 			"TG_ON"
3351 		};
3352 		val = intel_de_read(dev_priv,
3353 				    EDP_PSR2_STATUS(dev_priv, cpu_transcoder));
3354 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3355 		if (status_val < ARRAY_SIZE(live_status))
3356 			status = live_status[status_val];
3357 	} else {
3358 		static const char * const live_status[] = {
3359 			"IDLE",
3360 			"SRDONACK",
3361 			"SRDENT",
3362 			"BUFOFF",
3363 			"BUFON",
3364 			"AUXACK",
3365 			"SRDOFFACK",
3366 			"SRDENT_ON",
3367 		};
3368 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3369 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3370 		if (status_val < ARRAY_SIZE(live_status))
3371 			status = live_status[status_val];
3372 	}
3373 
3374 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3375 }
3376 
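/*
 * Print the sink's PSR and Panel Replay capabilities, based on the cached
 * DPCD values, to the debugfs seq_file.
 */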
3377 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3378 				      struct seq_file *m)
3379 {
3380 	struct intel_psr *psr = &intel_dp->psr;
3381 
3382 	seq_printf(m, "Sink support: PSR = %s",
3383 		   str_yes_no(psr->sink_support));
3384 
3385 	if (psr->sink_support)
3386 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3387 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3388 		seq_printf(m, " (Early Transport)");
3389 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3390 	seq_printf(m, ", Panel Replay Selective Update = %s",
3391 		   str_yes_no(psr->sink_panel_replay_su_support));
3392 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3393 		seq_printf(m, " (Early Transport)");
3394 	seq_printf(m, "\n");
3395 }
3396 
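/*
 * Print the currently enabled mode (Panel Replay Selective Update, Panel
 * Replay, PSR2 or PSR1) and whether early transport is used for the
 * selective update region.
 */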
3397 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3398 				 struct seq_file *m)
3399 {
3400 	struct intel_psr *psr = &intel_dp->psr;
3401 	const char *status, *mode, *region_et;
3402 
3403 	if (psr->enabled)
3404 		status = " enabled";
3405 	else
3406 		status = "disabled";
3407 
3408 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3409 		mode = "Panel Replay Selective Update";
3410 	else if (psr->panel_replay_enabled)
3411 		mode = "Panel Replay";
3412 	else if (psr->sel_update_enabled)
3413 		mode = "PSR2";
3414 	else if (psr->enabled)
3415 		mode = "PSR1";
3416 	else
3417 		mode = "";
3418 
3419 	if (psr->su_region_et_enabled)
3420 		region_et = " (Early Transport)";
3421 	else
3422 		region_et = "";
3423 
3424 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3425 }
3426 
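/*
 * Dump the full PSR/Panel Replay status for debugfs: sink capabilities,
 * current mode, source control and status registers, busy frontbuffer bits,
 * the performance counter and, for selective update, the per-frame SU block
 * counts. Holds a runtime PM wakeref and the PSR lock while reading the
 * hardware state.
 */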
3427 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3428 {
3429 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3430 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3431 	struct intel_psr *psr = &intel_dp->psr;
3432 	intel_wakeref_t wakeref;
3433 	bool enabled;
3434 	u32 val, psr2_ctl;
3435 
3436 	intel_psr_sink_capability(intel_dp, m);
3437 
3438 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3439 		return 0;
3440 
3441 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3442 	mutex_lock(&psr->lock);
3443 
3444 	intel_psr_print_mode(intel_dp, m);
3445 
3446 	if (!psr->enabled) {
3447 		seq_printf(m, "PSR sink not reliable: %s\n",
3448 			   str_yes_no(psr->sink_not_reliable));
3449 
3450 		goto unlock;
3451 	}
3452 
3453 	if (psr->panel_replay_enabled) {
3454 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3455 
3456 		if (intel_dp_is_edp(intel_dp))
3457 			psr2_ctl = intel_de_read(dev_priv,
3458 						 EDP_PSR2_CTL(dev_priv,
3459 							      cpu_transcoder));
3460 
3461 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3462 	} else if (psr->sel_update_enabled) {
3463 		val = intel_de_read(dev_priv,
3464 				    EDP_PSR2_CTL(dev_priv, cpu_transcoder));
3465 		enabled = val & EDP_PSR2_ENABLE;
3466 	} else {
3467 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3468 		enabled = val & EDP_PSR_ENABLE;
3469 	}
3470 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3471 		   str_enabled_disabled(enabled), val);
3472 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
3473 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
3474 			   psr2_ctl);
3475 	psr_source_status(intel_dp, m);
3476 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3477 		   psr->busy_frontbuffer_bits);
3478 
3479 	/*
3480 	 * SKL+ perf counter is reset to 0 every time a DC state is entered
3481 	 */
3482 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3483 	seq_printf(m, "Performance counter: %u\n",
3484 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3485 
3486 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3487 		seq_printf(m, "Last attempted entry at: %lld\n",
3488 			   psr->last_entry_attempt);
3489 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3490 	}
3491 
3492 	if (psr->sel_update_enabled) {
3493 		u32 su_frames_val[3];
3494 		int frame;
3495 
3496 		/*
3497 		 * Read all 3 registers beforehand to minimize the chance of
3498 		 * crossing a frame boundary between the register reads.
3499 		 */
3500 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3501 			val = intel_de_read(dev_priv,
3502 					    PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame));
3503 			su_frames_val[frame / 3] = val;
3504 		}
3505 
3506 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3507 
3508 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3509 			u32 su_blocks;
3510 
3511 			su_blocks = su_frames_val[frame / 3] &
3512 				    PSR2_SU_STATUS_MASK(frame);
3513 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3514 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3515 		}
3516 
3517 		seq_printf(m, "PSR2 selective fetch: %s\n",
3518 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3519 	}
3520 
3521 unlock:
3522 	mutex_unlock(&psr->lock);
3523 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3524 
3525 	return 0;
3526 }
3527 
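/*
 * Device level debugfs entry: dump the PSR status of the first encoder that
 * supports PSR.
 */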
3528 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3529 {
3530 	struct drm_i915_private *dev_priv = m->private;
3531 	struct intel_dp *intel_dp = NULL;
3532 	struct intel_encoder *encoder;
3533 
3534 	if (!HAS_PSR(dev_priv))
3535 		return -ENODEV;
3536 
3537 	/* Find the first eDP which supports PSR */
3538 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3539 		intel_dp = enc_to_intel_dp(encoder);
3540 		break;
3541 	}
3542 
3543 	if (!intel_dp)
3544 		return -ENODEV;
3545 
3546 	return intel_psr_status(m, intel_dp);
3547 }
3548 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3549 
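/*
 * debugfs hook to set the PSR debug mask (I915_PSR_DEBUG_* flags) on every
 * encoder with PSR support, taking a runtime PM wakeref around each update.
 */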
3550 static int
3551 i915_edp_psr_debug_set(void *data, u64 val)
3552 {
3553 	struct drm_i915_private *dev_priv = data;
3554 	struct intel_encoder *encoder;
3555 	intel_wakeref_t wakeref;
3556 	int ret = -ENODEV;
3557 
3558 	if (!HAS_PSR(dev_priv))
3559 		return ret;
3560 
3561 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3562 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3563 
3564 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3565 
3566 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3567 
3568 		/* TODO: split to each transcoder's PSR debug state */
3569 		ret = intel_psr_debug_set(intel_dp, val);
3570 
3571 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3572 	}
3573 
3574 	return ret;
3575 }
3576 
3577 static int
3578 i915_edp_psr_debug_get(void *data, u64 *val)
3579 {
3580 	struct drm_i915_private *dev_priv = data;
3581 	struct intel_encoder *encoder;
3582 
3583 	if (!HAS_PSR(dev_priv))
3584 		return -ENODEV;
3585 
3586 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3587 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3588 
3589 		/* TODO: split to each transcoder's PSR debug state */
3590 		*val = READ_ONCE(intel_dp->psr.debug);
3591 		return 0;
3592 	}
3593 
3594 	return -ENODEV;
3595 }
3596 
3597 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3598 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3599 			"%llu\n");
3600 
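/**
 * intel_psr_debugfs_register - register device level PSR debugfs entries
 * @i915: i915 device instance
 *
 * Creates the i915_edp_psr_debug and i915_edp_psr_status files under the
 * DRM minor's debugfs directory (typically /sys/kernel/debug/dri/<minor>/).
 */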
3601 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3602 {
3603 	struct drm_minor *minor = i915->drm.primary;
3604 
3605 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3606 			    i915, &i915_edp_psr_debug_fops);
3607 
3608 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3609 			    i915, &i915_edp_psr_status_fops);
3610 }
3611 
3612 static const char *psr_mode_str(struct intel_dp *intel_dp)
3613 {
3614 	if (intel_dp->psr.panel_replay_enabled)
3615 		return "PANEL-REPLAY";
3616 	else if (intel_dp->psr.enabled)
3617 		return "PSR";
3618 
3619 	return "unknown";
3620 }
3621 
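/*
 * Connector level debugfs entry: read the sink's PSR/Panel Replay status and
 * error status over DPCD and print them in human readable form.
 */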
3622 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3623 {
3624 	struct intel_connector *connector = m->private;
3625 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3626 	static const char * const sink_status[] = {
3627 		"inactive",
3628 		"transition to active, capture and display",
3629 		"active, display from RFB",
3630 		"active, capture and display on sink device timings",
3631 		"transition to inactive, capture and display, timing re-sync",
3632 		"reserved",
3633 		"reserved",
3634 		"sink internal error",
3635 	};
3636 	const char *str;
3637 	int ret;
3638 	u8 status, error_status;
3639 
3640 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3641 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3642 		return -ENODEV;
3643 	}
3644 
3645 	if (connector->base.status != connector_status_connected)
3646 		return -ENODEV;
3647 
3648 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3649 	if (ret)
3650 		return ret;
3651 
3652 	status &= DP_PSR_SINK_STATE_MASK;
3653 	if (status < ARRAY_SIZE(sink_status))
3654 		str = sink_status[status];
3655 	else
3656 		str = "unknown";
3657 
3658 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3659 
3660 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3661 
3662 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3663 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3664 			    DP_PSR_LINK_CRC_ERROR))
3665 		seq_puts(m, ":\n");
3666 	else
3667 		seq_puts(m, "\n");
3668 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3669 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3670 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3671 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3672 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3673 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3674 
3675 	return ret;
3676 }
3677 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3678 
3679 static int i915_psr_status_show(struct seq_file *m, void *data)
3680 {
3681 	struct intel_connector *connector = m->private;
3682 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3683 
3684 	return intel_psr_status(m, intel_dp);
3685 }
3686 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3687 
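/**
 * intel_psr_connector_debugfs_add - add connector level PSR debugfs files
 * @connector: intel connector
 *
 * Creates i915_psr_sink_status and, on platforms with PSR or DP 2.0 support,
 * i915_psr_status under the connector's debugfs directory. MST connectors
 * are currently skipped.
 */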
3688 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3689 {
3690 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3691 	struct dentry *root = connector->base.debugfs_entry;
3692 
3693 	/* TODO: Add support for MST connectors as well. */
3694 	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3695 	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3696 	    connector->mst_port)
3697 		return;
3698 
3699 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3700 			    connector, &i915_psr_sink_status_fops);
3701 
3702 	if (HAS_PSR(i915) || HAS_DP20(i915))
3703 		debugfs_create_file("i915_psr_status", 0444, root,
3704 				    connector, &i915_psr_status_fops);
3705 }
3706