xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 47cebb740a83682224654a6583a20efd9f3cfeae)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_alpm.h"
31 #include "intel_atomic.h"
32 #include "intel_crtc.h"
33 #include "intel_cursor_regs.h"
34 #include "intel_ddi.h"
35 #include "intel_de.h"
36 #include "intel_display_types.h"
37 #include "intel_dp.h"
38 #include "intel_dp_aux.h"
39 #include "intel_frontbuffer.h"
40 #include "intel_hdmi.h"
41 #include "intel_psr.h"
42 #include "intel_psr_regs.h"
43 #include "intel_snps_phy.h"
44 #include "skl_universal_plane.h"
45 
46 /**
47  * DOC: Panel Self Refresh (PSR/SRD)
48  *
49  * Since Haswell the display controller supports Panel Self-Refresh on display
50  * panels which have a remote frame buffer (RFB) implemented according to the
51  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower standby
52  * states when the system is idle but the display is on, as it completely
53  * eliminates display refresh requests to DDR memory as long as the frame
54  * buffer for that display is unchanged.
55  *
56  * Panel Self Refresh must be supported by both Hardware (source) and
57  * Panel (sink).
58  *
59  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
60  * to power down the link and memory controller. For DSI panels the same idea
61  * is called "manual mode".
62  *
63  * The implementation uses the hardware-based PSR support which automatically
64  * enters/exits self-refresh mode. The hardware takes care of sending the
65  * required DP aux message and could even retrain the link (that part isn't
66  * enabled yet though). The hardware also keeps track of any frontbuffer
67  * changes to know when to exit self-refresh mode again. Unfortunately that
68  * part doesn't work too well, which is why the i915 PSR support uses the
69  * software frontbuffer tracking to make sure it doesn't miss a screen
70  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
71  * get called by the frontbuffer tracking code. Note that because of locking
72  * issues the self-refresh re-enable code is done from a work queue, which
73  * must be correctly synchronized/cancelled when shutting down the pipe.
74  *
75  * DC3CO (DC3 clock off)
76  *
77  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
78  * the clock off automatically during the PSR2 idle state.
79  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
80  * entry/exit allows the HW to enter a low-power state even when page flipping
81  * periodically (for instance a 30fps video playback scenario).
82  *
83  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
84  * it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after
85  * 6 frames. If no other flip occurs and that function is executed, DC3CO is
86  * disabled and PSR2 is configured to enter deep sleep, resetting again in case
87  * of another flip.
88  * Front buffer modifications do not trigger DC3CO activation on purpose as it
89  * would bring a lot of complexity and most modern systems will only
90  * use page flips.
91  */
92 
93 /*
94  * Description of PSR mask bits:
95  *
96  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
97  *
98  *  When unmasked (nearly) all display register writes (eg. even
99  *  SWF) trigger a PSR exit. Some registers are excluded from this
100  *  and they have a more specific mask (described below). On icl+
101  *  this bit no longer exists and is effectively always set.
102  *
103  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
104  *
105  *  When unmasked (nearly) all pipe/plane register writes
106  *  trigger a PSR exit. Some plane registers are excluded from this
107  *  and they have a more specific mask (described below).
108  *
109  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
110  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
111  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
112  *
113  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
114  *  SPR_SURF/CURBASE are not included in this and instead are
115  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
116  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
117  *
118  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
119  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
120  *
121  *  When unmasked PSR is blocked as long as the sprite
122  *  plane is enabled. skl+ with their universal planes no
123  *  longer have a mask bit like this, and no plane being
124  *  enabled blocks PSR.
125  *
126  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
127  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
128  *
129  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
130  *  this doesn't exist but CURPOS is included in the
131  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
132  *
133  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
134  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
135  *
136  *  When unmasked PSR is blocked as long as vblank and/or vsync
137  *  interrupt is unmasked in IMR *and* enabled in IER.
138  *
139  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
140  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
141  *
142  *  Selects whether PSR exit generates an extra vblank before
143  *  the first frame is transmitted. Also note the opposite polarity
144  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
145  *  unmasked==do not generate the extra vblank).
146  *
147  *  With DC states enabled the extra vblank happens after link training,
148  *  with DC states disabled it happens immediately upon the PSR exit trigger.
149  *  No idea as of now why there is a difference. HSW/BDW (which don't
150  *  even have DMC) always generate it after link training. Go figure.
151  *
152  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
153  *  and thus won't latch until the first vblank. So with DC states
154  *  enabled the register effectively uses the reset value during DC5
155  *  exit+PSR exit sequence, and thus the bit does nothing until
156  *  latched by the vblank that it was trying to prevent from being
157  *  generated in the first place. So we should probably call this
158  *  one a chicken/egg bit instead on skl+.
159  *
160  *  In standby mode (as opposed to link-off) this makes no difference
161  *  as the timing generator keeps running the whole time generating
162  *  normal periodic vblanks.
163  *
164  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
165  *  and doing so makes the behaviour match the skl+ reset value.
166  *
167  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
168  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
169  *
170  *  On BDW without this bit no vblanks whatsoever are
171  *  generated after PSR exit. On HSW this has no apparent effect.
172  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
173  *
174  * The rest of the bits are more self-explanatory and/or
175  * irrelevant for normal operation.
176  *
177  * Description of intel_crtc_state variables. has_psr, has_panel_replay and
178  * has_sel_update:
179  *
180  *  has_psr (alone):					PSR1
181  *  has_psr + has_sel_update:				PSR2
182  *  has_psr + has_panel_replay:				Panel Replay
183  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
184  *
185  * Description of some intel_psr variables. enabled, panel_replay_enabled,
186  * sel_update_enabled
187  *
188  *  enabled (alone):						PSR1
189  *  enabled + sel_update_enabled:				PSR2
190  *  enabled + panel_replay_enabled:				Panel Replay
191  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
192  */
193 
194 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
195 			   (intel_dp)->psr.source_support)
196 
197 bool intel_encoder_can_psr(struct intel_encoder *encoder)
198 {
199 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
200 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
201 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
202 	else
203 		return false;
204 }
205 
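/*
 * PSR enable precedence: the debugfs PSR debug mode is checked first, then
 * the enable_psr module parameter, and finally (when the parameter is -1)
 * the VBT panel setting.
 */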
206 static bool psr_global_enabled(struct intel_dp *intel_dp)
207 {
208 	struct intel_connector *connector = intel_dp->attached_connector;
209 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
210 
211 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
212 	case I915_PSR_DEBUG_DEFAULT:
213 		if (i915->display.params.enable_psr == -1)
214 			return connector->panel.vbt.psr.enable;
215 		return i915->display.params.enable_psr;
216 	case I915_PSR_DEBUG_DISABLE:
217 		return false;
218 	default:
219 		return true;
220 	}
221 }
222 
223 static bool psr2_global_enabled(struct intel_dp *intel_dp)
224 {
225 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
226 
227 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
228 	case I915_PSR_DEBUG_DISABLE:
229 	case I915_PSR_DEBUG_FORCE_PSR1:
230 		return false;
231 	default:
232 		if (i915->display.params.enable_psr == 1)
233 			return false;
234 		return true;
235 	}
236 }
237 
238 static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
239 {
240 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
241 
242 	if (i915->display.params.enable_psr != -1)
243 		return false;
244 
245 	return true;
246 }
247 
248 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
249 {
250 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
251 
252 	if ((i915->display.params.enable_psr != -1) ||
253 	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
254 		return false;
255 	return true;
256 }
257 
258 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
259 {
260 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
261 
262 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
263 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
264 }
265 
266 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
267 {
268 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
269 
270 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
271 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
272 }
273 
274 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
275 {
276 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
277 
278 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
279 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
280 }
281 
282 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
283 {
284 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
285 
286 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
287 		EDP_PSR_MASK(intel_dp->psr.transcoder);
288 }
289 
290 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
291 			      enum transcoder cpu_transcoder)
292 {
293 	if (DISPLAY_VER(dev_priv) >= 8)
294 		return EDP_PSR_CTL(dev_priv, cpu_transcoder);
295 	else
296 		return HSW_SRD_CTL;
297 }
298 
299 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
300 				enum transcoder cpu_transcoder)
301 {
302 	if (DISPLAY_VER(dev_priv) >= 8)
303 		return EDP_PSR_DEBUG(dev_priv, cpu_transcoder);
304 	else
305 		return HSW_SRD_DEBUG;
306 }
307 
308 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
309 				   enum transcoder cpu_transcoder)
310 {
311 	if (DISPLAY_VER(dev_priv) >= 8)
312 		return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder);
313 	else
314 		return HSW_SRD_PERF_CNT;
315 }
316 
317 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
318 				 enum transcoder cpu_transcoder)
319 {
320 	if (DISPLAY_VER(dev_priv) >= 8)
321 		return EDP_PSR_STATUS(dev_priv, cpu_transcoder);
322 	else
323 		return HSW_SRD_STATUS;
324 }
325 
326 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
327 			      enum transcoder cpu_transcoder)
328 {
329 	if (DISPLAY_VER(dev_priv) >= 12)
330 		return TRANS_PSR_IMR(dev_priv, cpu_transcoder);
331 	else
332 		return EDP_PSR_IMR;
333 }
334 
335 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
336 			      enum transcoder cpu_transcoder)
337 {
338 	if (DISPLAY_VER(dev_priv) >= 12)
339 		return TRANS_PSR_IIR(dev_priv, cpu_transcoder);
340 	else
341 		return EDP_PSR_IIR;
342 }
343 
344 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
345 				  enum transcoder cpu_transcoder)
346 {
347 	if (DISPLAY_VER(dev_priv) >= 8)
348 		return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder);
349 	else
350 		return HSW_SRD_AUX_CTL;
351 }
352 
353 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
354 				   enum transcoder cpu_transcoder, int i)
355 {
356 	if (DISPLAY_VER(dev_priv) >= 8)
357 		return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i);
358 	else
359 		return HSW_SRD_AUX_DATA(i);
360 }
361 
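/*
 * A set bit in the PSR IMR masks that interrupt: leave only the PSR error
 * interrupt unmasked, plus the pre-entry/post-exit interrupts when PSR IRQ
 * debugging is enabled. Nothing is unmasked here for Panel Replay.
 */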
362 static void psr_irq_control(struct intel_dp *intel_dp)
363 {
364 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
365 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
366 	u32 mask;
367 
368 	if (intel_dp->psr.panel_replay_enabled)
369 		return;
370 
371 	mask = psr_irq_psr_error_bit_get(intel_dp);
372 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
373 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
374 			psr_irq_pre_entry_bit_get(intel_dp);
375 
376 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
377 		     psr_irq_mask_get(intel_dp), ~mask);
378 }
379 
380 static void psr_event_print(struct drm_i915_private *i915,
381 			    u32 val, bool sel_update_enabled)
382 {
383 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
384 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
385 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
386 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
387 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
388 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
389 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
390 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
391 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
392 	if (val & PSR_EVENT_GRAPHICS_RESET)
393 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
394 	if (val & PSR_EVENT_PCH_INTERRUPT)
395 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
396 	if (val & PSR_EVENT_MEMORY_UP)
397 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
398 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
399 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
400 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
401 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
402 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
403 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
404 	if (val & PSR_EVENT_REGISTER_UPDATE)
405 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
406 	if (val & PSR_EVENT_HDCP_ENABLE)
407 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
408 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
409 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
410 	if (val & PSR_EVENT_VBI_ENABLE)
411 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
412 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
413 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
414 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
415 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
416 }
417 
418 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
419 {
420 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
421 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
422 	ktime_t time_ns =  ktime_get();
423 
424 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
425 		intel_dp->psr.last_entry_attempt = time_ns;
426 		drm_dbg_kms(&dev_priv->drm,
427 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
428 			    transcoder_name(cpu_transcoder));
429 	}
430 
431 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
432 		intel_dp->psr.last_exit = time_ns;
433 		drm_dbg_kms(&dev_priv->drm,
434 			    "[transcoder %s] PSR exit completed\n",
435 			    transcoder_name(cpu_transcoder));
436 
437 		if (DISPLAY_VER(dev_priv) >= 9) {
438 			u32 val;
439 
440 			val = intel_de_rmw(dev_priv,
441 					   PSR_EVENT(dev_priv, cpu_transcoder),
442 					   0, 0);
443 
444 			psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled);
445 		}
446 	}
447 
448 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
449 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
450 			 transcoder_name(cpu_transcoder));
451 
452 		intel_dp->psr.irq_aux_error = true;
453 
454 		/*
455 		 * If this interrupt is not masked it will keep
456 		 * firing so fast that it prevents the scheduled
457 		 * work from running.
458 		 * Also, after a PSR error we don't want to arm PSR
459 		 * again, so we don't care about unmasking the interrupt
460 		 * or unsetting irq_aux_error.
461 		 */
462 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
463 			     0, psr_irq_psr_error_bit_get(intel_dp));
464 
465 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
466 	}
467 }
468 
469 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
470 {
471 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
472 	u8 val = 8; /* assume the worst if we can't read the value */
473 
474 	if (drm_dp_dpcd_readb(&intel_dp->aux,
475 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
476 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
477 	else
478 		drm_dbg_kms(&i915->drm,
479 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
480 	return val;
481 }
482 
483 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
484 {
485 	u8 su_capability = 0;
486 
487 	if (intel_dp->psr.sink_panel_replay_su_support)
488 		drm_dp_dpcd_readb(&intel_dp->aux,
489 				  DP_PANEL_PANEL_REPLAY_CAPABILITY,
490 				  &su_capability);
491 	else
492 		su_capability = intel_dp->psr_dpcd[1];
493 
494 	return su_capability;
495 }
496 
497 static unsigned int
498 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
499 {
500 	return intel_dp->psr.sink_panel_replay_su_support ?
501 		DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
502 		DP_PSR2_SU_X_GRANULARITY;
503 }
504 
505 static unsigned int
506 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
507 {
508 	return intel_dp->psr.sink_panel_replay_su_support ?
509 		DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
510 		DP_PSR2_SU_Y_GRANULARITY;
511 }
512 
513 /*
514  * Note: Bits related to granularity are the same in the panel replay and PSR
515  * registers. Rely on the PSR definitions for these "common" bits.
516  */
517 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
518 {
519 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
520 	ssize_t r;
521 	u16 w;
522 	u8 y;
523 
524 	/*
525 	 * TODO: Do we need to take into account panel supporting both PSR and
526 	 * Panel replay?
527 	 */
528 
529 	/*
530 	 * If the sink doesn't have specific granularity requirements, set the
531 	 * legacy ones.
532 	 */
533 	if (!(intel_dp_get_su_capability(intel_dp) &
534 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
535 		/* As PSR2 HW sends full lines, we do not care about x granularity */
536 		w = 4;
537 		y = 4;
538 		goto exit;
539 	}
540 
541 	r = drm_dp_dpcd_read(&intel_dp->aux,
542 			     intel_dp_get_su_x_granularity_offset(intel_dp),
543 			     &w, 2);
544 	if (r != 2)
545 		drm_dbg_kms(&i915->drm,
546 			    "Unable to read selective update x granularity\n");
547 	/*
548 	 * Spec says that if the value read is 0 the default granularity should
549 	 * be used instead.
550 	 */
551 	if (r != 2 || w == 0)
552 		w = 4;
553 
554 	r = drm_dp_dpcd_read(&intel_dp->aux,
555 			     intel_dp_get_su_y_granularity_offset(intel_dp),
556 			     &y, 1);
557 	if (r != 1) {
558 		drm_dbg_kms(&i915->drm,
559 			    "Unable to read selective update y granularity\n");
560 		y = 4;
561 	}
562 	if (y == 0)
563 		y = 1;
564 
565 exit:
566 	intel_dp->psr.su_w_granularity = w;
567 	intel_dp->psr.su_y_granularity = y;
568 }
569 
570 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
571 {
572 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
573 
574 	if (intel_dp_is_edp(intel_dp)) {
575 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
576 			drm_dbg_kms(&i915->drm,
577 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
578 			return;
579 		}
580 
581 		if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
582 			drm_dbg_kms(&i915->drm,
583 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
584 			return;
585 		}
586 	}
587 
588 	intel_dp->psr.sink_panel_replay_support = true;
589 
590 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
591 		intel_dp->psr.sink_panel_replay_su_support = true;
592 
593 	drm_dbg_kms(&i915->drm,
594 		    "Panel replay %sis supported by panel\n",
595 		    intel_dp->psr.sink_panel_replay_su_support ?
596 		    "selective_update " : "");
597 }
598 
599 static void _psr_init_dpcd(struct intel_dp *intel_dp)
600 {
601 	struct drm_i915_private *i915 =
602 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
603 
604 	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
605 		    intel_dp->psr_dpcd[0]);
606 
607 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
608 		drm_dbg_kms(&i915->drm,
609 			    "PSR support not currently available for this panel\n");
610 		return;
611 	}
612 
613 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
614 		drm_dbg_kms(&i915->drm,
615 			    "Panel lacks power state control, PSR cannot be enabled\n");
616 		return;
617 	}
618 
619 	intel_dp->psr.sink_support = true;
620 	intel_dp->psr.sink_sync_latency =
621 		intel_dp_get_sink_sync_latency(intel_dp);
622 
623 	if (DISPLAY_VER(i915) >= 9 &&
624 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
625 		bool y_req = intel_dp->psr_dpcd[1] &
626 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
627 
628 		/*
629 		 * All panels that support PSR version 03h (PSR2 +
630 		 * Y-coordinate) can handle Y-coordinates in VSC but we are
631 		 * only sure that it is going to be used when required by the
632 		 * panel. This way the panel is capable of doing selective updates
633 		 * without an aux frame sync.
634 		 *
635 		 * To support PSR version 02h and PSR version 03h without
636 		 * Y-coordinate requirement panels we would need to enable
637 		 * GTC first.
638 		 */
639 		intel_dp->psr.sink_psr2_support = y_req &&
640 			intel_alpm_aux_wake_supported(intel_dp);
641 		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
642 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
643 	}
644 }
645 
646 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
647 {
648 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
649 			 sizeof(intel_dp->psr_dpcd));
650 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
651 			  &intel_dp->pr_dpcd);
652 
653 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
654 		_panel_replay_init_dpcd(intel_dp);
655 
656 	if (intel_dp->psr_dpcd[0])
657 		_psr_init_dpcd(intel_dp);
658 
659 	if (intel_dp->psr.sink_psr2_support ||
660 	    intel_dp->psr.sink_panel_replay_su_support)
661 		intel_dp_get_su_granularity(intel_dp);
662 }
663 
664 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
665 {
666 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
667 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
668 	u32 aux_clock_divider, aux_ctl;
669 	/* write DP_SET_POWER=D0 */
670 	static const u8 aux_msg[] = {
671 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
672 		[1] = (DP_SET_POWER >> 8) & 0xff,
673 		[2] = DP_SET_POWER & 0xff,
674 		[3] = 1 - 1,
675 		[4] = DP_SET_POWER_D0,
676 	};
677 	int i;
678 
679 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
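	/* Pack the AUX message into the PSR/SRD AUX data registers, 4 bytes per register */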
680 	for (i = 0; i < sizeof(aux_msg); i += 4)
681 		intel_de_write(dev_priv,
682 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
683 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
684 
685 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
686 
687 	/* Start with bits set for DDI_AUX_CTL register */
688 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
689 					     aux_clock_divider);
690 
691 	/* Select only valid bits for SRD_AUX_CTL */
692 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
693 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
694 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
695 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
696 
697 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
698 		       aux_ctl);
699 }
700 
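/*
 * Early transport of the SU region needs display version 20+, an eDP panel,
 * and must not be disabled via the PSR debug interface. Panel Replay
 * additionally requires sink early transport support, while PSR2 requires
 * the sink to report PSR with Y-coordinate and early transport support and
 * the enable_psr parameter to be left at its default.
 */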
701 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
702 {
703 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
704 
705 	if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) ||
706 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
707 		return false;
708 
709 	return panel_replay ?
710 		intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
711 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
712 		psr2_su_region_et_global_enabled(intel_dp);
713 }
714 
715 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
716 				      const struct intel_crtc_state *crtc_state)
717 {
718 	u8 val = DP_PANEL_REPLAY_ENABLE |
719 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
720 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
721 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
722 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
723 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
724 
725 	if (crtc_state->has_sel_update)
726 		val |= DP_PANEL_REPLAY_SU_ENABLE;
727 
728 	if (crtc_state->enable_psr2_su_region_et)
729 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
730 
731 	if (crtc_state->req_psr2_sdp_prior_scanline)
732 		panel_replay_config2 |=
733 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
734 
735 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
736 
737 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
738 			   panel_replay_config2);
739 }
740 
741 static void _psr_enable_sink(struct intel_dp *intel_dp,
742 			     const struct intel_crtc_state *crtc_state)
743 {
744 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
745 	u8 val = DP_PSR_ENABLE;
746 
747 	if (crtc_state->has_sel_update) {
748 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
749 	} else {
750 		if (intel_dp->psr.link_standby)
751 			val |= DP_PSR_MAIN_LINK_ACTIVE;
752 
753 		if (DISPLAY_VER(i915) >= 8)
754 			val |= DP_PSR_CRC_VERIFICATION;
755 	}
756 
757 	if (crtc_state->req_psr2_sdp_prior_scanline)
758 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
759 
760 	if (crtc_state->enable_psr2_su_region_et)
761 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
762 
763 	if (intel_dp->psr.entry_setup_frames > 0)
764 		val |= DP_PSR_FRAME_CAPTURE;
765 
766 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
767 }
768 
769 static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
770 				       const struct intel_crtc_state *crtc_state)
771 {
772 	u8 val;
773 
774 	/*
775 	 * eDP Panel Replay always uses ALPM.
776 	 * PSR2 uses ALPM but PSR1 doesn't.
777 	 */
778 	if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
779 					   !crtc_state->has_sel_update))
780 		return;
781 
782 	val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
783 
784 	if (crtc_state->has_panel_replay)
785 		val |= DP_ALPM_MODE_AUX_LESS;
786 
787 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
788 }
789 
790 void intel_psr_enable_sink(struct intel_dp *intel_dp,
791 			   const struct intel_crtc_state *crtc_state)
792 {
793 	intel_psr_enable_sink_alpm(intel_dp, crtc_state);
794 
795 	crtc_state->has_panel_replay ?
796 		_panel_replay_enable_sink(intel_dp, crtc_state) :
797 		_psr_enable_sink(intel_dp, crtc_state);
798 
799 	if (intel_dp_is_edp(intel_dp))
800 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
801 }
802 
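/*
 * Select the PSR1 TP1/TP2/TP3 (and TP4 on ICL+) wake-up times from the VBT,
 * or the most conservative values when psr_safest_params is set, and prefer
 * TP3 over TP2 when both source and sink support TPS3.
 */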
803 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
804 {
805 	struct intel_connector *connector = intel_dp->attached_connector;
806 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
807 	u32 val = 0;
808 
809 	if (DISPLAY_VER(dev_priv) >= 11)
810 		val |= EDP_PSR_TP4_TIME_0us;
811 
812 	if (dev_priv->display.params.psr_safest_params) {
813 		val |= EDP_PSR_TP1_TIME_2500us;
814 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
815 		goto check_tp3_sel;
816 	}
817 
818 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
819 		val |= EDP_PSR_TP1_TIME_0us;
820 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
821 		val |= EDP_PSR_TP1_TIME_100us;
822 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
823 		val |= EDP_PSR_TP1_TIME_500us;
824 	else
825 		val |= EDP_PSR_TP1_TIME_2500us;
826 
827 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
828 		val |= EDP_PSR_TP2_TP3_TIME_0us;
829 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
830 		val |= EDP_PSR_TP2_TP3_TIME_100us;
831 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
832 		val |= EDP_PSR_TP2_TP3_TIME_500us;
833 	else
834 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
835 
836 	/*
837 	 * WA 0479: hsw,bdw
838 	 * "Do not skip both TP1 and TP2/TP3"
839 	 */
840 	if (DISPLAY_VER(dev_priv) < 9 &&
841 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
842 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
843 		val |= EDP_PSR_TP2_TP3_TIME_100us;
844 
845 check_tp3_sel:
846 	if (intel_dp_source_supports_tps3(dev_priv) &&
847 	    drm_dp_tps3_supported(intel_dp->dpcd))
848 		val |= EDP_PSR_TP_TP1_TP3;
849 	else
850 		val |= EDP_PSR_TP_TP1_TP2;
851 
852 	return val;
853 }
854 
855 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
856 {
857 	struct intel_connector *connector = intel_dp->attached_connector;
858 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
859 	int idle_frames;
860 
861 	/* Let's use 6 as the minimum to cover all known cases including the
862 	 * off-by-one issue that HW has in some cases.
863 	 */
864 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
865 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
866 
867 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
868 		idle_frames = 0xf;
869 
870 	return idle_frames;
871 }
872 
873 static void hsw_activate_psr1(struct intel_dp *intel_dp)
874 {
875 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
876 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
877 	u32 max_sleep_time = 0x1f;
878 	u32 val = EDP_PSR_ENABLE;
879 
880 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
881 
882 	if (DISPLAY_VER(dev_priv) < 20)
883 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
884 
885 	if (IS_HASWELL(dev_priv))
886 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
887 
888 	if (intel_dp->psr.link_standby)
889 		val |= EDP_PSR_LINK_STANDBY;
890 
891 	val |= intel_psr1_get_tp_time(intel_dp);
892 
893 	if (DISPLAY_VER(dev_priv) >= 8)
894 		val |= EDP_PSR_CRC_ENABLE;
895 
896 	if (DISPLAY_VER(dev_priv) >= 20)
897 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
898 
899 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
900 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
901 }
902 
903 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
904 {
905 	struct intel_connector *connector = intel_dp->attached_connector;
906 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
907 	u32 val = 0;
908 
909 	if (dev_priv->display.params.psr_safest_params)
910 		return EDP_PSR2_TP2_TIME_2500us;
911 
912 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
913 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
914 		val |= EDP_PSR2_TP2_TIME_50us;
915 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
916 		val |= EDP_PSR2_TP2_TIME_100us;
917 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
918 		val |= EDP_PSR2_TP2_TIME_500us;
919 	else
920 		val |= EDP_PSR2_TP2_TIME_2500us;
921 
922 	return val;
923 }
924 
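/*
 * PSR2 block count in lines: 8 when both the IO and fast wake times fit
 * within 8 lines, otherwise 12 (reflected in the EDP_PSR2_CTL block count
 * field on TGL+).
 */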
925 static int psr2_block_count_lines(struct intel_dp *intel_dp)
926 {
927 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
928 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
929 }
930 
931 static int psr2_block_count(struct intel_dp *intel_dp)
932 {
933 	return psr2_block_count_lines(intel_dp) / 4;
934 }
935 
936 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
937 {
938 	u8 frames_before_su_entry;
939 
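	/* At least sink_sync_latency + 1 frames, and never fewer than 2 */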
940 	frames_before_su_entry = max_t(u8,
941 				       intel_dp->psr.sink_sync_latency + 1,
942 				       2);
943 
944 	/* Entry setup frames must be at least 1 less than frames before SU entry */
945 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
946 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
947 
948 	return frames_before_su_entry;
949 }
950 
951 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
952 {
953 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
954 	struct intel_psr *psr = &intel_dp->psr;
955 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
956 
957 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
958 		u32 val = psr->su_region_et_enabled ?
959 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
960 
961 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
962 			val |= EDP_PSR2_SU_SDP_SCANLINE;
963 
964 		intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
965 			       val);
966 	}
967 
968 	intel_de_rmw(dev_priv,
969 		     PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
970 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
971 
972 	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
973 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
974 }
975 
976 static void hsw_activate_psr2(struct intel_dp *intel_dp)
977 {
978 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
979 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
980 	u32 val = EDP_PSR2_ENABLE;
981 	u32 psr_val = 0;
982 
983 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
984 
985 	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
986 		val |= EDP_SU_TRACK_ENABLE;
987 
988 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
989 		val |= EDP_Y_COORDINATE_ENABLE;
990 
991 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
992 
993 	val |= intel_psr2_get_tp_time(intel_dp);
994 
995 	if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) {
996 		if (psr2_block_count(intel_dp) > 2)
997 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
998 		else
999 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1000 	}
1001 
1002 	/* Wa_22012278275:adl-p */
1003 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
1004 		static const u8 map[] = {
1005 			2, /* 5 lines */
1006 			1, /* 6 lines */
1007 			0, /* 7 lines */
1008 			3, /* 8 lines */
1009 			6, /* 9 lines */
1010 			5, /* 10 lines */
1011 			4, /* 11 lines */
1012 			7, /* 12 lines */
1013 		};
1014 		/*
1015 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1016 		 * comments below for more information
1017 		 */
1018 		int tmp;
1019 
1020 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1021 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1022 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1023 
1024 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1025 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1026 	} else if (DISPLAY_VER(dev_priv) >= 20) {
1027 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1028 	} else if (DISPLAY_VER(dev_priv) >= 12) {
1029 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1030 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1031 	} else if (DISPLAY_VER(dev_priv) >= 9) {
1032 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1033 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1034 	}
1035 
1036 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1037 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1038 
1039 	if (DISPLAY_VER(dev_priv) >= 20)
1040 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1041 
1042 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1043 		u32 tmp;
1044 
1045 		tmp = intel_de_read(dev_priv,
1046 				    PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
1047 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1048 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1049 		intel_de_write(dev_priv,
1050 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0);
1051 	}
1052 
1053 	if (intel_dp->psr.su_region_et_enabled)
1054 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1055 
1056 	/*
1057 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1058 	 * recommends keeping this bit unset while PSR2 is enabled.
1059 	 */
1060 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
1061 
1062 	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val);
1063 }
1064 
1065 static bool
1066 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
1067 {
1068 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1069 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1070 	else if (DISPLAY_VER(dev_priv) >= 12)
1071 		return cpu_transcoder == TRANSCODER_A;
1072 	else if (DISPLAY_VER(dev_priv) >= 9)
1073 		return cpu_transcoder == TRANSCODER_EDP;
1074 	else
1075 		return false;
1076 }
1077 
1078 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1079 {
1080 	if (!crtc_state->hw.active)
1081 		return 0;
1082 
1083 	return DIV_ROUND_UP(1000 * 1000,
1084 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1085 }
1086 
1087 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1088 				     u32 idle_frames)
1089 {
1090 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1091 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1092 
1093 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
1094 		     EDP_PSR2_IDLE_FRAMES_MASK,
1095 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1096 }
1097 
1098 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1099 {
1100 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1101 
1102 	psr2_program_idle_frames(intel_dp, 0);
1103 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
1104 }
1105 
1106 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1107 {
1108 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1109 
1110 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1111 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1112 }
1113 
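/*
 * Delayed work armed on each flip: when it finally runs without having been
 * re-armed, the pipe has been idle for the whole delay, so drop out of DC3CO
 * and let PSR2 use deep sleep again.
 */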
1114 static void tgl_dc3co_disable_work(struct work_struct *work)
1115 {
1116 	struct intel_dp *intel_dp =
1117 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1118 
1119 	mutex_lock(&intel_dp->psr.lock);
1120 	/* If delayed work is pending, it is not idle */
1121 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1122 		goto unlock;
1123 
1124 	tgl_psr2_disable_dc3co(intel_dp);
1125 unlock:
1126 	mutex_unlock(&intel_dp->psr.lock);
1127 }
1128 
1129 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1130 {
1131 	if (!intel_dp->psr.dc3co_exitline)
1132 		return;
1133 
1134 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1135 	/* Before PSR2 exit disallow dc3co */
1136 	tgl_psr2_disable_dc3co(intel_dp);
1137 }
1138 
1139 static bool
1140 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1141 			      struct intel_crtc_state *crtc_state)
1142 {
1143 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1144 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1145 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1146 	enum port port = dig_port->base.port;
1147 
1148 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1149 		return pipe <= PIPE_B && port <= PORT_B;
1150 	else
1151 		return pipe == PIPE_A && port == PORT_A;
1152 }
1153 
1154 static void
1155 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1156 				  struct intel_crtc_state *crtc_state)
1157 {
1158 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1159 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1160 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1161 	u32 exit_scanlines;
1162 
1163 	/*
1164 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1165 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1166 	 * is applied. B.Specs:49196
1167 	 */
1168 	return;
1169 
1170 	/*
1171 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1172 	 * TODO: when the issue is addressed, this restriction should be removed.
1173 	 */
1174 	if (crtc_state->enable_psr2_sel_fetch)
1175 		return;
1176 
1177 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1178 		return;
1179 
1180 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1181 		return;
1182 
1183 	/* Wa_16011303918:adl-p */
1184 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1185 		return;
1186 
1187 	/*
1188 	 * DC3CO Exit time 200us B.Spec 49196
1189 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1190 	 */
1191 	exit_scanlines =
1192 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1193 
1194 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1195 		return;
1196 
1197 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1198 }
1199 
1200 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1201 					      struct intel_crtc_state *crtc_state)
1202 {
1203 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1204 
1205 	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1206 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1207 		drm_dbg_kms(&dev_priv->drm,
1208 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1209 		return false;
1210 	}
1211 
1212 	if (crtc_state->uapi.async_flip) {
1213 		drm_dbg_kms(&dev_priv->drm,
1214 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1215 		return false;
1216 	}
1217 
1218 	return crtc_state->enable_psr2_sel_fetch = true;
1219 }
1220 
1221 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1222 				   struct intel_crtc_state *crtc_state)
1223 {
1224 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1225 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1226 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1227 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1228 	u16 y_granularity = 0;
1229 
1230 	/* PSR2 HW only sends full lines so we only need to validate the width */
1231 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1232 		return false;
1233 
1234 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1235 		return false;
1236 
1237 	/* HW tracking is only aligned to 4 lines */
1238 	if (!crtc_state->enable_psr2_sel_fetch)
1239 		return intel_dp->psr.su_y_granularity == 4;
1240 
1241 	/*
1242 	 * adl_p and mtl platforms have 1 line granularity.
1243 	 * For other platforms with SW tracking we can adjust the y coordinates
1244 	 * to match the sink requirement if it is a multiple of 4.
1245 	 */
1246 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1247 		y_granularity = intel_dp->psr.su_y_granularity;
1248 	else if (intel_dp->psr.su_y_granularity <= 2)
1249 		y_granularity = 4;
1250 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1251 		y_granularity = intel_dp->psr.su_y_granularity;
1252 
1253 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1254 		return false;
1255 
1256 	if (crtc_state->dsc.compression_enable &&
1257 	    vdsc_cfg->slice_height % y_granularity)
1258 		return false;
1259 
1260 	crtc_state->su_y_granularity = y_granularity;
1261 	return true;
1262 }
1263 
1264 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1265 							struct intel_crtc_state *crtc_state)
1266 {
1267 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1268 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1269 	u32 hblank_total, hblank_ns, req_ns;
1270 
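	/* hblank duration in ns: crtc_clock is in kHz, so ns = 1e6 * pixels / kHz */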
1271 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1272 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1273 
1274 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1275 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1276 
1277 	if ((hblank_ns - req_ns) > 100)
1278 		return true;
1279 
1280 	/* Not supported <13 / Wa_22012279113:adl-p */
1281 	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1282 		return false;
1283 
1284 	crtc_state->req_psr2_sdp_prior_scanline = true;
1285 	return true;
1286 }
1287 
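/*
 * Returns the number of full frames the source must wait before PSR entry so
 * that the sink's PSR setup time is honoured (only LNL+ supports a non-zero
 * number of setup frames), or a negative error code if the setup time cannot
 * be met.
 */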
1288 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1289 					const struct drm_display_mode *adjusted_mode)
1290 {
1291 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1292 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1293 	int entry_setup_frames = 0;
1294 
1295 	if (psr_setup_time < 0) {
1296 		drm_dbg_kms(&i915->drm,
1297 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1298 			    intel_dp->psr_dpcd[1]);
1299 		return -ETIME;
1300 	}
1301 
1302 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1303 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1304 		if (DISPLAY_VER(i915) >= 20) {
1305 			/* setup entry frames can be up to 3 frames */
1306 			entry_setup_frames = 1;
1307 			drm_dbg_kms(&i915->drm,
1308 				    "PSR setup entry frames %d\n",
1309 				    entry_setup_frames);
1310 		} else {
1311 			drm_dbg_kms(&i915->drm,
1312 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1313 				    psr_setup_time);
1314 			return -ETIME;
1315 		}
1316 	}
1317 
1318 	return entry_setup_frames;
1319 }
1320 
1321 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1322 				       const struct intel_crtc_state *crtc_state,
1323 				       bool aux_less)
1324 {
1325 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1326 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1327 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1328 	int wake_lines;
1329 
1330 	if (aux_less)
1331 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1332 	else
1333 		wake_lines = DISPLAY_VER(i915) < 20 ?
1334 			psr2_block_count_lines(intel_dp) :
1335 			intel_dp->alpm_parameters.io_wake_lines;
1336 
1337 	if (crtc_state->req_psr2_sdp_prior_scanline)
1338 		vblank -= 1;
1339 
1340 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1341 	if (vblank < wake_lines)
1342 		return false;
1343 
1344 	return true;
1345 }
1346 
1347 static bool alpm_config_valid(struct intel_dp *intel_dp,
1348 			      const struct intel_crtc_state *crtc_state,
1349 			      bool aux_less)
1350 {
1351 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1352 
1353 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1354 		drm_dbg_kms(&i915->drm,
1355 			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1356 		return false;
1357 	}
1358 
1359 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1360 		drm_dbg_kms(&i915->drm,
1361 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1362 		return false;
1363 	}
1364 
1365 	return true;
1366 }
1367 
1368 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1369 				    struct intel_crtc_state *crtc_state)
1370 {
1371 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1372 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1373 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1374 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1375 
1376 	if (!intel_dp->psr.sink_psr2_support)
1377 		return false;
1378 
1379 	/* JSL and EHL only support eDP 1.3 */
1380 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1381 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1382 		return false;
1383 	}
1384 
1385 	/* Wa_16011181250 */
1386 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1387 	    IS_DG2(dev_priv)) {
1388 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1389 		return false;
1390 	}
1391 
1392 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1393 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1394 		return false;
1395 	}
1396 
1397 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1398 		drm_dbg_kms(&dev_priv->drm,
1399 			    "PSR2 not supported in transcoder %s\n",
1400 			    transcoder_name(crtc_state->cpu_transcoder));
1401 		return false;
1402 	}
1403 
1404 	/*
1405 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1406 	 * resolution requires DSC to be enabled, priority is given to DSC
1407 	 * over PSR2.
1408 	 */
1409 	if (crtc_state->dsc.compression_enable &&
1410 	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1411 		drm_dbg_kms(&dev_priv->drm,
1412 			    "PSR2 cannot be enabled since DSC is enabled\n");
1413 		return false;
1414 	}
1415 
1416 	if (DISPLAY_VER(dev_priv) >= 12) {
1417 		psr_max_h = 5120;
1418 		psr_max_v = 3200;
1419 		max_bpp = 30;
1420 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1421 		psr_max_h = 4096;
1422 		psr_max_v = 2304;
1423 		max_bpp = 24;
1424 	} else if (DISPLAY_VER(dev_priv) == 9) {
1425 		psr_max_h = 3640;
1426 		psr_max_v = 2304;
1427 		max_bpp = 24;
1428 	}
1429 
1430 	if (crtc_state->pipe_bpp > max_bpp) {
1431 		drm_dbg_kms(&dev_priv->drm,
1432 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1433 			    crtc_state->pipe_bpp, max_bpp);
1434 		return false;
1435 	}
1436 
1437 	/* Wa_16011303918:adl-p */
1438 	if (crtc_state->vrr.enable &&
1439 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1440 		drm_dbg_kms(&dev_priv->drm,
1441 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1442 		return false;
1443 	}
1444 
1445 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1446 		return false;
1447 
1448 	if (!crtc_state->enable_psr2_sel_fetch &&
1449 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1450 		drm_dbg_kms(&dev_priv->drm,
1451 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1452 			    crtc_hdisplay, crtc_vdisplay,
1453 			    psr_max_h, psr_max_v);
1454 		return false;
1455 	}
1456 
1457 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1458 
1459 	return true;
1460 }
1461 
1462 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1463 					  struct intel_crtc_state *crtc_state)
1464 {
1465 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1466 
1467 	if (HAS_PSR2_SEL_FETCH(dev_priv) &&
1468 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1469 	    !HAS_PSR_HW_TRACKING(dev_priv)) {
1470 		drm_dbg_kms(&dev_priv->drm,
1471 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1472 		goto unsupported;
1473 	}
1474 
1475 	if (!psr2_global_enabled(intel_dp)) {
1476 		drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n");
1477 		goto unsupported;
1478 	}
1479 
1480 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1481 		goto unsupported;
1482 
1483 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1484 		drm_dbg_kms(&dev_priv->drm,
1485 			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1486 		goto unsupported;
1487 	}
1488 
1489 	if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
1490 					     !intel_dp->psr.sink_panel_replay_su_support))
1491 		goto unsupported;
1492 
1493 	if (crtc_state->crc_enabled) {
1494 		drm_dbg_kms(&dev_priv->drm,
1495 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1496 		goto unsupported;
1497 	}
1498 
1499 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1500 		drm_dbg_kms(&dev_priv->drm,
1501 			    "Selective update not enabled, SU granularity not compatible\n");
1502 		goto unsupported;
1503 	}
1504 
1505 	crtc_state->enable_psr2_su_region_et =
1506 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1507 
1508 	return true;
1509 
1510 unsupported:
1511 	crtc_state->enable_psr2_sel_fetch = false;
1512 	return false;
1513 }
1514 
1515 static bool _psr_compute_config(struct intel_dp *intel_dp,
1516 				struct intel_crtc_state *crtc_state)
1517 {
1518 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1519 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1520 	int entry_setup_frames;
1521 
1522 	/*
1523 	 * Current PSR panels don't work reliably with VRR enabled, so
1524 	 * if VRR is enabled, do not enable PSR.
1525 	 */
1526 	if (crtc_state->vrr.enable)
1527 		return false;
1528 
1529 	if (!CAN_PSR(intel_dp))
1530 		return false;
1531 
1532 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1533 
1534 	if (entry_setup_frames >= 0) {
1535 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1536 	} else {
1537 		drm_dbg_kms(&dev_priv->drm,
1538 			    "PSR condition failed: PSR setup timing not met\n");
1539 		return false;
1540 	}
1541 
1542 	return true;
1543 }
1544 
1545 static bool
1546 _panel_replay_compute_config(struct intel_dp *intel_dp,
1547 			     const struct intel_crtc_state *crtc_state,
1548 			     const struct drm_connector_state *conn_state)
1549 {
1550 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1551 	struct intel_connector *connector =
1552 		to_intel_connector(conn_state->connector);
1553 	struct intel_hdcp *hdcp = &connector->hdcp;
1554 
1555 	if (!CAN_PANEL_REPLAY(intel_dp))
1556 		return false;
1557 
1558 	if (!panel_replay_global_enabled(intel_dp)) {
1559 		drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n");
1560 		return false;
1561 	}
1562 
1563 	if (!intel_dp_is_edp(intel_dp))
1564 		return true;
1565 
1566 	/* Remaining checks are for eDP only */
1567 
1568 	/* 128b/132b Panel Replay is not supported on eDP */
1569 	if (intel_dp_is_uhbr(crtc_state)) {
1570 		drm_dbg_kms(&i915->drm,
1571 			    "Panel Replay is not supported with 128b/132b\n");
1572 		return false;
1573 	}
1574 
1575 	/* HW will not allow Panel Replay on eDP when HDCP enabled */
1576 	if (conn_state->content_protection ==
1577 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1578 	    (conn_state->content_protection ==
1579 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1580 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1581 		drm_dbg_kms(&i915->drm,
1582 			    "Panel Replay is not supported with HDCP\n");
1583 		return false;
1584 	}
1585 
1586 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1587 		return false;
1588 
1589 	if (crtc_state->crc_enabled) {
1590 		drm_dbg_kms(&i915->drm,
1591 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1592 		return false;
1593 	}
1594 
1595 	return true;
1596 }
1597 
1598 void intel_psr_compute_config(struct intel_dp *intel_dp,
1599 			      struct intel_crtc_state *crtc_state,
1600 			      struct drm_connector_state *conn_state)
1601 {
1602 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1603 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1604 
1605 	if (!psr_global_enabled(intel_dp)) {
1606 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1607 		return;
1608 	}
1609 
1610 	if (intel_dp->psr.sink_not_reliable) {
1611 		drm_dbg_kms(&dev_priv->drm,
1612 			    "PSR sink implementation is not reliable\n");
1613 		return;
1614 	}
1615 
1616 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1617 		drm_dbg_kms(&dev_priv->drm,
1618 			    "PSR condition failed: Interlaced mode enabled\n");
1619 		return;
1620 	}
1621 
1622 	/*
1623 	 * FIXME figure out what is wrong with PSR+joiner and
1624 	 * fix it. Presumably something related to the fact that
1625 	 * PSR is a transcoder level feature.
1626 	 */
1627 	if (crtc_state->joiner_pipes) {
1628 		drm_dbg_kms(&dev_priv->drm,
1629 			    "PSR disabled due to joiner\n");
1630 		return;
1631 	}
1632 
1633 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1634 								    crtc_state,
1635 								    conn_state);
1636 
1637 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1638 		_psr_compute_config(intel_dp, crtc_state);
1639 
1640 	if (!crtc_state->has_psr)
1641 		return;
1642 
1643 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1644 }
1645 
1646 void intel_psr_get_config(struct intel_encoder *encoder,
1647 			  struct intel_crtc_state *pipe_config)
1648 {
1649 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1650 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1651 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1652 	struct intel_dp *intel_dp;
1653 	u32 val;
1654 
1655 	if (!dig_port)
1656 		return;
1657 
1658 	intel_dp = &dig_port->dp;
1659 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1660 		return;
1661 
1662 	mutex_lock(&intel_dp->psr.lock);
1663 	if (!intel_dp->psr.enabled)
1664 		goto unlock;
1665 
1666 	if (intel_dp->psr.panel_replay_enabled) {
1667 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1668 	} else {
1669 		/*
1670 		 * Not possible to trust the EDP_PSR/PSR2_CTL register values here, as
1671 		 * PSR gets enabled/disabled at runtime by frontbuffer tracking and others.
1672 		 */
1673 		pipe_config->has_psr = true;
1674 	}
1675 
1676 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1677 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1678 
1679 	if (!intel_dp->psr.sel_update_enabled)
1680 		goto unlock;
1681 
1682 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1683 		val = intel_de_read(dev_priv,
1684 				    PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
1685 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1686 			pipe_config->enable_psr2_sel_fetch = true;
1687 	}
1688 
1689 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1690 
1691 	if (DISPLAY_VER(dev_priv) >= 12) {
1692 		val = intel_de_read(dev_priv,
1693 				    TRANS_EXITLINE(dev_priv, cpu_transcoder));
1694 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1695 	}
1696 unlock:
1697 	mutex_unlock(&intel_dp->psr.lock);
1698 }
1699 
1700 static void intel_psr_activate(struct intel_dp *intel_dp)
1701 {
1702 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1703 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1704 
1705 	drm_WARN_ON(&dev_priv->drm,
1706 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1707 		    intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE);
1708 
1709 	drm_WARN_ON(&dev_priv->drm,
1710 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1711 
1712 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1713 
1714 	lockdep_assert_held(&intel_dp->psr.lock);
1715 
1716 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1717 	if (intel_dp->psr.panel_replay_enabled)
1718 		dg2_activate_panel_replay(intel_dp);
1719 	else if (intel_dp->psr.sel_update_enabled)
1720 		hsw_activate_psr2(intel_dp);
1721 	else
1722 		hsw_activate_psr1(intel_dp);
1723 
1724 	intel_dp->psr.active = true;
1725 }
1726 
1727 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1728 {
1729 	switch (intel_dp->psr.pipe) {
1730 	case PIPE_A:
1731 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1732 	case PIPE_B:
1733 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1734 	case PIPE_C:
1735 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1736 	case PIPE_D:
1737 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1738 	default:
1739 		MISSING_CASE(intel_dp->psr.pipe);
1740 		return 0;
1741 	}
1742 }
1743 
1744 /*
1745  * Wa_16013835468
1746  * Wa_14015648006
1747  */
1748 static void wm_optimization_wa(struct intel_dp *intel_dp,
1749 			       const struct intel_crtc_state *crtc_state)
1750 {
1751 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1752 	bool set_wa_bit = false;
1753 
1754 	/* Wa_14015648006 */
1755 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1756 		set_wa_bit |= crtc_state->wm_level_disabled;
1757 
1758 	/* Wa_16013835468 */
1759 	if (DISPLAY_VER(dev_priv) == 12)
1760 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1761 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1762 
1763 	if (set_wa_bit)
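	/*
	 * intel_de_rmw(i915, reg, clear, set) is a read-modify-write helper
	 * that clears the bits in @clear and sets the bits in @set, so the
	 * calls below either set or clear this pipe's latency-reporting bit.
	 */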
1764 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1765 			     0, wa_16013835468_bit_get(intel_dp));
1766 	else
1767 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1768 			     wa_16013835468_bit_get(intel_dp), 0);
1769 }
1770 
1771 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1772 				    const struct intel_crtc_state *crtc_state)
1773 {
1774 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1775 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1776 	u32 mask = 0;
1777 
1778 	/*
1779 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1780 	 * SKL+ use hardcoded values for PSR AUX transactions.
1781 	 */
1782 	if (DISPLAY_VER(dev_priv) < 9)
1783 		hsw_psr_setup_aux(intel_dp);
1784 
1785 	/*
1786 	 * Per Spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1787 	 * mask LPSP to avoid dependency on other drivers that might block
1788 	 * runtime_pm, besides preventing other hw tracking issues, now that
1789 	 * we can rely on frontbuffer tracking.
1790 	 *
1791 	 * From bspec, prior to LunarLake:
1792 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1793 	 * panel replay mode.
1794 	 *
1795 	 * From bspec, beyond LunarLake:
1796 	 * Panel Replay on DP: No bits are applicable
1797 	 * Panel Replay on eDP: All bits are applicable
1798 	 */
1799 	if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
1800 		mask = EDP_PSR_DEBUG_MASK_HPD;
1801 
1802 	if (intel_dp_is_edp(intel_dp)) {
1803 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1804 
1805 		/*
1806 		 * For some unknown reason on HSW non-ULT (or at least on
1807 		 * Dell Latitude E6540) external displays start to flicker
1808 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1809 		 * higher than should be possible with an external display.
1810 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1811 		 * when external displays are active.
1812 		 */
1813 		if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1814 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1815 
1816 		if (DISPLAY_VER(dev_priv) < 20)
1817 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1818 
1819 		/*
1820 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1821 		 * registers in order to keep the CURSURFLIVE tricks working :(
1822 		 */
1823 		if (IS_DISPLAY_VER(dev_priv, 9, 10))
1824 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1825 
1826 		/* allow PSR with sprite enabled */
1827 		if (IS_HASWELL(dev_priv))
1828 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1829 	}
1830 
1831 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1832 
1833 	psr_irq_control(intel_dp);
1834 
1835 	/*
1836 	 * TODO: if future platforms support DC3CO in more than one
1837 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1838 	 */
1839 	if (intel_dp->psr.dc3co_exitline)
1840 		intel_de_rmw(dev_priv,
1841 			     TRANS_EXITLINE(dev_priv, cpu_transcoder),
1842 			     EXITLINE_MASK,
1843 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1844 
1845 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1846 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1847 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1848 			     IGNORE_PSR2_HW_TRACKING : 0);
1849 
1850 	if (intel_dp_is_edp(intel_dp))
1851 		intel_alpm_configure(intel_dp, crtc_state);
1852 
1853 	/*
1854 	 * Wa_16013835468
1855 	 * Wa_14015648006
1856 	 */
1857 	wm_optimization_wa(intel_dp, crtc_state);
1858 
1859 	if (intel_dp->psr.sel_update_enabled) {
1860 		if (DISPLAY_VER(dev_priv) == 9)
1861 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1862 				     PSR2_VSC_ENABLE_PROG_HEADER |
1863 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1864 
1865 		/*
1866 		 * Wa_16014451276:adlp,mtl[a0,b0]
1867 		 * All supported adlp panels have 1-based X granularity, this may
1868 		 * All supported adlp panels have 1-based X granularity; this may
1869 		 */
1870 		if (!intel_dp->psr.panel_replay_enabled &&
1871 		    (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1872 		     IS_ALDERLAKE_P(dev_priv)))
1873 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1874 				     0, ADLP_1_BASED_X_GRANULARITY);
1875 
1876 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1877 		if (!intel_dp->psr.panel_replay_enabled &&
1878 		    IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1879 			intel_de_rmw(dev_priv,
1880 				     MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
1881 				     0,
1882 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1883 		else if (IS_ALDERLAKE_P(dev_priv))
1884 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1885 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1886 	}
1887 }
1888 
1889 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1890 {
1891 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1892 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1893 	u32 val;
1894 
1895 	if (intel_dp->psr.panel_replay_enabled)
1896 		goto no_err;
1897 
1898 	/*
1899 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1900 	 * will still keep the error set even after the reset done in the
1901 	 * irq_preinstall and irq_uninstall hooks.
1902 	 * Enabling PSR in this situation causes the screen to freeze the
1903 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1904 	 * to avoid any rendering problems.
1905 	 */
1906 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1907 	val &= psr_irq_psr_error_bit_get(intel_dp);
1908 	if (val) {
1909 		intel_dp->psr.sink_not_reliable = true;
1910 		drm_dbg_kms(&dev_priv->drm,
1911 			    "PSR interruption error set, not enabling PSR\n");
1912 		return false;
1913 	}
1914 
1915 no_err:
1916 	return true;
1917 }
1918 
1919 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1920 				    const struct intel_crtc_state *crtc_state)
1921 {
1922 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1923 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1924 	u32 val;
1925 
1926 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1927 
1928 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
1929 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1930 	intel_dp->psr.busy_frontbuffer_bits = 0;
1931 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1932 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1933 	/* DC5/DC6 requires at least 6 idle frames */
1934 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1935 	intel_dp->psr.dc3co_exit_delay = val;
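	/*
	 * Example: with a 60 Hz mode the frame time is ~16667 us, so the
	 * DC3CO exit delay computed above is roughly 100 ms worth of jiffies.
	 */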
1936 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1937 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1938 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
1939 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1940 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1941 		crtc_state->req_psr2_sdp_prior_scanline;
1942 
1943 	if (!psr_interrupt_error_check(intel_dp))
1944 		return;
1945 
1946 	if (intel_dp->psr.panel_replay_enabled) {
1947 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1948 	} else {
1949 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1950 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
1951 
1952 		/*
1953 		 * Panel replay has to be enabled before link training: doing it
1954 		 * only for PSR here.
1955 		 */
1956 		intel_psr_enable_sink(intel_dp, crtc_state);
1957 	}
1958 
1959 	if (intel_dp_is_edp(intel_dp))
1960 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
1961 
1962 	intel_psr_enable_source(intel_dp, crtc_state);
1963 	intel_dp->psr.enabled = true;
1964 	intel_dp->psr.paused = false;
1965 
1966 	intel_psr_activate(intel_dp);
1967 }
1968 
1969 static void intel_psr_exit(struct intel_dp *intel_dp)
1970 {
1971 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1972 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1973 	u32 val;
1974 
1975 	if (!intel_dp->psr.active) {
1976 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1977 			val = intel_de_read(dev_priv,
1978 					    EDP_PSR2_CTL(dev_priv, cpu_transcoder));
1979 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1980 		}
1981 
1982 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1983 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1984 
1985 		return;
1986 	}
1987 
1988 	if (intel_dp->psr.panel_replay_enabled) {
1989 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1990 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1991 	} else if (intel_dp->psr.sel_update_enabled) {
1992 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1993 
1994 		val = intel_de_rmw(dev_priv,
1995 				   EDP_PSR2_CTL(dev_priv, cpu_transcoder),
1996 				   EDP_PSR2_ENABLE, 0);
1997 
1998 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1999 	} else {
2000 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
2001 				   EDP_PSR_ENABLE, 0);
2002 
2003 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
2004 	}
2005 	intel_dp->psr.active = false;
2006 }
2007 
2008 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2009 {
2010 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2011 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2012 	i915_reg_t psr_status;
2013 	u32 psr_status_mask;
2014 
2015 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2016 					  intel_dp->psr.panel_replay_enabled)) {
2017 		psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
2018 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2019 	} else {
2020 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
2021 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2022 	}
2023 
2024 	/* Wait till PSR is idle */
2025 	if (intel_de_wait_for_clear(dev_priv, psr_status,
2026 				    psr_status_mask, 2000))
2027 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
2028 }
2029 
2030 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2031 {
2032 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2033 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2034 
2035 	lockdep_assert_held(&intel_dp->psr.lock);
2036 
2037 	if (!intel_dp->psr.enabled)
2038 		return;
2039 
2040 	if (intel_dp->psr.panel_replay_enabled)
2041 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
2042 	else
2043 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
2044 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2045 
2046 	intel_psr_exit(intel_dp);
2047 	intel_psr_wait_exit_locked(intel_dp);
2048 
2049 	/*
2050 	 * Wa_16013835468
2051 	 * Wa_14015648006
2052 	 */
2053 	if (DISPLAY_VER(dev_priv) >= 11)
2054 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
2055 			     wa_16013835468_bit_get(intel_dp), 0);
2056 
2057 	if (intel_dp->psr.sel_update_enabled) {
2058 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2059 		if (!intel_dp->psr.panel_replay_enabled &&
2060 		    IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
2061 			intel_de_rmw(dev_priv,
2062 				     MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
2063 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2064 		else if (IS_ALDERLAKE_P(dev_priv))
2065 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
2066 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2067 	}
2068 
2069 	if (intel_dp_is_edp(intel_dp))
2070 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2071 
2072 	/* Panel Replay on eDP is always using ALPM aux less. */
2073 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
2074 		intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder),
2075 			     ALPM_CTL_ALPM_ENABLE |
2076 			     ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2077 
2078 		intel_de_rmw(dev_priv,
2079 			     PORT_ALPM_CTL(dev_priv, cpu_transcoder),
2080 			     PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2081 	}
2082 
2083 	/* Disable PSR on Sink */
2084 	if (!intel_dp->psr.panel_replay_enabled) {
2085 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2086 
2087 		if (intel_dp->psr.sel_update_enabled)
2088 			drm_dp_dpcd_writeb(&intel_dp->aux,
2089 					   DP_RECEIVER_ALPM_CONFIG, 0);
2090 	}
2091 
2092 	intel_dp->psr.enabled = false;
2093 	intel_dp->psr.panel_replay_enabled = false;
2094 	intel_dp->psr.sel_update_enabled = false;
2095 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2096 	intel_dp->psr.su_region_et_enabled = false;
2097 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2098 }
2099 
2100 /**
2101  * intel_psr_disable - Disable PSR
2102  * @intel_dp: Intel DP
2103  * @old_crtc_state: old CRTC state
2104  *
2105  * This function needs to be called before disabling pipe.
2106  */
2107 void intel_psr_disable(struct intel_dp *intel_dp,
2108 		       const struct intel_crtc_state *old_crtc_state)
2109 {
2110 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2111 
2112 	if (!old_crtc_state->has_psr)
2113 		return;
2114 
2115 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
2116 		return;
2117 
2118 	mutex_lock(&intel_dp->psr.lock);
2119 
2120 	intel_psr_disable_locked(intel_dp);
2121 
2122 	mutex_unlock(&intel_dp->psr.lock);
2123 	cancel_work_sync(&intel_dp->psr.work);
2124 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2125 }
2126 
2127 /**
2128  * intel_psr_pause - Pause PSR
2129  * @intel_dp: Intel DP
2130  *
2131  * This function needs to be called after enabling PSR.
2132  */
2133 void intel_psr_pause(struct intel_dp *intel_dp)
2134 {
2135 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2136 	struct intel_psr *psr = &intel_dp->psr;
2137 
2138 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2139 		return;
2140 
2141 	mutex_lock(&psr->lock);
2142 
2143 	if (!psr->enabled) {
2144 		mutex_unlock(&psr->lock);
2145 		return;
2146 	}
2147 
2148 	/* If we ever hit this, we will need to add refcount to pause/resume */
2149 	drm_WARN_ON(&dev_priv->drm, psr->paused);
2150 
2151 	intel_psr_exit(intel_dp);
2152 	intel_psr_wait_exit_locked(intel_dp);
2153 	psr->paused = true;
2154 
2155 	mutex_unlock(&psr->lock);
2156 
2157 	cancel_work_sync(&psr->work);
2158 	cancel_delayed_work_sync(&psr->dc3co_work);
2159 }
2160 
2161 /**
2162  * intel_psr_resume - Resume PSR
2163  * @intel_dp: Intel DP
2164  *
2165  * This function needs to be called after pausing PSR.
2166  */
2167 void intel_psr_resume(struct intel_dp *intel_dp)
2168 {
2169 	struct intel_psr *psr = &intel_dp->psr;
2170 
2171 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2172 		return;
2173 
2174 	mutex_lock(&psr->lock);
2175 
2176 	if (!psr->paused)
2177 		goto unlock;
2178 
2179 	psr->paused = false;
2180 	intel_psr_activate(intel_dp);
2181 
2182 unlock:
2183 	mutex_unlock(&psr->lock);
2184 }
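
/*
 * A typical (hypothetical) caller that needs PSR quiescent around some HW
 * access would bracket it as:
 *
 *	intel_psr_pause(intel_dp);
 *	... touch hardware that must not race with PSR entry/exit ...
 *	intel_psr_resume(intel_dp);
 *
 * Note the drm_WARN_ON(psr->paused) in intel_psr_pause(): pause/resume is
 * not refcounted, so nested pause/resume is not supported.
 */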
2185 
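/*
 * PSR2_MAN_TRK_CTL bit layout differs between platforms, hence the helpers
 * below: on ADL-P and display 14+ there is no separate enable bit to set
 * (the helper returns 0) and the SF/full-frame bits sit at different
 * positions than on earlier platforms.
 */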
2186 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
2187 {
2188 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
2189 		PSR2_MAN_TRK_CTL_ENABLE;
2190 }
2191 
2192 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
2193 {
2194 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2195 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2196 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2197 }
2198 
2199 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
2200 {
2201 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2202 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2203 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2204 }
2205 
2206 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
2207 {
2208 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2209 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2210 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2211 }
2212 
2213 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2214 {
2215 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2216 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2217 
2218 	if (intel_dp->psr.psr2_sel_fetch_enabled)
2219 		intel_de_write(dev_priv,
2220 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2221 			       man_trk_ctl_enable_bit_get(dev_priv) |
2222 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2223 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2224 			       man_trk_ctl_continuos_full_frame(dev_priv));
2225 
2226 	/*
2227 	 * Display WA #0884: skl+
2228 	 * This documented WA for bxt can be safely applied
2229 	 * broadly so we can force HW tracking to exit PSR
2230 	 * instead of disabling and re-enabling.
2231 	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
2232 	 * but it makes more sense to write to the currently active
2233 	 * pipe.
2234 	 *
2235 	 * This workaround does not exist for platforms with display 10 or
2236 	 * newer, but testing proved that it works up to display 13; for newer
2237 	 * than that, testing will be needed.
2238 	 */
2239 	intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
2240 }
2241 
2242 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2243 {
2244 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2245 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2246 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2247 	struct intel_encoder *encoder;
2248 
2249 	if (!crtc_state->enable_psr2_sel_fetch)
2250 		return;
2251 
2252 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2253 					     crtc_state->uapi.encoder_mask) {
2254 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2255 
2256 		lockdep_assert_held(&intel_dp->psr.lock);
2257 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2258 			return;
2259 		break;
2260 	}
2261 
2262 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2263 		       crtc_state->psr2_man_track_ctl);
2264 
2265 	if (!crtc_state->enable_psr2_su_region_et)
2266 		return;
2267 
2268 	intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2269 		       crtc_state->pipe_srcsz_early_tpt);
2270 }
2271 
2272 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2273 				  bool full_update)
2274 {
2275 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2276 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2277 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2278 
2279 	/* SF partial frame enable has to be set even on full update */
2280 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2281 
2282 	if (full_update) {
2283 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2284 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
2285 		goto exit;
2286 	}
2287 
2288 	if (crtc_state->psr2_su_area.y1 == -1)
2289 		goto exit;
2290 
2291 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2292 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2293 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2294 	} else {
2295 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2296 			    crtc_state->psr2_su_area.y1 % 4 ||
2297 			    crtc_state->psr2_su_area.y2 % 4);
2298 
2299 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2300 			crtc_state->psr2_su_area.y1 / 4 + 1);
2301 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2302 			crtc_state->psr2_su_area.y2 / 4 + 1);
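		/*
		 * Example: an SU area of y1 = 8, y2 = 64 is programmed as start
		 * block 8 / 4 + 1 = 3 and end block 64 / 4 + 1 = 17, i.e. on
		 * these platforms the region is addressed in 4-line units,
		 * 1-based.
		 */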
2303 	}
2304 exit:
2305 	crtc_state->psr2_man_track_ctl = val;
2306 }
2307 
2308 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2309 					  bool full_update)
2310 {
2311 	int width, height;
2312 
2313 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2314 		return 0;
2315 
2316 	width = drm_rect_width(&crtc_state->psr2_su_area);
2317 	height = drm_rect_height(&crtc_state->psr2_su_area);
2318 
2319 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2320 }
2321 
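/*
 * Merge @damage_area (first clipped against @pipe_src) into
 * @overlap_damage_area, which only tracks the vertical extent;
 * y1 == -1 means "empty so far".
 */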
2322 static void clip_area_update(struct drm_rect *overlap_damage_area,
2323 			     struct drm_rect *damage_area,
2324 			     struct drm_rect *pipe_src)
2325 {
2326 	if (!drm_rect_intersect(damage_area, pipe_src))
2327 		return;
2328 
2329 	if (overlap_damage_area->y1 == -1) {
2330 		overlap_damage_area->y1 = damage_area->y1;
2331 		overlap_damage_area->y2 = damage_area->y2;
2332 		return;
2333 	}
2334 
2335 	if (damage_area->y1 < overlap_damage_area->y1)
2336 		overlap_damage_area->y1 = damage_area->y1;
2337 
2338 	if (damage_area->y2 > overlap_damage_area->y2)
2339 		overlap_damage_area->y2 = damage_area->y2;
2340 }
2341 
2342 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2343 {
2344 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2345 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2346 	u16 y_alignment;
2347 
2348 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2349 	if (crtc_state->dsc.compression_enable &&
2350 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2351 		y_alignment = vdsc_cfg->slice_height;
2352 	else
2353 		y_alignment = crtc_state->su_y_granularity;
2354 
2355 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2356 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2357 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2358 						y_alignment) + 1) * y_alignment;
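	/*
	 * Example: with y_alignment = 4, an SU area of y1 = 10, y2 = 57 is
	 * expanded by the rounding above to y1 = 8, y2 = 60.
	 */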
2359 }
2360 
2361 /*
2362  * When early transport is in use we need to extend SU area to cover
2363  * cursor fully when cursor is in SU area.
2364  */
2365 static void
2366 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2367 				  struct intel_crtc *crtc,
2368 				  bool *cursor_in_su_area)
2369 {
2370 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2371 	struct intel_plane_state *new_plane_state;
2372 	struct intel_plane *plane;
2373 	int i;
2374 
2375 	if (!crtc_state->enable_psr2_su_region_et)
2376 		return;
2377 
2378 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2379 		struct drm_rect inter;
2380 
2381 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2382 			continue;
2383 
2384 		if (plane->id != PLANE_CURSOR)
2385 			continue;
2386 
2387 		if (!new_plane_state->uapi.visible)
2388 			continue;
2389 
2390 		inter = crtc_state->psr2_su_area;
2391 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2392 			continue;
2393 
2394 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2395 				 &crtc_state->pipe_src);
2396 		*cursor_in_su_area = true;
2397 	}
2398 }
2399 
2400 /*
2401  * TODO: It is not clear how to handle planes with a negative position;
2402  * also, planes are not updated if they have a negative X position, so
2403  * for now do a full update in these cases.
2404  *
2405  * Plane scaling and rotation are not supported by selective fetch, and both
2406  * properties can change without a modeset, so they need to be checked at
2407  * every atomic commit.
2408  */
2409 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2410 {
2411 	if (plane_state->uapi.dst.y1 < 0 ||
2412 	    plane_state->uapi.dst.x1 < 0 ||
2413 	    plane_state->scaler_id >= 0 ||
2414 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2415 		return false;
2416 
2417 	return true;
2418 }
2419 
2420 /*
2421  * Check for pipe properties that are not supported by selective fetch.
2422  *
2423  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2424  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2425  * enabled and going to the full update path.
2426  */
2427 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2428 {
2429 	if (crtc_state->scaler_state.scaler_id >= 0)
2430 		return false;
2431 
2432 	return true;
2433 }
2434 
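/*
 * Compute the PSR2 selective fetch state for @crtc: first accumulate the
 * per-plane damage into a single pipe-level SU area, then clip that area
 * against every visible plane to set its selective fetch rectangle. Falls
 * back to a single/continuous full frame update whenever the area cannot
 * be computed (unsupported plane or pipe state, empty damage, etc.).
 */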
2435 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2436 				struct intel_crtc *crtc)
2437 {
2438 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2439 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2440 	struct intel_plane_state *new_plane_state, *old_plane_state;
2441 	struct intel_plane *plane;
2442 	bool full_update = false, cursor_in_su_area = false;
2443 	int i, ret;
2444 
2445 	if (!crtc_state->enable_psr2_sel_fetch)
2446 		return 0;
2447 
2448 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2449 		full_update = true;
2450 		goto skip_sel_fetch_set_loop;
2451 	}
2452 
2453 	crtc_state->psr2_su_area.x1 = 0;
2454 	crtc_state->psr2_su_area.y1 = -1;
2455 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2456 	crtc_state->psr2_su_area.y2 = -1;
2457 
2458 	/*
2459 	 * Calculate minimal selective fetch area of each plane and calculate
2460 	 * the pipe damaged area.
2461 	 * In the next loop the plane selective fetch area will actually be set
2462 	 * using whole pipe damaged area.
2463 	 */
2464 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2465 					     new_plane_state, i) {
2466 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2467 						      .x2 = INT_MAX };
2468 
2469 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2470 			continue;
2471 
2472 		if (!new_plane_state->uapi.visible &&
2473 		    !old_plane_state->uapi.visible)
2474 			continue;
2475 
2476 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2477 			full_update = true;
2478 			break;
2479 		}
2480 
2481 		/*
2482 		 * If visibility changed or the plane moved, mark the whole plane
2483 		 * area as damaged as it needs a complete redraw in the new and
2484 		 * old positions.
2485 		 */
2486 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2487 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2488 				     &old_plane_state->uapi.dst)) {
2489 			if (old_plane_state->uapi.visible) {
2490 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2491 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2492 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2493 						 &crtc_state->pipe_src);
2494 			}
2495 
2496 			if (new_plane_state->uapi.visible) {
2497 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2498 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2499 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2500 						 &crtc_state->pipe_src);
2501 			}
2502 			continue;
2503 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2504 			/* If alpha changed mark the whole plane area as damaged */
2505 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2506 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2507 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2508 					 &crtc_state->pipe_src);
2509 			continue;
2510 		}
2511 
2512 		src = drm_plane_state_src(&new_plane_state->uapi);
2513 		drm_rect_fp_to_int(&src, &src);
2514 
2515 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2516 						     &new_plane_state->uapi, &damaged_area))
2517 			continue;
2518 
2519 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2520 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2521 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2522 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2523 
2524 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2525 	}
2526 
2527 	/*
2528 	 * TODO: For now we are just using full update in case
2529 	 * selective fetch area calculation fails. To optimize this we
2530 	 * should identify cases where this happens and fix the area
2531 	 * calculation for those.
2532 	 */
2533 	if (crtc_state->psr2_su_area.y1 == -1) {
2534 		drm_info_once(&dev_priv->drm,
2535 			      "Selective fetch area calculation failed in pipe %c\n",
2536 			      pipe_name(crtc->pipe));
2537 		full_update = true;
2538 	}
2539 
2540 	if (full_update)
2541 		goto skip_sel_fetch_set_loop;
2542 
2543 	/* Wa_14014971492 */
2544 	if (!crtc_state->has_panel_replay &&
2545 	    ((IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2546 	      IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
2547 	    crtc_state->splitter.enable)
2548 		crtc_state->psr2_su_area.y1 = 0;
2549 
2550 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2551 	if (ret)
2552 		return ret;
2553 
2554 	/*
2555 	 * Adjust su area to cover cursor fully as necessary (early
2556 	 * transport). This needs to be done after
2557 	 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2558 	 * affected planes even when cursor is not updated by itself.
2559 	 */
2560 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2561 
2562 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2563 
2564 	/*
2565 	 * Now that we have the pipe damaged area check if it intersect with
2566 	 * every plane, if it does set the plane selective fetch area.
2567 	 */
2568 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2569 					     new_plane_state, i) {
2570 		struct drm_rect *sel_fetch_area, inter;
2571 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2572 
2573 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2574 		    !new_plane_state->uapi.visible)
2575 			continue;
2576 
2577 		inter = crtc_state->psr2_su_area;
2578 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2579 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2580 			sel_fetch_area->y1 = -1;
2581 			sel_fetch_area->y2 = -1;
2582 			/*
2583 			 * if plane sel fetch was previously enabled ->
2584 			 * disable it
2585 			 */
2586 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2587 				crtc_state->update_planes |= BIT(plane->id);
2588 
2589 			continue;
2590 		}
2591 
2592 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2593 			full_update = true;
2594 			break;
2595 		}
2596 
2597 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2598 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2599 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2600 		crtc_state->update_planes |= BIT(plane->id);
2601 
2602 		/*
2603 		 * Sel_fetch_area is calculated for UV plane. Use
2604 		 * same area for Y plane as well.
2605 		 */
2606 		if (linked) {
2607 			struct intel_plane_state *linked_new_plane_state;
2608 			struct drm_rect *linked_sel_fetch_area;
2609 
2610 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2611 			if (IS_ERR(linked_new_plane_state))
2612 				return PTR_ERR(linked_new_plane_state);
2613 
2614 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2615 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2616 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2617 			crtc_state->update_planes |= BIT(linked->id);
2618 		}
2619 	}
2620 
2621 skip_sel_fetch_set_loop:
2622 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2623 	crtc_state->pipe_srcsz_early_tpt =
2624 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2625 	return 0;
2626 }
2627 
2628 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2629 				struct intel_crtc *crtc)
2630 {
2631 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2632 	const struct intel_crtc_state *old_crtc_state =
2633 		intel_atomic_get_old_crtc_state(state, crtc);
2634 	const struct intel_crtc_state *new_crtc_state =
2635 		intel_atomic_get_new_crtc_state(state, crtc);
2636 	struct intel_encoder *encoder;
2637 
2638 	if (!HAS_PSR(i915))
2639 		return;
2640 
2641 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2642 					     old_crtc_state->uapi.encoder_mask) {
2643 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2644 		struct intel_psr *psr = &intel_dp->psr;
2645 		bool needs_to_disable = false;
2646 
2647 		mutex_lock(&psr->lock);
2648 
2649 		/*
2650 		 * Reasons to disable:
2651 		 * - PSR disabled in new state
2652 		 * - All planes will go inactive
2653 		 * - Changing between PSR versions
2654 		 * - Region Early Transport changing
2655 		 * - Display WA #1136: skl, bxt
2656 		 */
2657 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2658 		needs_to_disable |= !new_crtc_state->has_psr;
2659 		needs_to_disable |= !new_crtc_state->active_planes;
2660 		needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
2661 		needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
2662 			psr->su_region_et_enabled;
2663 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2664 			new_crtc_state->wm_level_disabled;
2665 
2666 		if (psr->enabled && needs_to_disable)
2667 			intel_psr_disable_locked(intel_dp);
2668 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2669 			/* Wa_14015648006 */
2670 			wm_optimization_wa(intel_dp, new_crtc_state);
2671 
2672 		mutex_unlock(&psr->lock);
2673 	}
2674 }
2675 
2676 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2677 				 struct intel_crtc *crtc)
2678 {
2679 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2680 	const struct intel_crtc_state *crtc_state =
2681 		intel_atomic_get_new_crtc_state(state, crtc);
2682 	struct intel_encoder *encoder;
2683 
2684 	if (!crtc_state->has_psr)
2685 		return;
2686 
2687 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2688 					     crtc_state->uapi.encoder_mask) {
2689 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2690 		struct intel_psr *psr = &intel_dp->psr;
2691 		bool keep_disabled = false;
2692 
2693 		mutex_lock(&psr->lock);
2694 
2695 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2696 
2697 		keep_disabled |= psr->sink_not_reliable;
2698 		keep_disabled |= !crtc_state->active_planes;
2699 
2700 		/* Display WA #1136: skl, bxt */
2701 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2702 			crtc_state->wm_level_disabled;
2703 
2704 		if (!psr->enabled && !keep_disabled)
2705 			intel_psr_enable_locked(intel_dp, crtc_state);
2706 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2707 			/* Wa_14015648006 */
2708 			wm_optimization_wa(intel_dp, crtc_state);
2709 
2710 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2711 		if (crtc_state->crc_enabled && psr->enabled)
2712 			psr_force_hw_tracking_exit(intel_dp);
2713 
2714 		/*
2715 		 * Clear possible busy bits in case we have
2716 		 * invalidate -> flip -> flush sequence.
2717 		 */
2718 		intel_dp->psr.busy_frontbuffer_bits = 0;
2719 
2720 		mutex_unlock(&psr->lock);
2721 	}
2722 }
2723 
2724 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2725 {
2726 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2727 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2728 
2729 	/*
2730 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2731 	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2732 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2733 	 */
2734 	return intel_de_wait_for_clear(dev_priv,
2735 				       EDP_PSR2_STATUS(dev_priv, cpu_transcoder),
2736 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2737 }
2738 
2739 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2740 {
2741 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2742 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2743 
2744 	/*
2745 	 * From bspec: Panel Self Refresh (BDW+)
2746 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2747 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2748 	 * defensive enough to cover everything.
2749 	 */
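	/*
	 * For example at 60 Hz that is ~16.7 ms + 6 ms + 1.5 ms, roughly
	 * 24 ms, comfortably within the 50 ms timeout used below.
	 */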
2750 	return intel_de_wait_for_clear(dev_priv,
2751 				       psr_status_reg(dev_priv, cpu_transcoder),
2752 				       EDP_PSR_STATUS_STATE_MASK, 50);
2753 }
2754 
2755 static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2756 {
2757 	return intel_dp_is_edp(intel_dp) ?
2758 		_psr2_ready_for_pipe_update_locked(intel_dp) :
2759 		_psr1_ready_for_pipe_update_locked(intel_dp);
2760 }
2761 
2762 /**
2763  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2764  * @new_crtc_state: new CRTC state
2765  *
2766  * This function is expected to be called from pipe_update_start() where it is
2767  * not expected to race with PSR enable or disable.
2768  */
2769 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2770 {
2771 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2772 	struct intel_encoder *encoder;
2773 
2774 	if (!new_crtc_state->has_psr)
2775 		return;
2776 
2777 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2778 					     new_crtc_state->uapi.encoder_mask) {
2779 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2780 		int ret;
2781 
2782 		lockdep_assert_held(&intel_dp->psr.lock);
2783 
2784 		if (!intel_dp->psr.enabled)
2785 			continue;
2786 
2787 		if (intel_dp->psr.panel_replay_enabled)
2788 			ret = _panel_replay_ready_for_pipe_update_locked(intel_dp);
2789 		else if (intel_dp->psr.sel_update_enabled)
2790 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2791 		else
2792 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2793 
2794 		if (ret)
2795 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2796 	}
2797 }
2798 
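/*
 * Wait for the PSR status register to report idle, dropping psr.lock for
 * the duration of the wait; returns true only if the wait succeeded and
 * PSR is still enabled once the lock has been re-acquired.
 */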
2799 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2800 {
2801 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2802 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2803 	i915_reg_t reg;
2804 	u32 mask;
2805 	int err;
2806 
2807 	if (!intel_dp->psr.enabled)
2808 		return false;
2809 
2810 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2811 					  intel_dp->psr.panel_replay_enabled)) {
2812 		reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
2813 		mask = EDP_PSR2_STATUS_STATE_MASK;
2814 	} else {
2815 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2816 		mask = EDP_PSR_STATUS_STATE_MASK;
2817 	}
2818 
2819 	mutex_unlock(&intel_dp->psr.lock);
2820 
2821 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2822 	if (err)
2823 		drm_err(&dev_priv->drm,
2824 			"Timed out waiting for PSR Idle for re-enable\n");
2825 
2826 	/* After the unlocked wait, verify that PSR is still wanted! */
2827 	mutex_lock(&intel_dp->psr.lock);
2828 	return err == 0 && intel_dp->psr.enabled;
2829 }
2830 
2831 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2832 {
2833 	struct drm_connector_list_iter conn_iter;
2834 	struct drm_modeset_acquire_ctx ctx;
2835 	struct drm_atomic_state *state;
2836 	struct drm_connector *conn;
2837 	int err = 0;
2838 
2839 	state = drm_atomic_state_alloc(&dev_priv->drm);
2840 	if (!state)
2841 		return -ENOMEM;
2842 
2843 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2844 
2845 	state->acquire_ctx = &ctx;
2846 	to_intel_atomic_state(state)->internal = true;
2847 
2848 retry:
2849 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2850 	drm_for_each_connector_iter(conn, &conn_iter) {
2851 		struct drm_connector_state *conn_state;
2852 		struct drm_crtc_state *crtc_state;
2853 
2854 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2855 			continue;
2856 
2857 		conn_state = drm_atomic_get_connector_state(state, conn);
2858 		if (IS_ERR(conn_state)) {
2859 			err = PTR_ERR(conn_state);
2860 			break;
2861 		}
2862 
2863 		if (!conn_state->crtc)
2864 			continue;
2865 
2866 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2867 		if (IS_ERR(crtc_state)) {
2868 			err = PTR_ERR(crtc_state);
2869 			break;
2870 		}
2871 
2872 		/* Mark mode as changed to trigger a pipe->update() */
2873 		crtc_state->mode_changed = true;
2874 	}
2875 	drm_connector_list_iter_end(&conn_iter);
2876 
2877 	if (err == 0)
2878 		err = drm_atomic_commit(state);
2879 
2880 	if (err == -EDEADLK) {
2881 		drm_atomic_state_clear(state);
2882 		err = drm_modeset_backoff(&ctx);
2883 		if (!err)
2884 			goto retry;
2885 	}
2886 
2887 	drm_modeset_drop_locks(&ctx);
2888 	drm_modeset_acquire_fini(&ctx);
2889 	drm_atomic_state_put(state);
2890 
2891 	return err;
2892 }
2893 
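/*
 * Set the PSR debug mask: the low bits select the forced PSR mode, the
 * remaining bits are IRQ-debug and feature-disable flags. A change of mode
 * or of the disable bits forces a fastset so that the new configuration is
 * recomputed immediately.
 */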
2894 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2895 {
2896 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2897 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2898 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2899 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
2900 	u32 old_mode, old_disable_bits;
2901 	int ret;
2902 
2903 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2904 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
2905 		    I915_PSR_DEBUG_MODE_MASK) ||
2906 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2907 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2908 		return -EINVAL;
2909 	}
2910 
2911 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2912 	if (ret)
2913 		return ret;
2914 
2915 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2916 	old_disable_bits = intel_dp->psr.debug &
2917 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2918 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
2919 
2920 	intel_dp->psr.debug = val;
2921 
2922 	/*
2923 	 * Do it right away if it's already enabled, otherwise it will be done
2924 	 * when enabling the source.
2925 	 */
2926 	if (intel_dp->psr.enabled)
2927 		psr_irq_control(intel_dp);
2928 
2929 	mutex_unlock(&intel_dp->psr.lock);
2930 
2931 	if (old_mode != mode || old_disable_bits != disable_bits)
2932 		ret = intel_psr_fastset_force(dev_priv);
2933 
2934 	return ret;
2935 }
2936 
2937 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2938 {
2939 	struct intel_psr *psr = &intel_dp->psr;
2940 
2941 	intel_psr_disable_locked(intel_dp);
2942 	psr->sink_not_reliable = true;
2943 	/* let's make sure that sink is awake */
2944 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2945 }
2946 
2947 static void intel_psr_work(struct work_struct *work)
2948 {
2949 	struct intel_dp *intel_dp =
2950 		container_of(work, typeof(*intel_dp), psr.work);
2951 
2952 	mutex_lock(&intel_dp->psr.lock);
2953 
2954 	if (!intel_dp->psr.enabled)
2955 		goto unlock;
2956 
2957 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2958 		intel_psr_handle_irq(intel_dp);
2959 
2960 	/*
2961 	 * We have to make sure PSR is ready for re-enable,
2962 	 * otherwise it stays disabled until the next full enable/disable cycle.
2963 	 * PSR might take some time to get fully disabled
2964 	 * and be ready for re-enable.
2965 	 */
2966 	if (!__psr_wait_for_idle_locked(intel_dp))
2967 		goto unlock;
2968 
2969 	/*
2970 	 * The delayed work can race with an invalidate hence we need to
2971 	 * recheck. Since psr_flush first clears this and then reschedules we
2972 	 * won't ever miss a flush when bailing out here.
2973 	 */
2974 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2975 		goto unlock;
2976 
2977 	intel_psr_activate(intel_dp);
2978 unlock:
2979 	mutex_unlock(&intel_dp->psr.lock);
2980 }
2981 
2982 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2983 {
2984 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2985 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2986 
2987 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2988 		u32 val;
2989 
2990 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2991 			/* Send one update, otherwise lag is observed on screen */
2992 			intel_de_write(dev_priv,
2993 				       CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
2994 				       0);
2995 			return;
2996 		}
2997 
2998 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2999 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
3000 		      man_trk_ctl_continuos_full_frame(dev_priv);
3001 		intel_de_write(dev_priv,
3002 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
3003 			       val);
3004 		intel_de_write(dev_priv,
3005 			       CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
3006 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3007 	} else {
3008 		intel_psr_exit(intel_dp);
3009 	}
3010 }
3011 
3012 /**
3013  * intel_psr_invalidate - Invalidate PSR
3014  * @dev_priv: i915 device
3015  * @frontbuffer_bits: frontbuffer plane tracking bits
3016  * @origin: which operation caused the invalidate
3017  *
3018  * Since the hardware frontbuffer tracking has gaps we need to integrate
3019  * with the software frontbuffer tracking. This function gets called every
3020  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3021  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3022  *
3023  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3024  */
3025 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
3026 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3027 {
3028 	struct intel_encoder *encoder;
3029 
3030 	if (origin == ORIGIN_FLIP)
3031 		return;
3032 
3033 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3034 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3035 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3036 
3037 		mutex_lock(&intel_dp->psr.lock);
3038 		if (!intel_dp->psr.enabled) {
3039 			mutex_unlock(&intel_dp->psr.lock);
3040 			continue;
3041 		}
3042 
3043 		pipe_frontbuffer_bits &=
3044 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3045 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3046 
3047 		if (pipe_frontbuffer_bits)
3048 			_psr_invalidate_handle(intel_dp);
3049 
3050 		mutex_unlock(&intel_dp->psr.lock);
3051 	}
3052 }
3053 /*
3054  * When we completely rely on PSR2 S/W tracking in the future,
3055  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
3056  * event as well, therefore tgl_dc3co_flush_locked() will need to be changed
3057  * accordingly in the future.
3058  */
3059 static void
3060 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3061 		       enum fb_op_origin origin)
3062 {
3063 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3064 
3065 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3066 	    !intel_dp->psr.active)
3067 		return;
3068 
3069 	/*
3070 	 * At every frontbuffer flush flip event the delay of the delayed work is
3071 	 * modified; when the delayed work runs it means the display has been idle.
3072 	 */
3073 	if (!(frontbuffer_bits &
3074 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3075 		return;
3076 
3077 	tgl_psr2_enable_dc3co(intel_dp);
3078 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3079 			 intel_dp->psr.dc3co_exit_delay);
3080 }
3081 
3082 static void _psr_flush_handle(struct intel_dp *intel_dp)
3083 {
3084 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3085 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3086 
3087 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3088 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3089 			/* can we turn CFF off? */
3090 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
3091 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
3092 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
3093 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
3094 					man_trk_ctl_continuos_full_frame(dev_priv);
3095 
3096 				/*
3097 				 * Set psr2_sel_fetch_cff_enabled as false to allow selective
3098 				 * updates. Still keep cff bit enabled as we don't have proper
3099 				 * SU configuration in case update is sent for any reason after
3100 				 * sff bit gets cleared by the HW on next vblank.
3101 				 */
3102 				intel_de_write(dev_priv,
3103 					       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
3104 					       val);
3105 				intel_de_write(dev_priv,
3106 					       CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
3107 					       0);
3108 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3109 			}
3110 		} else {
3111 			/*
3112 			 * continuous full frame is disabled, only a single full
3113 			 * frame is required
3114 			 */
3115 			psr_force_hw_tracking_exit(intel_dp);
3116 		}
3117 	} else {
3118 		psr_force_hw_tracking_exit(intel_dp);
3119 
3120 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3121 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3122 	}
3123 }
3124 
3125 /**
3126  * intel_psr_flush - Flush PSR
3127  * @dev_priv: i915 device
3128  * @frontbuffer_bits: frontbuffer plane tracking bits
3129  * @origin: which operation caused the flush
3130  *
3131  * Since the hardware frontbuffer tracking has gaps we need to integrate
3132  * with the software frontbuffer tracking. This function gets called every
3133  * time frontbuffer rendering has completed and flushed out to memory. PSR
3134  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3135  *
3136  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3137  */
3138 void intel_psr_flush(struct drm_i915_private *dev_priv,
3139 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3140 {
3141 	struct intel_encoder *encoder;
3142 
3143 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3144 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3145 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3146 
3147 		mutex_lock(&intel_dp->psr.lock);
3148 		if (!intel_dp->psr.enabled) {
3149 			mutex_unlock(&intel_dp->psr.lock);
3150 			continue;
3151 		}
3152 
3153 		pipe_frontbuffer_bits &=
3154 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3155 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3156 
3157 		/*
3158 		 * If the PSR is paused by an explicit intel_psr_paused() call,
3159 		 * we have to ensure that the PSR is not activated until
3160 		 * intel_psr_resume() is called.
3161 		 */
3162 		if (intel_dp->psr.paused)
3163 			goto unlock;
3164 
3165 		if (origin == ORIGIN_FLIP ||
3166 		    (origin == ORIGIN_CURSOR_UPDATE &&
3167 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3168 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3169 			goto unlock;
3170 		}
3171 
3172 		if (pipe_frontbuffer_bits == 0)
3173 			goto unlock;
3174 
3175 		/* By definition flush = invalidate + flush */
3176 		_psr_flush_handle(intel_dp);
3177 unlock:
3178 		mutex_unlock(&intel_dp->psr.lock);
3179 	}
3180 }
3181 
3182 /**
3183  * intel_psr_init - Init basic PSR work and mutex.
3184  * @intel_dp: Intel DP
3185  *
3186  * This function is called after initializing the connector
3187  * (connector initialization handles the connector capabilities),
3188  * and it initializes basic PSR state for each DP encoder.
3189  */
3190 void intel_psr_init(struct intel_dp *intel_dp)
3191 {
3192 	struct intel_connector *connector = intel_dp->attached_connector;
3193 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3194 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3195 
3196 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
3197 		return;
3198 
3199 	/*
3200 	 * HSW spec explicitly says PSR is tied to port A.
3201 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
3202 	 * BDW, GEN9 and GEN11 were not validated by the HW team on any
3203 	 * transcoder other than the eDP one.
3204 	 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
3205 	 * so let's keep it hardcoded to PORT_A for those platforms.
3206 	 * GEN12 supports an instance of PSR registers per transcoder.
3207 	 */
3208 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
3209 		drm_dbg_kms(&dev_priv->drm,
3210 			    "PSR condition failed: Port not supported\n");
3211 		return;
3212 	}
3213 
3214 	if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
3215 	    DISPLAY_VER(dev_priv) >= 20)
3216 		intel_dp->psr.source_panel_replay_support = true;
3217 
3218 	if (HAS_PSR(dev_priv) && intel_dp_is_edp(intel_dp))
3219 		intel_dp->psr.source_support = true;
3220 
3221 	/* Set the link_standby vs. link_off default */
3222 	if (DISPLAY_VER(dev_priv) < 12)
3223 		/* For platforms up to TGL, go back to respecting the VBT */
3224 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3225 
3226 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3227 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3228 	mutex_init(&intel_dp->psr.lock);
3229 }
3230 
3231 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3232 					   u8 *status, u8 *error_status)
3233 {
3234 	struct drm_dp_aux *aux = &intel_dp->aux;
3235 	int ret;
3236 	unsigned int offset;
3237 
3238 	offset = intel_dp->psr.panel_replay_enabled ?
3239 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3240 
3241 	ret = drm_dp_dpcd_readb(aux, offset, status);
3242 	if (ret != 1)
3243 		return ret;
3244 
3245 	offset = intel_dp->psr.panel_replay_enabled ?
3246 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3247 
3248 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3249 	if (ret != 1)
3250 		return ret;
3251 
3252 	*status = *status & DP_PSR_SINK_STATE_MASK;
3253 
3254 	return 0;
3255 }
3256 
3257 static void psr_alpm_check(struct intel_dp *intel_dp)
3258 {
3259 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3260 	struct drm_dp_aux *aux = &intel_dp->aux;
3261 	struct intel_psr *psr = &intel_dp->psr;
3262 	u8 val;
3263 	int r;
3264 
3265 	if (!psr->sel_update_enabled)
3266 		return;
3267 
3268 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3269 	if (r != 1) {
3270 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
3271 		return;
3272 	}
3273 
3274 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3275 		intel_psr_disable_locked(intel_dp);
3276 		psr->sink_not_reliable = true;
3277 		drm_dbg_kms(&dev_priv->drm,
3278 			    "ALPM lock timeout error, disabling PSR\n");
3279 
3280 		/* Clearing error */
3281 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3282 	}
3283 }
3284 
3285 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3286 {
3287 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3288 	struct intel_psr *psr = &intel_dp->psr;
3289 	u8 val;
3290 	int r;
3291 
3292 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3293 	if (r != 1) {
3294 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3295 		return;
3296 	}
3297 
3298 	if (val & DP_PSR_CAPS_CHANGE) {
3299 		intel_psr_disable_locked(intel_dp);
3300 		psr->sink_not_reliable = true;
3301 		drm_dbg_kms(&dev_priv->drm,
3302 			    "Sink PSR capability changed, disabling PSR\n");
3303 
3304 		/* Clearing it */
3305 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3306 	}
3307 }
3308 
3309 /*
3310  * The PSR and Panel Replay error status bits share the same values:
3311  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3312  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3313  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3314  * so this function relies on the PSR definitions.
3315  */
3316 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3317 {
3318 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3319 	struct intel_psr *psr = &intel_dp->psr;
3320 	u8 status, error_status;
3321 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3322 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3323 			  DP_PSR_LINK_CRC_ERROR;
3324 
3325 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3326 		return;
3327 
3328 	mutex_lock(&psr->lock);
3329 
3330 	if (!psr->enabled)
3331 		goto exit;
3332 
3333 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3334 		drm_err(&dev_priv->drm,
3335 			"Error reading PSR status or error status\n");
3336 		goto exit;
3337 	}
3338 
3339 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3340 	    (error_status & errors)) {
3341 		intel_psr_disable_locked(intel_dp);
3342 		psr->sink_not_reliable = true;
3343 	}
3344 
3345 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3346 	    !error_status)
3347 		drm_dbg_kms(&dev_priv->drm,
3348 			    "PSR sink internal error, disabling PSR\n");
3349 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3350 		drm_dbg_kms(&dev_priv->drm,
3351 			    "PSR RFB storage error, disabling PSR\n");
3352 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3353 		drm_dbg_kms(&dev_priv->drm,
3354 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3355 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3356 		drm_dbg_kms(&dev_priv->drm,
3357 			    "PSR Link CRC error, disabling PSR\n");
3358 
3359 	if (error_status & ~errors)
3360 		drm_err(&dev_priv->drm,
3361 			"PSR_ERROR_STATUS unhandled errors %x\n",
3362 			error_status & ~errors);
3363 	/* clear status register */
3364 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3365 
3366 	if (!psr->panel_replay_enabled) {
3367 		psr_alpm_check(intel_dp);
3368 		psr_capability_changed_check(intel_dp);
3369 	}
3370 
3371 exit:
3372 	mutex_unlock(&psr->lock);
3373 }
3374 
3375 bool intel_psr_enabled(struct intel_dp *intel_dp)
3376 {
3377 	bool ret;
3378 
3379 	if (!CAN_PSR(intel_dp))
3380 		return false;
3381 
3382 	mutex_lock(&intel_dp->psr.lock);
3383 	ret = intel_dp->psr.enabled;
3384 	mutex_unlock(&intel_dp->psr.lock);
3385 
3386 	return ret;
3387 }
3388 
3389 /**
3390  * intel_psr_lock - grab PSR lock
3391  * @crtc_state: the crtc state
3392  *
3393  * This is initially meant to be used around the CRTC update, when
3394  * vblank-sensitive registers are updated and we need to grab the lock
3395  * beforehand to avoid interfering with the vblank evasion.
3396  */
3397 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3398 {
3399 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3400 	struct intel_encoder *encoder;
3401 
3402 	if (!crtc_state->has_psr)
3403 		return;
3404 
3405 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3406 					     crtc_state->uapi.encoder_mask) {
3407 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3408 
3409 		mutex_lock(&intel_dp->psr.lock);
3410 		break;
3411 	}
3412 }
3413 
3414 /**
3415  * intel_psr_unlock - release PSR lock
3416  * @crtc_state: the crtc state
3417  *
3418  * Release the PSR lock that was held during pipe update.
3419  */
3420 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3421 {
3422 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3423 	struct intel_encoder *encoder;
3424 
3425 	if (!crtc_state->has_psr)
3426 		return;
3427 
3428 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3429 					     crtc_state->uapi.encoder_mask) {
3430 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3431 
3432 		mutex_unlock(&intel_dp->psr.lock);
3433 		break;
3434 	}
3435 }
3436 
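/*
 * Illustrative pairing (a sketch, not the actual call sites): the lock is
 * meant to bracket the vblank-evasion critical section of a pipe update,
 * roughly:
 *
 *	intel_psr_lock(new_crtc_state);
 *	... evade vblank and write the vblank-sensitive registers ...
 *	intel_psr_unlock(new_crtc_state);
 *
 * "new_crtc_state" is only a placeholder name; the real callers are in the
 * CRTC/pipe update path.
 */
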
3437 static void
3438 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3439 {
3440 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3441 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3442 	const char *status = "unknown";
3443 	u32 val, status_val;
3444 
3445 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3446 					  intel_dp->psr.panel_replay_enabled)) {
3447 		static const char * const live_status[] = {
3448 			"IDLE",
3449 			"CAPTURE",
3450 			"CAPTURE_FS",
3451 			"SLEEP",
3452 			"BUFON_FW",
3453 			"ML_UP",
3454 			"SU_STANDBY",
3455 			"FAST_SLEEP",
3456 			"DEEP_SLEEP",
3457 			"BUF_ON",
3458 			"TG_ON"
3459 		};
3460 		val = intel_de_read(dev_priv,
3461 				    EDP_PSR2_STATUS(dev_priv, cpu_transcoder));
3462 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3463 		if (status_val < ARRAY_SIZE(live_status))
3464 			status = live_status[status_val];
3465 	} else {
3466 		static const char * const live_status[] = {
3467 			"IDLE",
3468 			"SRDONACK",
3469 			"SRDENT",
3470 			"BUFOFF",
3471 			"BUFON",
3472 			"AUXACK",
3473 			"SRDOFFACK",
3474 			"SRDENT_ON",
3475 		};
3476 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3477 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3478 		if (status_val < ARRAY_SIZE(live_status))
3479 			status = live_status[status_val];
3480 	}
3481 
3482 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3483 }
3484 
3485 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3486 				      struct seq_file *m)
3487 {
3488 	struct intel_psr *psr = &intel_dp->psr;
3489 
3490 	seq_printf(m, "Sink support: PSR = %s",
3491 		   str_yes_no(psr->sink_support));
3492 
3493 	if (psr->sink_support)
3494 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3495 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3496 		seq_printf(m, " (Early Transport)");
3497 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3498 	seq_printf(m, ", Panel Replay Selective Update = %s",
3499 		   str_yes_no(psr->sink_panel_replay_su_support));
3500 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3501 		seq_printf(m, " (Early Transport)");
3502 	seq_printf(m, "\n");
3503 }
3504 
3505 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3506 				 struct seq_file *m)
3507 {
3508 	struct intel_psr *psr = &intel_dp->psr;
3509 	const char *status, *mode, *region_et;
3510 
3511 	if (psr->enabled)
3512 		status = " enabled";
3513 	else
3514 		status = "disabled";
3515 
3516 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3517 		mode = "Panel Replay Selective Update";
3518 	else if (psr->panel_replay_enabled)
3519 		mode = "Panel Replay";
3520 	else if (psr->sel_update_enabled)
3521 		mode = "PSR2";
3522 	else if (psr->enabled)
3523 		mode = "PSR1";
3524 	else
3525 		mode = "";
3526 
3527 	if (psr->su_region_et_enabled)
3528 		region_et = " (Early Transport)";
3529 	else
3530 		region_et = "";
3531 
3532 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3533 }
3534 
3535 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3536 {
3537 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3538 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3539 	struct intel_psr *psr = &intel_dp->psr;
3540 	intel_wakeref_t wakeref;
3541 	bool enabled;
3542 	u32 val, psr2_ctl;
3543 
3544 	intel_psr_sink_capability(intel_dp, m);
3545 
3546 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3547 		return 0;
3548 
3549 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3550 	mutex_lock(&psr->lock);
3551 
3552 	intel_psr_print_mode(intel_dp, m);
3553 
3554 	if (!psr->enabled) {
3555 		seq_printf(m, "PSR sink not reliable: %s\n",
3556 			   str_yes_no(psr->sink_not_reliable));
3557 
3558 		goto unlock;
3559 	}
3560 
3561 	if (psr->panel_replay_enabled) {
3562 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3563 
3564 		if (intel_dp_is_edp(intel_dp))
3565 			psr2_ctl = intel_de_read(dev_priv,
3566 						 EDP_PSR2_CTL(dev_priv,
3567 							      cpu_transcoder));
3568 
3569 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3570 	} else if (psr->sel_update_enabled) {
3571 		val = intel_de_read(dev_priv,
3572 				    EDP_PSR2_CTL(dev_priv, cpu_transcoder));
3573 		enabled = val & EDP_PSR2_ENABLE;
3574 	} else {
3575 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3576 		enabled = val & EDP_PSR_ENABLE;
3577 	}
3578 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3579 		   str_enabled_disabled(enabled), val);
3580 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
3581 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
3582 			   psr2_ctl);
3583 	psr_source_status(intel_dp, m);
3584 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3585 		   psr->busy_frontbuffer_bits);
3586 
3587 	/*
3588 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3589 	 */
3590 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3591 	seq_printf(m, "Performance counter: %u\n",
3592 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3593 
3594 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3595 		seq_printf(m, "Last attempted entry at: %lld\n",
3596 			   psr->last_entry_attempt);
3597 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3598 	}
3599 
3600 	if (psr->sel_update_enabled) {
3601 		u32 su_frames_val[3];
3602 		int frame;
3603 
3604 		/*
3605 		 * Read all 3 registers beforehand to minimize the chance of
3606 		 * crossing a frame boundary between register reads.
3607 		 */
3608 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3609 			val = intel_de_read(dev_priv,
3610 					    PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame));
3611 			su_frames_val[frame / 3] = val;
3612 		}
3613 
3614 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3615 
3616 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3617 			u32 su_blocks;
3618 
3619 			su_blocks = su_frames_val[frame / 3] &
3620 				    PSR2_SU_STATUS_MASK(frame);
3621 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3622 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3623 		}
3624 
3625 		seq_printf(m, "PSR2 selective fetch: %s\n",
3626 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3627 	}
3628 
3629 unlock:
3630 	mutex_unlock(&psr->lock);
3631 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3632 
3633 	return 0;
3634 }
3635 
3636 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3637 {
3638 	struct drm_i915_private *dev_priv = m->private;
3639 	struct intel_dp *intel_dp = NULL;
3640 	struct intel_encoder *encoder;
3641 
3642 	if (!HAS_PSR(dev_priv))
3643 		return -ENODEV;
3644 
3645 	/* Find the first eDP which supports PSR */
3646 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3647 		intel_dp = enc_to_intel_dp(encoder);
3648 		break;
3649 	}
3650 
3651 	if (!intel_dp)
3652 		return -ENODEV;
3653 
3654 	return intel_psr_status(m, intel_dp);
3655 }
3656 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3657 
3658 static int
3659 i915_edp_psr_debug_set(void *data, u64 val)
3660 {
3661 	struct drm_i915_private *dev_priv = data;
3662 	struct intel_encoder *encoder;
3663 	intel_wakeref_t wakeref;
3664 	int ret = -ENODEV;
3665 
3666 	if (!HAS_PSR(dev_priv))
3667 		return ret;
3668 
3669 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3670 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3671 
3672 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3673 
3674 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3675 
3676 		// TODO: split to each transcoder's PSR debug state
3677 		ret = intel_psr_debug_set(intel_dp, val);
3678 
3679 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3680 	}
3681 
3682 	return ret;
3683 }
3684 
3685 static int
3686 i915_edp_psr_debug_get(void *data, u64 *val)
3687 {
3688 	struct drm_i915_private *dev_priv = data;
3689 	struct intel_encoder *encoder;
3690 
3691 	if (!HAS_PSR(dev_priv))
3692 		return -ENODEV;
3693 
3694 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3695 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3696 
3697 		// TODO: split to each transcoder's PSR debug state
3698 		*val = READ_ONCE(intel_dp->psr.debug);
3699 		return 0;
3700 	}
3701 
3702 	return -ENODEV;
3703 }
3704 
3705 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3706 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3707 			"%llu\n");
3708 
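/*
 * Both files below are created under the DRM debugfs root, typically
 * /sys/kernel/debug/dri/<minor>/. A hedged example from userspace (the exact
 * minor number and accepted debug values depend on the system):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *	# echo 0x1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */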
3709 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3710 {
3711 	struct drm_minor *minor = i915->drm.primary;
3712 
3713 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3714 			    i915, &i915_edp_psr_debug_fops);
3715 
3716 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3717 			    i915, &i915_edp_psr_status_fops);
3718 }
3719 
3720 static const char *psr_mode_str(struct intel_dp *intel_dp)
3721 {
3722 	if (intel_dp->psr.panel_replay_enabled)
3723 		return "PANEL-REPLAY";
3724 	else if (intel_dp->psr.enabled)
3725 		return "PSR";
3726 
3727 	return "unknown";
3728 }
3729 
3730 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3731 {
3732 	struct intel_connector *connector = m->private;
3733 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3734 	static const char * const sink_status[] = {
3735 		"inactive",
3736 		"transition to active, capture and display",
3737 		"active, display from RFB",
3738 		"active, capture and display on sink device timings",
3739 		"transition to inactive, capture and display, timing re-sync",
3740 		"reserved",
3741 		"reserved",
3742 		"sink internal error",
3743 	};
3744 	const char *str;
3745 	int ret;
3746 	u8 status, error_status;
3747 
3748 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3749 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3750 		return -ENODEV;
3751 	}
3752 
3753 	if (connector->base.status != connector_status_connected)
3754 		return -ENODEV;
3755 
3756 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3757 	if (ret)
3758 		return ret;
3759 
3760 	status &= DP_PSR_SINK_STATE_MASK;
3761 	if (status < ARRAY_SIZE(sink_status))
3762 		str = sink_status[status];
3763 	else
3764 		str = "unknown";
3765 
3766 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3767 
3768 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3769 
3770 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3771 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3772 			    DP_PSR_LINK_CRC_ERROR))
3773 		seq_puts(m, ":\n");
3774 	else
3775 		seq_puts(m, "\n");
3776 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3777 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3778 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3779 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3780 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3781 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3782 
3783 	return ret;
3784 }
3785 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3786 
3787 static int i915_psr_status_show(struct seq_file *m, void *data)
3788 {
3789 	struct intel_connector *connector = m->private;
3790 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3791 
3792 	return intel_psr_status(m, intel_dp);
3793 }
3794 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3795 
3796 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3797 {
3798 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3799 	struct dentry *root = connector->base.debugfs_entry;
3800 
3801 	/* TODO: Add support for MST connectors as well. */
3802 	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3803 	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3804 	    connector->mst_port)
3805 		return;
3806 
3807 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3808 			    connector, &i915_psr_sink_status_fops);
3809 
3810 	if (HAS_PSR(i915) || HAS_DP20(i915))
3811 		debugfs_create_file("i915_psr_status", 0444, root,
3812 				    connector, &i915_psr_status_fops);
3813 }
3814