xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision c8faf11cd192214e231626c3ee973a35d8fc33f2)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_alpm.h"
31 #include "intel_atomic.h"
32 #include "intel_crtc.h"
33 #include "intel_cursor_regs.h"
34 #include "intel_ddi.h"
35 #include "intel_de.h"
36 #include "intel_display_types.h"
37 #include "intel_dp.h"
38 #include "intel_dp_aux.h"
39 #include "intel_frontbuffer.h"
40 #include "intel_hdmi.h"
41 #include "intel_psr.h"
42 #include "intel_psr_regs.h"
43 #include "intel_snps_phy.h"
44 #include "skl_universal_plane.h"
45 
46 /**
47  * DOC: Panel Self Refresh (PSR/SRD)
48  *
49  * Since Haswell the display controller supports Panel Self-Refresh on display
50  * panels which have a remote frame buffer (RFB) implemented according to the
51  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
52  * standby states when the system is idle but the display is on, as it
53  * eliminates display refresh requests to DDR memory completely as long as the
54  * frame buffer for that display is unchanged.
55  *
56  * Panel Self Refresh must be supported by both Hardware (source) and
57  * Panel (sink).
58  *
59  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
60  * to power down the link and memory controller. For DSI panels the same idea
61  * is called "manual mode".
62  *
63  * The implementation uses the hardware-based PSR support which automatically
64  * enters/exits self-refresh mode. The hardware takes care of sending the
65  * required DP aux message and could even retrain the link (that part isn't
66  * enabled yet though). The hardware also keeps track of any frontbuffer
67  * changes to know when to exit self-refresh mode again. Unfortunately that
68  * part doesn't work too well, hence why the i915 PSR support uses the
69  * software frontbuffer tracking to make sure it doesn't miss a screen
70  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
71  * get called by the frontbuffer tracking code. Note that because of locking
72  * issues the self-refresh re-enable code is done from a work queue, which
73  *  must be correctly synchronized/cancelled when shutting down the pipe.
74  *
75  * DC3CO (DC3 clock off)
76  *
77  * On top of PSR2, GEN12 adds an intermediate power savings state that turns
78  * the clock off automatically during the PSR2 idle state.
79  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
80  * entry/exit allows the HW to enter a low-power state even when page flipping
81  * periodically (for instance a 30fps video playback scenario).
82  *
83  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
84  * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
85  * after 6 frames. If no other flip occurs and that work is executed, DC3CO is
86  * disabled and PSR2 is configured to enter deep sleep, restarting the cycle in
87  * case of another flip.
88  * Front buffer modifications do not trigger DC3CO activation on purpose as it
89  * would bring a lot of complexity and most modern systems will only use page
90  * flips.
91  */
92 
93 /*
94  * Description of PSR mask bits:
95  *
96  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
97  *
98  *  When unmasked (nearly) all display register writes (e.g. even
99  *  SWF) trigger a PSR exit. Some registers are excluded from this
100  *  and they have a more specific mask (described below). On icl+
101  *  this bit no longer exists and is effectively always set.
102  *
103  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
104  *
105  *  When unmasked (nearly) all pipe/plane register writes
106  *  trigger a PSR exit. Some plane registers are excluded from this
107  *  and they have a more specific mask (described below).
108  *
109  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
110  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
111  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
112  *
113  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
114  *  SPR_SURF/CURBASE are not included in this and instead are
115  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
116  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
117  *
118  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
119  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
120  *
121  *  When unmasked PSR is blocked as long as the sprite
122  *  plane is enabled. skl+ with their universal planes no
123  *  longer have a mask bit like this, and no plane being
124  *  enabled blocks PSR.
125  *
126  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
127  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
128  *
129  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
130  *  this bit doesn't exist but CURPOS is included in the
131  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
132  *
133  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
134  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
135  *
136  *  When unmasked PSR is blocked as long as vblank and/or vsync
137  *  interrupt is unmasked in IMR *and* enabled in IER.
138  *
139  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
140  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
141  *
142  *  Selects whether PSR exit generates an extra vblank before
143  *  the first frame is transmitted. Also note the opposite polarity
144  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
145  *  unmasked==do not generate the extra vblank).
146  *
147  *  With DC states enabled the extra vblank happens after link training,
148  *  with DC states disabled it happens immediately upon PSR exit trigger.
149  *  No idea as of now why there is a difference. HSW/BDW (which don't
150  *  even have DMC) always generate it after link training. Go figure.
151  *
152  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
153  *  and thus won't latch until the first vblank. So with DC states
154  *  enabled the register effectively uses the reset value during DC5
155  *  exit+PSR exit sequence, and thus the bit does nothing until
156  *  latched by the vblank that it was trying to prevent from being
157  *  generated in the first place. So we should probably call this
158  *  one a chicken/egg bit instead on skl+.
159  *
160  *  In standby mode (as opposed to link-off) this makes no difference
161  *  as the timing generator keeps running the whole time generating
162  *  normal periodic vblanks.
163  *
164  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
165  *  and doing so makes the behaviour match the skl+ reset value.
166  *
167  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
168  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
169  *
170  *  On BDW without this bit set no vblanks whatsoever are
171  *  generated after PSR exit. On HSW this has no apparent effect.
172  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
173  *
174  * The rest of the bits are more self-explanatory and/or
175  * irrelevant for normal operation.
176  *
177  * Description of intel_crtc_state variables. has_psr, has_panel_replay and
178  * has_sel_update:
179  *
180  *  has_psr (alone):					PSR1
181  *  has_psr + has_sel_update:				PSR2
182  *  has_psr + has_panel_replay:				Panel Replay
183  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
184  *
185  * Description of some intel_psr variables. enabled, panel_replay_enabled,
186  * sel_update_enabled
187  *
188  *  enabled (alone):						PSR1
189  *  enabled + sel_update_enabled:				PSR2
190  *  enabled + panel_replay_enabled:				Panel Replay
191  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
192  */
193 
194 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
195 			   (intel_dp)->psr.source_support)
196 
197 bool intel_encoder_can_psr(struct intel_encoder *encoder)
198 {
199 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
200 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
201 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
202 	else
203 		return false;
204 }
205 
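/*
 * Resolve whether PSR may be used at all: the debugfs mode override takes
 * precedence, then the enable_psr module parameter, and finally the VBT
 * setting when the parameter is left at its default (-1).
 */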
206 static bool psr_global_enabled(struct intel_dp *intel_dp)
207 {
208 	struct intel_connector *connector = intel_dp->attached_connector;
209 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
210 
211 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
212 	case I915_PSR_DEBUG_DEFAULT:
213 		if (i915->display.params.enable_psr == -1)
214 			return connector->panel.vbt.psr.enable;
215 		return i915->display.params.enable_psr;
216 	case I915_PSR_DEBUG_DISABLE:
217 		return false;
218 	default:
219 		return true;
220 	}
221 }
222 
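/*
 * PSR2/selective update is additionally gated on the debugfs mode (disabled
 * or forced to PSR1) and on enable_psr == 1, which limits the feature to PSR1.
 */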
223 static bool psr2_global_enabled(struct intel_dp *intel_dp)
224 {
225 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
226 
227 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
228 	case I915_PSR_DEBUG_DISABLE:
229 	case I915_PSR_DEBUG_FORCE_PSR1:
230 		return false;
231 	default:
232 		if (i915->display.params.enable_psr == 1)
233 			return false;
234 		return true;
235 	}
236 }
237 
238 static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
239 {
240 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
241 
242 	if (i915->display.params.enable_psr != -1)
243 		return false;
244 
245 	return true;
246 }
247 
248 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
249 {
250 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
251 
252 	if ((i915->display.params.enable_psr != -1) ||
253 	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
254 		return false;
255 	return true;
256 }
257 
258 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
259 {
260 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
261 
262 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
263 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
264 }
265 
266 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
267 {
268 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
269 
270 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
271 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
272 }
273 
274 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
275 {
276 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
277 
278 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
279 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
280 }
281 
282 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
283 {
284 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
285 
286 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
287 		EDP_PSR_MASK(intel_dp->psr.transcoder);
288 }
289 
290 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
291 			      enum transcoder cpu_transcoder)
292 {
293 	if (DISPLAY_VER(dev_priv) >= 8)
294 		return EDP_PSR_CTL(dev_priv, cpu_transcoder);
295 	else
296 		return HSW_SRD_CTL;
297 }
298 
299 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
300 				enum transcoder cpu_transcoder)
301 {
302 	if (DISPLAY_VER(dev_priv) >= 8)
303 		return EDP_PSR_DEBUG(dev_priv, cpu_transcoder);
304 	else
305 		return HSW_SRD_DEBUG;
306 }
307 
308 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
309 				   enum transcoder cpu_transcoder)
310 {
311 	if (DISPLAY_VER(dev_priv) >= 8)
312 		return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder);
313 	else
314 		return HSW_SRD_PERF_CNT;
315 }
316 
317 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
318 				 enum transcoder cpu_transcoder)
319 {
320 	if (DISPLAY_VER(dev_priv) >= 8)
321 		return EDP_PSR_STATUS(dev_priv, cpu_transcoder);
322 	else
323 		return HSW_SRD_STATUS;
324 }
325 
326 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
327 			      enum transcoder cpu_transcoder)
328 {
329 	if (DISPLAY_VER(dev_priv) >= 12)
330 		return TRANS_PSR_IMR(dev_priv, cpu_transcoder);
331 	else
332 		return EDP_PSR_IMR;
333 }
334 
335 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
336 			      enum transcoder cpu_transcoder)
337 {
338 	if (DISPLAY_VER(dev_priv) >= 12)
339 		return TRANS_PSR_IIR(dev_priv, cpu_transcoder);
340 	else
341 		return EDP_PSR_IIR;
342 }
343 
344 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
345 				  enum transcoder cpu_transcoder)
346 {
347 	if (DISPLAY_VER(dev_priv) >= 8)
348 		return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder);
349 	else
350 		return HSW_SRD_AUX_CTL;
351 }
352 
353 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
354 				   enum transcoder cpu_transcoder, int i)
355 {
356 	if (DISPLAY_VER(dev_priv) >= 8)
357 		return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i);
358 	else
359 		return HSW_SRD_AUX_DATA(i);
360 }
361 
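/*
 * Unmask the PSR error interrupt unconditionally; the pre-entry and post-exit
 * interrupts are only unmasked when IRQ debugging is requested via debugfs.
 * Panel Replay leaves the PSR interrupt mask untouched.
 */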
362 static void psr_irq_control(struct intel_dp *intel_dp)
363 {
364 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
365 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
366 	u32 mask;
367 
368 	if (intel_dp->psr.panel_replay_enabled)
369 		return;
370 
371 	mask = psr_irq_psr_error_bit_get(intel_dp);
372 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
373 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
374 			psr_irq_pre_entry_bit_get(intel_dp);
375 
376 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
377 		     psr_irq_mask_get(intel_dp), ~mask);
378 }
379 
380 static void psr_event_print(struct drm_i915_private *i915,
381 			    u32 val, bool sel_update_enabled)
382 {
383 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
384 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
385 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
386 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
387 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
388 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
389 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
390 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
391 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
392 	if (val & PSR_EVENT_GRAPHICS_RESET)
393 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
394 	if (val & PSR_EVENT_PCH_INTERRUPT)
395 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
396 	if (val & PSR_EVENT_MEMORY_UP)
397 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
398 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
399 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
400 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
401 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
402 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
403 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
404 	if (val & PSR_EVENT_REGISTER_UPDATE)
405 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
406 	if (val & PSR_EVENT_HDCP_ENABLE)
407 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
408 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
409 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
410 	if (val & PSR_EVENT_VBI_ENABLE)
411 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
412 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
413 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
414 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
415 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
416 }
417 
418 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
419 {
420 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
421 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
422 	ktime_t time_ns = ktime_get();
423 
424 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
425 		intel_dp->psr.last_entry_attempt = time_ns;
426 		drm_dbg_kms(&dev_priv->drm,
427 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
428 			    transcoder_name(cpu_transcoder));
429 	}
430 
431 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
432 		intel_dp->psr.last_exit = time_ns;
433 		drm_dbg_kms(&dev_priv->drm,
434 			    "[transcoder %s] PSR exit completed\n",
435 			    transcoder_name(cpu_transcoder));
436 
437 		if (DISPLAY_VER(dev_priv) >= 9) {
438 			u32 val;
439 
440 			val = intel_de_rmw(dev_priv,
441 					   PSR_EVENT(dev_priv, cpu_transcoder),
442 					   0, 0);
443 
444 			psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled);
445 		}
446 	}
447 
448 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
449 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
450 			 transcoder_name(cpu_transcoder));
451 
452 		intel_dp->psr.irq_aux_error = true;
453 
454 		/*
455 		 * If this interrupt is not masked it will keep
456 		 * firing so fast that it prevents the scheduled
457 		 * work from running.
458 		 * Also, after a PSR error we don't want to arm PSR
459 		 * again, so we don't care about unmasking the
460 		 * interrupt or clearing irq_aux_error.
461 		 */
462 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
463 			     0, psr_irq_psr_error_bit_get(intel_dp));
464 
465 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
466 	}
467 }
468 
469 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
470 {
471 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
472 	u8 val = 8; /* assume the worst if we can't read the value */
473 
474 	if (drm_dp_dpcd_readb(&intel_dp->aux,
475 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
476 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
477 	else
478 		drm_dbg_kms(&i915->drm,
479 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
480 	return val;
481 }
482 
483 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
484 {
485 	u8 su_capability = 0;
486 
487 	if (intel_dp->psr.sink_panel_replay_su_support)
488 		drm_dp_dpcd_readb(&intel_dp->aux,
489 				  DP_PANEL_PANEL_REPLAY_CAPABILITY,
490 				  &su_capability);
491 	else
492 		su_capability = intel_dp->psr_dpcd[1];
493 
494 	return su_capability;
495 }
496 
497 static unsigned int
498 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
499 {
500 	return intel_dp->psr.sink_panel_replay_su_support ?
501 		DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
502 		DP_PSR2_SU_X_GRANULARITY;
503 }
504 
505 static unsigned int
506 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
507 {
508 	return intel_dp->psr.sink_panel_replay_su_support ?
509 		DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
510 		DP_PSR2_SU_Y_GRANULARITY;
511 }
512 
513 /*
514  * Note: Bits related to granularity are the same in the panel replay and PSR
515  * registers. Rely on the PSR definitions for these "common" bits.
516  */
517 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
518 {
519 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
520 	ssize_t r;
521 	u16 w;
522 	u8 y;
523 
524 	/*
525 	 * TODO: Do we need to take into account panel supporting both PSR and
526 	 * Panel replay?
527 	 */
528 
529 	/*
530 	 * If the sink doesn't have specific granularity requirements, set the
531 	 * legacy ones.
532 	 */
533 	if (!(intel_dp_get_su_capability(intel_dp) &
534 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
535 		/* As PSR2 HW sends full lines, we do not care about x granularity */
536 		w = 4;
537 		y = 4;
538 		goto exit;
539 	}
540 
541 	r = drm_dp_dpcd_read(&intel_dp->aux,
542 			     intel_dp_get_su_x_granularity_offset(intel_dp),
543 			     &w, 2);
544 	if (r != 2)
545 		drm_dbg_kms(&i915->drm,
546 			    "Unable to read selective update x granularity\n");
547 	/*
548 	 * Spec says that if the value read is 0 the default granularity should
549 	 * be used instead.
550 	 */
551 	if (r != 2 || w == 0)
552 		w = 4;
553 
554 	r = drm_dp_dpcd_read(&intel_dp->aux,
555 			     intel_dp_get_su_y_granularity_offset(intel_dp),
556 			     &y, 1);
557 	if (r != 1) {
558 		drm_dbg_kms(&i915->drm,
559 			    "Unable to read selective update y granularity\n");
560 		y = 4;
561 	}
562 	if (y == 0)
563 		y = 1;
564 
565 exit:
566 	intel_dp->psr.su_w_granularity = w;
567 	intel_dp->psr.su_y_granularity = y;
568 }
569 
570 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
571 {
572 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
573 
574 	if (intel_dp_is_edp(intel_dp)) {
575 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
576 			drm_dbg_kms(&i915->drm,
577 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
578 			return;
579 		}
580 
581 		if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
582 			drm_dbg_kms(&i915->drm,
583 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
584 			return;
585 		}
586 	}
587 
588 	intel_dp->psr.sink_panel_replay_support = true;
589 
590 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
591 		intel_dp->psr.sink_panel_replay_su_support = true;
592 
593 	drm_dbg_kms(&i915->drm,
594 		    "Panel replay %sis supported by panel\n",
595 		    intel_dp->psr.sink_panel_replay_su_support ?
596 		    "selective_update " : "");
597 }
598 
599 static void _psr_init_dpcd(struct intel_dp *intel_dp)
600 {
601 	struct drm_i915_private *i915 =
602 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
603 
604 	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
605 		    intel_dp->psr_dpcd[0]);
606 
607 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
608 		drm_dbg_kms(&i915->drm,
609 			    "PSR support not currently available for this panel\n");
610 		return;
611 	}
612 
613 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
614 		drm_dbg_kms(&i915->drm,
615 			    "Panel lacks power state control, PSR cannot be enabled\n");
616 		return;
617 	}
618 
619 	intel_dp->psr.sink_support = true;
620 	intel_dp->psr.sink_sync_latency =
621 		intel_dp_get_sink_sync_latency(intel_dp);
622 
623 	if (DISPLAY_VER(i915) >= 9 &&
624 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
625 		bool y_req = intel_dp->psr_dpcd[1] &
626 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
627 
628 		/*
629 		 * All panels that support PSR version 03h (PSR2 +
630 		 * Y-coordinate) can handle Y-coordinates in VSC but we are
631 		 * only sure that it is going to be used when required by the
632 		 * panel. This way the panel is capable of doing a selective
633 		 * update without an aux frame sync.
634 		 *
635 		 * To support PSR version 02h and PSR version 03h without
636 		 * Y-coordinate requirement panels we would need to enable
637 		 * GTC first.
638 		 */
639 		intel_dp->psr.sink_psr2_support = y_req &&
640 			intel_alpm_aux_wake_supported(intel_dp);
641 		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
642 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
643 	}
644 }
645 
646 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
647 {
648 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
649 			 sizeof(intel_dp->psr_dpcd));
650 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
651 			  &intel_dp->pr_dpcd);
652 
653 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
654 		_panel_replay_init_dpcd(intel_dp);
655 
656 	if (intel_dp->psr_dpcd[0])
657 		_psr_init_dpcd(intel_dp);
658 
659 	if (intel_dp->psr.sink_psr2_support ||
660 	    intel_dp->psr.sink_panel_replay_su_support)
661 		intel_dp_get_su_granularity(intel_dp);
662 }
663 
664 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
665 {
666 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
667 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
668 	u32 aux_clock_divider, aux_ctl;
669 	/* write DP_SET_POWER=D0 */
670 	static const u8 aux_msg[] = {
671 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
672 		[1] = (DP_SET_POWER >> 8) & 0xff,
673 		[2] = DP_SET_POWER & 0xff,
674 		[3] = 1 - 1,
675 		[4] = DP_SET_POWER_D0,
676 	};
677 	int i;
678 
679 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
680 	for (i = 0; i < sizeof(aux_msg); i += 4)
681 		intel_de_write(dev_priv,
682 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
683 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
684 
685 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
686 
687 	/* Start with bits set for DDI_AUX_CTL register */
688 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
689 					     aux_clock_divider);
690 
691 	/* Select only valid bits for SRD_AUX_CTL */
692 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
693 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
694 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
695 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
696 
697 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
698 		       aux_ctl);
699 }
700 
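/*
 * Selective update region early transport is only considered on display
 * version 20+ eDP, can be disabled via debugfs, and requires the sink to
 * advertise early transport (Panel Replay) or PSR2 with Y-coordinate and
 * early transport (PSR2).
 */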
701 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
702 {
703 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
704 
705 	if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) ||
706 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
707 		return false;
708 
709 	return panel_replay ?
710 		intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
711 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
712 		psr2_su_region_et_global_enabled(intel_dp);
713 }
714 
715 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
716 				      const struct intel_crtc_state *crtc_state)
717 {
718 	u8 val = DP_PANEL_REPLAY_ENABLE |
719 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
720 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
721 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
722 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
723 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
724 
725 	if (crtc_state->has_sel_update)
726 		val |= DP_PANEL_REPLAY_SU_ENABLE;
727 
728 	if (crtc_state->enable_psr2_su_region_et)
729 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
730 
731 	if (crtc_state->req_psr2_sdp_prior_scanline)
732 		panel_replay_config2 |=
733 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
734 
735 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
736 
737 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
738 			   panel_replay_config2);
739 }
740 
741 static void _psr_enable_sink(struct intel_dp *intel_dp,
742 			     const struct intel_crtc_state *crtc_state)
743 {
744 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
745 	u8 val = DP_PSR_ENABLE;
746 
747 	if (crtc_state->has_sel_update) {
748 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
749 	} else {
750 		if (intel_dp->psr.link_standby)
751 			val |= DP_PSR_MAIN_LINK_ACTIVE;
752 
753 		if (DISPLAY_VER(i915) >= 8)
754 			val |= DP_PSR_CRC_VERIFICATION;
755 	}
756 
757 	if (crtc_state->req_psr2_sdp_prior_scanline)
758 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
759 
760 	if (crtc_state->enable_psr2_su_region_et)
761 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
762 
763 	if (intel_dp->psr.entry_setup_frames > 0)
764 		val |= DP_PSR_FRAME_CAPTURE;
765 
766 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
767 }
768 
769 static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
770 				       const struct intel_crtc_state *crtc_state)
771 {
772 	u8 val;
773 
774 	/*
775 	 * eDP Panel Replay always uses ALPM.
776 	 * PSR2 uses ALPM but PSR1 doesn't.
777 	 */
778 	if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
779 					   !crtc_state->has_sel_update))
780 		return;
781 
782 	val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
783 
784 	if (crtc_state->has_panel_replay)
785 		val |= DP_ALPM_MODE_AUX_LESS;
786 
787 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
788 }
789 
790 void intel_psr_enable_sink(struct intel_dp *intel_dp,
791 			   const struct intel_crtc_state *crtc_state)
792 {
793 	intel_psr_enable_sink_alpm(intel_dp, crtc_state);
794 
795 	crtc_state->has_panel_replay ?
796 		_panel_replay_enable_sink(intel_dp, crtc_state) :
797 		_psr_enable_sink(intel_dp, crtc_state);
798 
799 	if (intel_dp_is_edp(intel_dp))
800 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
801 }
802 
803 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
804 {
805 	struct intel_connector *connector = intel_dp->attached_connector;
806 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
807 	u32 val = 0;
808 
809 	if (DISPLAY_VER(dev_priv) >= 11)
810 		val |= EDP_PSR_TP4_TIME_0us;
811 
812 	if (dev_priv->display.params.psr_safest_params) {
813 		val |= EDP_PSR_TP1_TIME_2500us;
814 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
815 		goto check_tp3_sel;
816 	}
817 
818 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
819 		val |= EDP_PSR_TP1_TIME_0us;
820 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
821 		val |= EDP_PSR_TP1_TIME_100us;
822 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
823 		val |= EDP_PSR_TP1_TIME_500us;
824 	else
825 		val |= EDP_PSR_TP1_TIME_2500us;
826 
827 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
828 		val |= EDP_PSR_TP2_TP3_TIME_0us;
829 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
830 		val |= EDP_PSR_TP2_TP3_TIME_100us;
831 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
832 		val |= EDP_PSR_TP2_TP3_TIME_500us;
833 	else
834 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
835 
836 	/*
837 	 * WA 0479: hsw,bdw
838 	 * "Do not skip both TP1 and TP2/TP3"
839 	 */
840 	if (DISPLAY_VER(dev_priv) < 9 &&
841 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
842 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
843 		val |= EDP_PSR_TP2_TP3_TIME_100us;
844 
845 check_tp3_sel:
846 	if (intel_dp_source_supports_tps3(dev_priv) &&
847 	    drm_dp_tps3_supported(intel_dp->dpcd))
848 		val |= EDP_PSR_TP_TP1_TP3;
849 	else
850 		val |= EDP_PSR_TP_TP1_TP2;
851 
852 	return val;
853 }
854 
855 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
856 {
857 	struct intel_connector *connector = intel_dp->attached_connector;
858 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
859 	int idle_frames;
860 
861 	/* Let's use 6 as the minimum to cover all known cases including the
862 	 * off-by-one issue that HW has in some cases.
863 	 */
864 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
865 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
866 
867 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
868 		idle_frames = 0xf;
869 
870 	return idle_frames;
871 }
872 
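/*
 * Program and enable PSR1: idle frames, wake-up (TP) timings, optional link
 * standby and CRC, and on display version 20+ the entry setup frames. Only
 * the restore-PSR-active-context field of the control register is preserved.
 */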
873 static void hsw_activate_psr1(struct intel_dp *intel_dp)
874 {
875 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
876 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
877 	u32 max_sleep_time = 0x1f;
878 	u32 val = EDP_PSR_ENABLE;
879 
880 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
881 
882 	if (DISPLAY_VER(dev_priv) < 20)
883 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
884 
885 	if (IS_HASWELL(dev_priv))
886 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
887 
888 	if (intel_dp->psr.link_standby)
889 		val |= EDP_PSR_LINK_STANDBY;
890 
891 	val |= intel_psr1_get_tp_time(intel_dp);
892 
893 	if (DISPLAY_VER(dev_priv) >= 8)
894 		val |= EDP_PSR_CRC_ENABLE;
895 
896 	if (DISPLAY_VER(dev_priv) >= 20)
897 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
898 
899 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
900 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
901 }
902 
903 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
904 {
905 	struct intel_connector *connector = intel_dp->attached_connector;
906 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
907 	u32 val = 0;
908 
909 	if (dev_priv->display.params.psr_safest_params)
910 		return EDP_PSR2_TP2_TIME_2500us;
911 
912 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
913 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
914 		val |= EDP_PSR2_TP2_TIME_50us;
915 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
916 		val |= EDP_PSR2_TP2_TIME_100us;
917 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
918 		val |= EDP_PSR2_TP2_TIME_500us;
919 	else
920 		val |= EDP_PSR2_TP2_TIME_2500us;
921 
922 	return val;
923 }
924 
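/*
 * PSR2 block count, expressed in lines: 8 lines is sufficient when both the
 * IO and fast wake line counts are below 9, otherwise 12 lines are used.
 */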
925 static int psr2_block_count_lines(struct intel_dp *intel_dp)
926 {
927 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
928 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
929 }
930 
931 static int psr2_block_count(struct intel_dp *intel_dp)
932 {
933 	return psr2_block_count_lines(intel_dp) / 4;
934 }
935 
936 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
937 {
938 	u8 frames_before_su_entry;
939 
940 	frames_before_su_entry = max_t(u8,
941 				       intel_dp->psr.sink_sync_latency + 1,
942 				       2);
943 
944 	/* Entry setup frames must be at least 1 less than frames before SU entry */
945 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
946 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
947 
948 	return frames_before_su_entry;
949 }
950 
951 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
952 {
953 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
954 	struct intel_psr *psr = &intel_dp->psr;
955 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
956 
957 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
958 		u32 val = psr->su_region_et_enabled ?
959 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
960 
961 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
962 			val |= EDP_PSR2_SU_SDP_SCANLINE;
963 
964 		intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
965 			       val);
966 	}
967 
968 	intel_de_rmw(dev_priv,
969 		     PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
970 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
971 
972 	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
973 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
974 }
975 
976 static void hsw_activate_psr2(struct intel_dp *intel_dp)
977 {
978 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
979 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
980 	u32 val = EDP_PSR2_ENABLE;
981 	u32 psr_val = 0;
982 
983 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
984 
985 	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
986 		val |= EDP_SU_TRACK_ENABLE;
987 
988 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
989 		val |= EDP_Y_COORDINATE_ENABLE;
990 
991 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
992 
993 	val |= intel_psr2_get_tp_time(intel_dp);
994 
995 	if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) {
996 		if (psr2_block_count(intel_dp) > 2)
997 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
998 		else
999 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1000 	}
1001 
1002 	/* Wa_22012278275:adl-p */
1003 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
1004 		static const u8 map[] = {
1005 			2, /* 5 lines */
1006 			1, /* 6 lines */
1007 			0, /* 7 lines */
1008 			3, /* 8 lines */
1009 			6, /* 9 lines */
1010 			5, /* 10 lines */
1011 			4, /* 11 lines */
1012 			7, /* 12 lines */
1013 		};
1014 		/*
1015 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1016 		 * comments below for more information
1017 		 */
1018 		int tmp;
1019 
1020 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1021 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1022 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1023 
1024 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1025 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1026 	} else if (DISPLAY_VER(dev_priv) >= 20) {
1027 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1028 	} else if (DISPLAY_VER(dev_priv) >= 12) {
1029 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1030 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1031 	} else if (DISPLAY_VER(dev_priv) >= 9) {
1032 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1033 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1034 	}
1035 
1036 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1037 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1038 
1039 	if (DISPLAY_VER(dev_priv) >= 20)
1040 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1041 
1042 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1043 		u32 tmp;
1044 
1045 		tmp = intel_de_read(dev_priv,
1046 				    PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
1047 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1048 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1049 		intel_de_write(dev_priv,
1050 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0);
1051 	}
1052 
1053 	if (intel_dp->psr.su_region_et_enabled)
1054 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1055 
1056 	/*
1057 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1058 	 * recommends keeping this bit unset while PSR2 is enabled.
1059 	 */
1060 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
1061 
1062 	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val);
1063 }
1064 
1065 static bool
1066 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
1067 {
1068 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1069 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1070 	else if (DISPLAY_VER(dev_priv) >= 12)
1071 		return cpu_transcoder == TRANSCODER_A;
1072 	else if (DISPLAY_VER(dev_priv) >= 9)
1073 		return cpu_transcoder == TRANSCODER_EDP;
1074 	else
1075 		return false;
1076 }
1077 
1078 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1079 {
1080 	if (!crtc_state->hw.active)
1081 		return 0;
1082 
1083 	return DIV_ROUND_UP(1000 * 1000,
1084 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1085 }
1086 
1087 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1088 				     u32 idle_frames)
1089 {
1090 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1091 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1092 
1093 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
1094 		     EDP_PSR2_IDLE_FRAMES_MASK,
1095 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1096 }
1097 
1098 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1099 {
1100 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1101 
1102 	psr2_program_idle_frames(intel_dp, 0);
1103 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
1104 }
1105 
1106 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1107 {
1108 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1109 
1110 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1111 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1112 }
1113 
1114 static void tgl_dc3co_disable_work(struct work_struct *work)
1115 {
1116 	struct intel_dp *intel_dp =
1117 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1118 
1119 	mutex_lock(&intel_dp->psr.lock);
1120 	/* If delayed work is pending, it is not idle */
1121 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1122 		goto unlock;
1123 
1124 	tgl_psr2_disable_dc3co(intel_dp);
1125 unlock:
1126 	mutex_unlock(&intel_dp->psr.lock);
1127 }
1128 
1129 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1130 {
1131 	if (!intel_dp->psr.dc3co_exitline)
1132 		return;
1133 
1134 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1135 	/* Before PSR2 exit disallow dc3co */
1136 	tgl_psr2_disable_dc3co(intel_dp);
1137 }
1138 
1139 static bool
1140 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1141 			      struct intel_crtc_state *crtc_state)
1142 {
1143 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1144 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1145 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1146 	enum port port = dig_port->base.port;
1147 
1148 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1149 		return pipe <= PIPE_B && port <= PORT_B;
1150 	else
1151 		return pipe == PIPE_A && port == PORT_A;
1152 }
1153 
1154 static void
1155 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1156 				  struct intel_crtc_state *crtc_state)
1157 {
1158 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1159 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1160 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1161 	u32 exit_scanlines;
1162 
1163 	/*
1164 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1165 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1166 	 * is applied. B.Specs:49196
1167 	 */
1168 	return;
1169 
1170 	/*
1171 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1172 	 * TODO: when the issue is addressed, this restriction should be removed.
1173 	 */
1174 	if (crtc_state->enable_psr2_sel_fetch)
1175 		return;
1176 
1177 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1178 		return;
1179 
1180 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1181 		return;
1182 
1183 	/* Wa_16011303918:adl-p */
1184 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1185 		return;
1186 
1187 	/*
1188 	 * DC3CO Exit time 200us B.Spec 49196
1189 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1190 	 */
1191 	exit_scanlines =
1192 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1193 
1194 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1195 		return;
1196 
1197 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1198 }
1199 
1200 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1201 					      struct intel_crtc_state *crtc_state)
1202 {
1203 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1204 
1205 	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1206 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1207 		drm_dbg_kms(&dev_priv->drm,
1208 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1209 		return false;
1210 	}
1211 
1212 	if (crtc_state->uapi.async_flip) {
1213 		drm_dbg_kms(&dev_priv->drm,
1214 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1215 		return false;
1216 	}
1217 
1218 	return crtc_state->enable_psr2_sel_fetch = true;
1219 }
1220 
1221 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1222 				   struct intel_crtc_state *crtc_state)
1223 {
1224 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1225 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1226 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1227 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1228 	u16 y_granularity = 0;
1229 
1230 	/* PSR2 HW only sends full lines so we only need to validate the width */
1231 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1232 		return false;
1233 
1234 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1235 		return false;
1236 
1237 	/* HW tracking is only aligned to 4 lines */
1238 	if (!crtc_state->enable_psr2_sel_fetch)
1239 		return intel_dp->psr.su_y_granularity == 4;
1240 
1241 	/*
1242 	 * adl_p and mtl platforms have 1 line granularity.
1243 	 * For other platforms with SW tracking we can adjust the y coordinates
1244 	 * to match the sink requirement if it is a multiple of 4.
1245 	 */
1246 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1247 		y_granularity = intel_dp->psr.su_y_granularity;
1248 	else if (intel_dp->psr.su_y_granularity <= 2)
1249 		y_granularity = 4;
1250 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1251 		y_granularity = intel_dp->psr.su_y_granularity;
1252 
1253 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1254 		return false;
1255 
1256 	if (crtc_state->dsc.compression_enable &&
1257 	    vdsc_cfg->slice_height % y_granularity)
1258 		return false;
1259 
1260 	crtc_state->su_y_granularity = y_granularity;
1261 	return true;
1262 }
1263 
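/*
 * Decide whether the "SDP prior to scanline" indication is needed: if hblank
 * leaves more than 100 ns of slack over the SU region SDP transmission time
 * nothing special is required, otherwise the indication is requested where
 * the hardware and panel support it.
 */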
1264 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1265 							struct intel_crtc_state *crtc_state)
1266 {
1267 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1268 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1269 	u32 hblank_total, hblank_ns, req_ns;
1270 
1271 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1272 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1273 
1274 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1275 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1276 
1277 	if ((hblank_ns - req_ns) > 100)
1278 		return true;
1279 
1280 	/* Not supported <13 / Wa_22012279113:adl-p */
1281 	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1282 		return false;
1283 
1284 	crtc_state->req_psr2_sdp_prior_scanline = true;
1285 	return true;
1286 }
1287 
1288 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1289 					const struct drm_display_mode *adjusted_mode)
1290 {
1291 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1292 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1293 	int entry_setup_frames = 0;
1294 
1295 	if (psr_setup_time < 0) {
1296 		drm_dbg_kms(&i915->drm,
1297 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1298 			    intel_dp->psr_dpcd[1]);
1299 		return -ETIME;
1300 	}
1301 
1302 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1303 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1304 		if (DISPLAY_VER(i915) >= 20) {
1305 			/* setup entry frames can be up to 3 frames */
1306 			entry_setup_frames = 1;
1307 			drm_dbg_kms(&i915->drm,
1308 				    "PSR setup entry frames %d\n",
1309 				    entry_setup_frames);
1310 		} else {
1311 			drm_dbg_kms(&i915->drm,
1312 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1313 				    psr_setup_time);
1314 			return -ETIME;
1315 		}
1316 	}
1317 
1318 	return entry_setup_frames;
1319 }
1320 
1321 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1322 				       const struct intel_crtc_state *crtc_state,
1323 				       bool aux_less)
1324 {
1325 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1326 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1327 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1328 	int wake_lines;
1329 
1330 	if (aux_less)
1331 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1332 	else
1333 		wake_lines = DISPLAY_VER(i915) < 20 ?
1334 			psr2_block_count_lines(intel_dp) :
1335 			intel_dp->alpm_parameters.io_wake_lines;
1336 
1337 	if (crtc_state->req_psr2_sdp_prior_scanline)
1338 		vblank -= 1;
1339 
1340 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1341 	if (vblank < wake_lines)
1342 		return false;
1343 
1344 	return true;
1345 }
1346 
1347 static bool alpm_config_valid(struct intel_dp *intel_dp,
1348 			      const struct intel_crtc_state *crtc_state,
1349 			      bool aux_less)
1350 {
1351 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1352 
1353 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1354 		drm_dbg_kms(&i915->drm,
1355 			    "PSR2/Panel Replay  not enabled, Unable to use long enough wake times\n");
1356 		return false;
1357 	}
1358 
1359 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1360 		drm_dbg_kms(&i915->drm,
1361 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1362 		return false;
1363 	}
1364 
1365 	return true;
1366 }
1367 
1368 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1369 				    struct intel_crtc_state *crtc_state)
1370 {
1371 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1372 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1373 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1374 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1375 
1376 	if (!intel_dp->psr.sink_psr2_support)
1377 		return false;
1378 
1379 	/* JSL and EHL only supports eDP 1.3 */
1380 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1381 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1382 		return false;
1383 	}
1384 
1385 	/* Wa_16011181250 */
1386 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1387 	    IS_DG2(dev_priv)) {
1388 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1389 		return false;
1390 	}
1391 
1392 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1393 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1394 		return false;
1395 	}
1396 
1397 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1398 		drm_dbg_kms(&dev_priv->drm,
1399 			    "PSR2 not supported in transcoder %s\n",
1400 			    transcoder_name(crtc_state->cpu_transcoder));
1401 		return false;
1402 	}
1403 
1404 	/*
1405 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1406 	 * resolution requires DSC to be enabled, priority is given to DSC
1407 	 * over PSR2.
1408 	 */
1409 	if (crtc_state->dsc.compression_enable &&
1410 	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1411 		drm_dbg_kms(&dev_priv->drm,
1412 			    "PSR2 cannot be enabled since DSC is enabled\n");
1413 		return false;
1414 	}
1415 
1416 	if (DISPLAY_VER(dev_priv) >= 12) {
1417 		psr_max_h = 5120;
1418 		psr_max_v = 3200;
1419 		max_bpp = 30;
1420 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1421 		psr_max_h = 4096;
1422 		psr_max_v = 2304;
1423 		max_bpp = 24;
1424 	} else if (DISPLAY_VER(dev_priv) == 9) {
1425 		psr_max_h = 3640;
1426 		psr_max_v = 2304;
1427 		max_bpp = 24;
1428 	}
1429 
1430 	if (crtc_state->pipe_bpp > max_bpp) {
1431 		drm_dbg_kms(&dev_priv->drm,
1432 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1433 			    crtc_state->pipe_bpp, max_bpp);
1434 		return false;
1435 	}
1436 
1437 	/* Wa_16011303918:adl-p */
1438 	if (crtc_state->vrr.enable &&
1439 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1440 		drm_dbg_kms(&dev_priv->drm,
1441 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1442 		return false;
1443 	}
1444 
1445 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1446 		return false;
1447 
1448 	if (!crtc_state->enable_psr2_sel_fetch &&
1449 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1450 		drm_dbg_kms(&dev_priv->drm,
1451 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1452 			    crtc_hdisplay, crtc_vdisplay,
1453 			    psr_max_h, psr_max_v);
1454 		return false;
1455 	}
1456 
1457 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1458 
1459 	return true;
1460 }
1461 
1462 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1463 					  struct intel_crtc_state *crtc_state)
1464 {
1465 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1466 
1467 	if (HAS_PSR2_SEL_FETCH(dev_priv) &&
1468 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1469 	    !HAS_PSR_HW_TRACKING(dev_priv)) {
1470 		drm_dbg_kms(&dev_priv->drm,
1471 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1472 		goto unsupported;
1473 	}
1474 
1475 	if (!psr2_global_enabled(intel_dp)) {
1476 		drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n");
1477 		goto unsupported;
1478 	}
1479 
1480 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1481 		goto unsupported;
1482 
1483 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1484 		drm_dbg_kms(&dev_priv->drm,
1485 			    "Selective update not enabled, SDP indication do not fit in hblank\n");
1486 		goto unsupported;
1487 	}
1488 
1489 	if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
1490 					     !intel_dp->psr.sink_panel_replay_su_support))
1491 		goto unsupported;
1492 
1493 	if (crtc_state->crc_enabled) {
1494 		drm_dbg_kms(&dev_priv->drm,
1495 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1496 		goto unsupported;
1497 	}
1498 
1499 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1500 		drm_dbg_kms(&dev_priv->drm,
1501 			    "Selective update not enabled, SU granularity not compatible\n");
1502 		goto unsupported;
1503 	}
1504 
1505 	crtc_state->enable_psr2_su_region_et =
1506 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1507 
1508 	return true;
1509 
1510 unsupported:
1511 	crtc_state->enable_psr2_sel_fetch = false;
1512 	return false;
1513 }
1514 
1515 static bool _psr_compute_config(struct intel_dp *intel_dp,
1516 				struct intel_crtc_state *crtc_state)
1517 {
1518 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1519 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1520 	int entry_setup_frames;
1521 
1522 	/*
1523 	 * Current PSR panels don't work reliably with VRR enabled.
1524 	 * So if VRR is enabled, do not enable PSR.
1525 	 */
1526 	if (crtc_state->vrr.enable)
1527 		return false;
1528 
1529 	if (!CAN_PSR(intel_dp))
1530 		return false;
1531 
1532 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1533 
1534 	if (entry_setup_frames >= 0) {
1535 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1536 	} else {
1537 		drm_dbg_kms(&dev_priv->drm,
1538 			    "PSR condition failed: PSR setup timing not met\n");
1539 		return false;
1540 	}
1541 
1542 	return true;
1543 }
1544 
1545 static bool
1546 _panel_replay_compute_config(struct intel_dp *intel_dp,
1547 			     const struct intel_crtc_state *crtc_state,
1548 			     const struct drm_connector_state *conn_state)
1549 {
1550 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1551 	struct intel_connector *connector =
1552 		to_intel_connector(conn_state->connector);
1553 	struct intel_hdcp *hdcp = &connector->hdcp;
1554 
1555 	if (!CAN_PANEL_REPLAY(intel_dp))
1556 		return false;
1557 
1558 	if (!panel_replay_global_enabled(intel_dp)) {
1559 		drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n");
1560 		return false;
1561 	}
1562 
1563 	if (!intel_dp_is_edp(intel_dp))
1564 		return true;
1565 
1566 	/* Remaining checks are for eDP only */
1567 
1568 	/* 128b/132b Panel Replay is not supported on eDP */
1569 	if (intel_dp_is_uhbr(crtc_state)) {
1570 		drm_dbg_kms(&i915->drm,
1571 			    "Panel Replay is not supported with 128b/132b\n");
1572 		return false;
1573 	}
1574 
1575 	/* HW will not allow Panel Replay on eDP when HDCP enabled */
1576 	if (conn_state->content_protection ==
1577 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1578 	    (conn_state->content_protection ==
1579 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1580 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1581 		drm_dbg_kms(&i915->drm,
1582 			    "Panel Replay is not supported with HDCP\n");
1583 		return false;
1584 	}
1585 
1586 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1587 		return false;
1588 
1589 	return true;
1590 }
1591 
1592 void intel_psr_compute_config(struct intel_dp *intel_dp,
1593 			      struct intel_crtc_state *crtc_state,
1594 			      struct drm_connector_state *conn_state)
1595 {
1596 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1597 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1598 
1599 	if (!psr_global_enabled(intel_dp)) {
1600 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1601 		return;
1602 	}
1603 
1604 	if (intel_dp->psr.sink_not_reliable) {
1605 		drm_dbg_kms(&dev_priv->drm,
1606 			    "PSR sink implementation is not reliable\n");
1607 		return;
1608 	}
1609 
1610 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1611 		drm_dbg_kms(&dev_priv->drm,
1612 			    "PSR condition failed: Interlaced mode enabled\n");
1613 		return;
1614 	}
1615 
1616 	/*
1617 	 * FIXME figure out what is wrong with PSR+joiner and
1618 	 * fix it. Presumably something related to the fact that
1619 	 * PSR is a transcoder level feature.
1620 	 */
1621 	if (crtc_state->joiner_pipes) {
1622 		drm_dbg_kms(&dev_priv->drm,
1623 			    "PSR disabled due to joiner\n");
1624 		return;
1625 	}
1626 
1627 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1628 								    crtc_state,
1629 								    conn_state);
1630 
1631 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1632 		_psr_compute_config(intel_dp, crtc_state);
1633 
1634 	if (!crtc_state->has_psr)
1635 		return;
1636 
1637 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1638 }
1639 
1640 void intel_psr_get_config(struct intel_encoder *encoder,
1641 			  struct intel_crtc_state *pipe_config)
1642 {
1643 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1644 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1645 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1646 	struct intel_dp *intel_dp;
1647 	u32 val;
1648 
1649 	if (!dig_port)
1650 		return;
1651 
1652 	intel_dp = &dig_port->dp;
1653 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1654 		return;
1655 
1656 	mutex_lock(&intel_dp->psr.lock);
1657 	if (!intel_dp->psr.enabled)
1658 		goto unlock;
1659 
1660 	if (intel_dp->psr.panel_replay_enabled) {
1661 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1662 	} else {
1663 		/*
1664 		 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1665 		 * enabled/disabled because of frontbuffer tracking and others.
1666 		 */
1667 		pipe_config->has_psr = true;
1668 	}
1669 
1670 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1671 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1672 
1673 	if (!intel_dp->psr.sel_update_enabled)
1674 		goto unlock;
1675 
1676 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1677 		val = intel_de_read(dev_priv,
1678 				    PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
1679 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1680 			pipe_config->enable_psr2_sel_fetch = true;
1681 	}
1682 
1683 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1684 
1685 	if (DISPLAY_VER(dev_priv) >= 12) {
1686 		val = intel_de_read(dev_priv,
1687 				    TRANS_EXITLINE(dev_priv, cpu_transcoder));
1688 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1689 	}
1690 unlock:
1691 	mutex_unlock(&intel_dp->psr.lock);
1692 }
1693 
1694 static void intel_psr_activate(struct intel_dp *intel_dp)
1695 {
1696 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1697 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1698 
1699 	drm_WARN_ON(&dev_priv->drm,
1700 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1701 		    intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE);
1702 
1703 	drm_WARN_ON(&dev_priv->drm,
1704 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1705 
1706 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1707 
1708 	lockdep_assert_held(&intel_dp->psr.lock);
1709 
1710 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1711 	if (intel_dp->psr.panel_replay_enabled)
1712 		dg2_activate_panel_replay(intel_dp);
1713 	else if (intel_dp->psr.sel_update_enabled)
1714 		hsw_activate_psr2(intel_dp);
1715 	else
1716 		hsw_activate_psr1(intel_dp);
1717 
1718 	intel_dp->psr.active = true;
1719 }
1720 
1721 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
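/*
 * Map the pipe driving PSR to its LATENCY_REPORTING_REMOVED bit in
 * GEN8_CHICKEN_DCPR_1, used by the Wa_16013835468/Wa_14015648006 handling
 * below.
 */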
1722 {
1723 	switch (intel_dp->psr.pipe) {
1724 	case PIPE_A:
1725 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1726 	case PIPE_B:
1727 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1728 	case PIPE_C:
1729 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1730 	case PIPE_D:
1731 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1732 	default:
1733 		MISSING_CASE(intel_dp->psr.pipe);
1734 		return 0;
1735 	}
1736 }
1737 
1738 /*
1739  * Wa_16013835468
1740  * Wa_14015648006
1741  */
1742 static void wm_optimization_wa(struct intel_dp *intel_dp,
1743 			       const struct intel_crtc_state *crtc_state)
1744 {
1745 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1746 	bool set_wa_bit = false;
1747 
1748 	/* Wa_14015648006 */
1749 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1750 		set_wa_bit |= crtc_state->wm_level_disabled;
1751 
1752 	/* Wa_16013835468 */
1753 	if (DISPLAY_VER(dev_priv) == 12)
1754 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1755 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1756 
1757 	if (set_wa_bit)
1758 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1759 			     0, wa_16013835468_bit_get(intel_dp));
1760 	else
1761 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1762 			     wa_16013835468_bit_get(intel_dp), 0);
1763 }
1764 
1765 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1766 				    const struct intel_crtc_state *crtc_state)
1767 {
1768 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1769 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1770 	u32 mask = 0;
1771 
1772 	/*
1773 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1774 	 * SKL+ use hardcoded values for PSR AUX transactions.
1775 	 */
1776 	if (DISPLAY_VER(dev_priv) < 9)
1777 		hsw_psr_setup_aux(intel_dp);
1778 
1779 	/*
1780 	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1781 	 * mask LPSP to avoid a dependency on other drivers that might block
1782 	 * runtime_pm, besides preventing other HW tracking issues, now that we
1783 	 * can rely on frontbuffer tracking.
1784 	 *
1785 	 * From bspec prior to LunarLake:
1786 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1787 	 * panel replay mode.
1788 	 *
1789 	 * From bspec beyond LunarLake:
1790 	 * Panel Replay on DP: No bits are applicable
1791 	 * Panel Replay on eDP: All bits are applicable
1792 	 */
1793 	if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
1794 		mask = EDP_PSR_DEBUG_MASK_HPD;
1795 
1796 	if (intel_dp_is_edp(intel_dp)) {
1797 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1798 
1799 		/*
1800 		 * For some unknown reason on HSW non-ULT (or at least on
1801 		 * Dell Latitude E6540) external displays start to flicker
1802 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1803 		 * higher than should be possible with an external display.
1804 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1805 		 * when external displays are active.
1806 		 */
1807 		if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1808 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1809 
1810 		if (DISPLAY_VER(dev_priv) < 20)
1811 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1812 
1813 		/*
1814 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1815 		 * registers in order to keep the CURSURFLIVE tricks working :(
1816 		 */
1817 		if (IS_DISPLAY_VER(dev_priv, 9, 10))
1818 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1819 
1820 		/* allow PSR with sprite enabled */
1821 		if (IS_HASWELL(dev_priv))
1822 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1823 	}
1824 
1825 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1826 
1827 	psr_irq_control(intel_dp);
1828 
1829 	/*
1830 	 * TODO: if future platforms support DC3CO in more than one
1831 	 * transcoder, EXITLINE will need to be unset when disabling PSR.
1832 	 */
1833 	if (intel_dp->psr.dc3co_exitline)
1834 		intel_de_rmw(dev_priv,
1835 			     TRANS_EXITLINE(dev_priv, cpu_transcoder),
1836 			     EXITLINE_MASK,
1837 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1838 
1839 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1840 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1841 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1842 			     IGNORE_PSR2_HW_TRACKING : 0);
1843 
1844 	if (intel_dp_is_edp(intel_dp))
1845 		intel_alpm_configure(intel_dp, crtc_state);
1846 
1847 	/*
1848 	 * Wa_16013835468
1849 	 * Wa_14015648006
1850 	 */
1851 	wm_optimization_wa(intel_dp, crtc_state);
1852 
1853 	if (intel_dp->psr.sel_update_enabled) {
1854 		if (DISPLAY_VER(dev_priv) == 9)
1855 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1856 				     PSR2_VSC_ENABLE_PROG_HEADER |
1857 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1858 
1859 		/*
1860 		 * Wa_16014451276:adlp,mtl[a0,b0]
1861 		 * All supported adlp panels have 1-based X granularity; this may
1862 		 * cause issues if non-supported panels are used.
1863 		 */
1864 		if (!intel_dp->psr.panel_replay_enabled &&
1865 		    (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1866 		     IS_ALDERLAKE_P(dev_priv)))
1867 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1868 				     0, ADLP_1_BASED_X_GRANULARITY);
1869 
1870 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1871 		if (!intel_dp->psr.panel_replay_enabled &&
1872 		    IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1873 			intel_de_rmw(dev_priv,
1874 				     MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
1875 				     0,
1876 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1877 		else if (IS_ALDERLAKE_P(dev_priv))
1878 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1879 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1880 	}
1881 }
1882 
1883 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1884 {
1885 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1886 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1887 	u32 val;
1888 
1889 	if (intel_dp->psr.panel_replay_enabled)
1890 		goto no_err;
1891 
1892 	/*
1893 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1894 	 * will still keep the error set even after the reset done in the
1895 	 * irq_preinstall and irq_uninstall hooks.
1896 	 * Enabling PSR in this situation causes the screen to freeze the
1897 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1898 	 * to avoid any rendering problems.
1899 	 */
1900 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1901 	val &= psr_irq_psr_error_bit_get(intel_dp);
1902 	if (val) {
1903 		intel_dp->psr.sink_not_reliable = true;
1904 		drm_dbg_kms(&dev_priv->drm,
1905 			    "PSR interruption error set, not enabling PSR\n");
1906 		return false;
1907 	}
1908 
1909 no_err:
1910 	return true;
1911 }
1912 
1913 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1914 				    const struct intel_crtc_state *crtc_state)
1915 {
1916 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1917 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1918 	u32 val;
1919 
1920 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1921 
1922 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
1923 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1924 	intel_dp->psr.busy_frontbuffer_bits = 0;
1925 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1926 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1927 	/* DC5/DC6 requires at least 6 idle frames */
1928 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1929 	intel_dp->psr.dc3co_exit_delay = val;
1930 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1931 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1932 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
1933 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1934 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1935 		crtc_state->req_psr2_sdp_prior_scanline;
1936 
1937 	if (!psr_interrupt_error_check(intel_dp))
1938 		return;
1939 
1940 	if (intel_dp->psr.panel_replay_enabled) {
1941 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1942 	} else {
1943 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1944 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
1945 
1946 		/*
1947 		 * Panel Replay has to be enabled before link training, so here
1948 		 * the sink is configured only for PSR.
1949 		 */
1950 		intel_psr_enable_sink(intel_dp, crtc_state);
1951 	}
1952 
1953 	if (intel_dp_is_edp(intel_dp))
1954 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
1955 
1956 	intel_psr_enable_source(intel_dp, crtc_state);
1957 	intel_dp->psr.enabled = true;
1958 	intel_dp->psr.paused = false;
1959 
1960 	intel_psr_activate(intel_dp);
1961 }
1962 
1963 static void intel_psr_exit(struct intel_dp *intel_dp)
1964 {
1965 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1966 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1967 	u32 val;
1968 
1969 	if (!intel_dp->psr.active) {
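	/*
	 * If PSR was never activated, just sanity check that the HW enable
	 * bits are indeed clear and bail out.
	 */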
1970 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1971 			val = intel_de_read(dev_priv,
1972 					    EDP_PSR2_CTL(dev_priv, cpu_transcoder));
1973 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1974 		}
1975 
1976 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1977 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1978 
1979 		return;
1980 	}
1981 
1982 	if (intel_dp->psr.panel_replay_enabled) {
1983 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1984 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1985 	} else if (intel_dp->psr.sel_update_enabled) {
1986 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1987 
1988 		val = intel_de_rmw(dev_priv,
1989 				   EDP_PSR2_CTL(dev_priv, cpu_transcoder),
1990 				   EDP_PSR2_ENABLE, 0);
1991 
1992 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1993 	} else {
1994 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1995 				   EDP_PSR_ENABLE, 0);
1996 
1997 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1998 	}
1999 	intel_dp->psr.active = false;
2000 }
2001 
2002 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2003 {
2004 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2005 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2006 	i915_reg_t psr_status;
2007 	u32 psr_status_mask;
2008 
2009 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2010 					  intel_dp->psr.panel_replay_enabled)) {
2011 		psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
2012 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2013 	} else {
2014 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
2015 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2016 	}
2017 
2018 	/* Wait till PSR is idle */
2019 	if (intel_de_wait_for_clear(dev_priv, psr_status,
2020 				    psr_status_mask, 2000))
2021 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
2022 }
2023 
2024 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2025 {
2026 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2027 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2028 
2029 	lockdep_assert_held(&intel_dp->psr.lock);
2030 
2031 	if (!intel_dp->psr.enabled)
2032 		return;
2033 
2034 	if (intel_dp->psr.panel_replay_enabled)
2035 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
2036 	else
2037 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
2038 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2039 
2040 	intel_psr_exit(intel_dp);
2041 	intel_psr_wait_exit_locked(intel_dp);
2042 
2043 	/*
2044 	 * Wa_16013835468
2045 	 * Wa_14015648006
2046 	 */
2047 	if (DISPLAY_VER(dev_priv) >= 11)
2048 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
2049 			     wa_16013835468_bit_get(intel_dp), 0);
2050 
2051 	if (intel_dp->psr.sel_update_enabled) {
2052 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2053 		if (!intel_dp->psr.panel_replay_enabled &&
2054 		    IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
2055 			intel_de_rmw(dev_priv,
2056 				     MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
2057 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2058 		else if (IS_ALDERLAKE_P(dev_priv))
2059 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
2060 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2061 	}
2062 
2063 	if (intel_dp_is_edp(intel_dp))
2064 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2065 
2066 	/* Panel Replay on eDP is always using ALPM aux less. */
2067 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
2068 		intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder),
2069 			     ALPM_CTL_ALPM_ENABLE |
2070 			     ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2071 
2072 		intel_de_rmw(dev_priv,
2073 			     PORT_ALPM_CTL(dev_priv, cpu_transcoder),
2074 			     PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2075 	}
2076 
2077 	/* Disable PSR on Sink */
2078 	if (!intel_dp->psr.panel_replay_enabled) {
2079 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2080 
2081 		if (intel_dp->psr.sel_update_enabled)
2082 			drm_dp_dpcd_writeb(&intel_dp->aux,
2083 					   DP_RECEIVER_ALPM_CONFIG, 0);
2084 	}
2085 
2086 	intel_dp->psr.enabled = false;
2087 	intel_dp->psr.panel_replay_enabled = false;
2088 	intel_dp->psr.sel_update_enabled = false;
2089 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2090 	intel_dp->psr.su_region_et_enabled = false;
2091 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2092 }
2093 
2094 /**
2095  * intel_psr_disable - Disable PSR
2096  * @intel_dp: Intel DP
2097  * @old_crtc_state: old CRTC state
2098  *
2099  * This function needs to be called before disabling pipe.
2100  */
2101 void intel_psr_disable(struct intel_dp *intel_dp,
2102 		       const struct intel_crtc_state *old_crtc_state)
2103 {
2104 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2105 
2106 	if (!old_crtc_state->has_psr)
2107 		return;
2108 
2109 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
2110 		return;
2111 
2112 	mutex_lock(&intel_dp->psr.lock);
2113 
2114 	intel_psr_disable_locked(intel_dp);
2115 
2116 	mutex_unlock(&intel_dp->psr.lock);
2117 	cancel_work_sync(&intel_dp->psr.work);
2118 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2119 }
2120 
2121 /**
2122  * intel_psr_pause - Pause PSR
2123  * @intel_dp: Intel DP
2124  *
2125  * This function needs to be called after enabling PSR.
2126  */
2127 void intel_psr_pause(struct intel_dp *intel_dp)
2128 {
2129 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2130 	struct intel_psr *psr = &intel_dp->psr;
2131 
2132 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2133 		return;
2134 
2135 	mutex_lock(&psr->lock);
2136 
2137 	if (!psr->enabled) {
2138 		mutex_unlock(&psr->lock);
2139 		return;
2140 	}
2141 
2142 	/* If we ever hit this, we will need to add refcount to pause/resume */
2143 	drm_WARN_ON(&dev_priv->drm, psr->paused);
2144 
2145 	intel_psr_exit(intel_dp);
2146 	intel_psr_wait_exit_locked(intel_dp);
2147 	psr->paused = true;
2148 
2149 	mutex_unlock(&psr->lock);
2150 
2151 	cancel_work_sync(&psr->work);
2152 	cancel_delayed_work_sync(&psr->dc3co_work);
2153 }
2154 
2155 /**
2156  * intel_psr_resume - Resume PSR
2157  * @intel_dp: Intel DP
2158  *
2159  * This function needs to be called after pausing PSR.
2160  */
2161 void intel_psr_resume(struct intel_dp *intel_dp)
2162 {
2163 	struct intel_psr *psr = &intel_dp->psr;
2164 
2165 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2166 		return;
2167 
2168 	mutex_lock(&psr->lock);
2169 
2170 	if (!psr->paused)
2171 		goto unlock;
2172 
2173 	psr->paused = false;
2174 	intel_psr_activate(intel_dp);
2175 
2176 unlock:
2177 	mutex_unlock(&psr->lock);
2178 }
2179 
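/*
 * ADL-P and display 14+ do not use the PSR2_MAN_TRK_CTL enable bit, hence
 * this helper returns 0 for those platforms.
 */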
2180 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
2181 {
2182 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
2183 		PSR2_MAN_TRK_CTL_ENABLE;
2184 }
2185 
2186 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
2187 {
2188 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2189 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2190 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2191 }
2192 
2193 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
2194 {
2195 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2196 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2197 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2198 }
2199 
2200 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
2201 {
2202 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2203 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2204 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2205 }
2206 
2207 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2208 {
2209 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2210 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2211 
2212 	if (intel_dp->psr.psr2_sel_fetch_enabled)
2213 		intel_de_write(dev_priv,
2214 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2215 			       man_trk_ctl_enable_bit_get(dev_priv) |
2216 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2217 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2218 			       man_trk_ctl_continuos_full_frame(dev_priv));
2219 
2220 	/*
2221 	 * Display WA #0884: skl+
2222 	 * This documented WA for bxt can be safely applied
2223 	 * broadly so we can force HW tracking to exit PSR
2224 	 * instead of disabling and re-enabling.
2225 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2226 	 * but it makes more sense to write to the currently active
2227 	 * pipe.
2228 	 *
2229 	 * This workaround does not exist for platforms with display 10 or
2230 	 * newer, but testing proved that it works up to display 13; anything
2231 	 * newer than that will need testing.
2232 	 */
2233 	intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
2234 }
2235 
2236 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2237 {
2238 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2239 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2240 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2241 	struct intel_encoder *encoder;
2242 
2243 	if (!crtc_state->enable_psr2_sel_fetch)
2244 		return;
2245 
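	/*
	 * Leave the manual tracking register alone while continuous full
	 * frame fetches are in use (set up by the invalidate path).
	 */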
2246 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2247 					     crtc_state->uapi.encoder_mask) {
2248 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2249 
2250 		lockdep_assert_held(&intel_dp->psr.lock);
2251 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2252 			return;
2253 		break;
2254 	}
2255 
2256 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2257 		       crtc_state->psr2_man_track_ctl);
2258 
2259 	if (!crtc_state->enable_psr2_su_region_et)
2260 		return;
2261 
2262 	intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2263 		       crtc_state->pipe_srcsz_early_tpt);
2264 }
2265 
2266 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2267 				  bool full_update)
2268 {
2269 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2270 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2271 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2272 
2273 	/* SF partial frame enable has to be set even on full update */
2274 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2275 
2276 	if (full_update) {
2277 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2278 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
2279 		goto exit;
2280 	}
2281 
2282 	if (crtc_state->psr2_su_area.y1 == -1)
2283 		goto exit;
2284 
2285 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2286 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2287 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2288 	} else {
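		/* Pre-ADL-P hardware addresses the SU region in blocks of 4 lines. */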
2289 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2290 			    crtc_state->psr2_su_area.y1 % 4 ||
2291 			    crtc_state->psr2_su_area.y2 % 4);
2292 
2293 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2294 			crtc_state->psr2_su_area.y1 / 4 + 1);
2295 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2296 			crtc_state->psr2_su_area.y2 / 4 + 1);
2297 	}
2298 exit:
2299 	crtc_state->psr2_man_track_ctl = val;
2300 }
2301 
2302 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2303 					  bool full_update)
2304 {
2305 	int width, height;
2306 
2307 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2308 		return 0;
2309 
2310 	width = drm_rect_width(&crtc_state->psr2_su_area);
2311 	height = drm_rect_height(&crtc_state->psr2_su_area);
2312 
2313 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2314 }
2315 
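/*
 * Grow the vertical extent of overlap_damage_area to include damage_area,
 * after clipping damage_area to the pipe source rectangle.
 */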
2316 static void clip_area_update(struct drm_rect *overlap_damage_area,
2317 			     struct drm_rect *damage_area,
2318 			     struct drm_rect *pipe_src)
2319 {
2320 	if (!drm_rect_intersect(damage_area, pipe_src))
2321 		return;
2322 
2323 	if (overlap_damage_area->y1 == -1) {
2324 		overlap_damage_area->y1 = damage_area->y1;
2325 		overlap_damage_area->y2 = damage_area->y2;
2326 		return;
2327 	}
2328 
2329 	if (damage_area->y1 < overlap_damage_area->y1)
2330 		overlap_damage_area->y1 = damage_area->y1;
2331 
2332 	if (damage_area->y2 > overlap_damage_area->y2)
2333 		overlap_damage_area->y2 = damage_area->y2;
2334 }
2335 
2336 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2337 {
2338 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2339 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2340 	u16 y_alignment;
2341 
2342 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2343 	if (crtc_state->dsc.compression_enable &&
2344 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2345 		y_alignment = vdsc_cfg->slice_height;
2346 	else
2347 		y_alignment = crtc_state->su_y_granularity;
2348 
2349 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
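	/* Align y1 down and round y2 up to the SU granularity. */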
2350 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2351 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2352 						y_alignment) + 1) * y_alignment;
2353 }
2354 
2355 /*
2356  * When early transport is in use we need to extend the SU area to fully
2357  * cover the cursor whenever the cursor is in the SU area.
2358  */
2359 static void
2360 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2361 				  struct intel_crtc *crtc,
2362 				  bool *cursor_in_su_area)
2363 {
2364 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2365 	struct intel_plane_state *new_plane_state;
2366 	struct intel_plane *plane;
2367 	int i;
2368 
2369 	if (!crtc_state->enable_psr2_su_region_et)
2370 		return;
2371 
2372 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2373 		struct drm_rect inter;
2374 
2375 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2376 			continue;
2377 
2378 		if (plane->id != PLANE_CURSOR)
2379 			continue;
2380 
2381 		if (!new_plane_state->uapi.visible)
2382 			continue;
2383 
2384 		inter = crtc_state->psr2_su_area;
2385 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2386 			continue;
2387 
2388 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2389 				 &crtc_state->pipe_src);
2390 		*cursor_in_su_area = true;
2391 	}
2392 }
2393 
2394 /*
2395  * TODO: Not clear how to handle planes with a negative position.
2396  * Also, planes are not updated if they have a negative X position,
2397  * so for now do a full update in these cases.
2398  *
2399  * Plane scaling and rotation are not supported by selective fetch and both
2400  * properties can change without a modeset, so they need to be checked at
2401  * every atomic commit.
2402  */
2403 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2404 {
2405 	if (plane_state->uapi.dst.y1 < 0 ||
2406 	    plane_state->uapi.dst.x1 < 0 ||
2407 	    plane_state->scaler_id >= 0 ||
2408 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2409 		return false;
2410 
2411 	return true;
2412 }
2413 
2414 /*
2415  * Check for pipe properties that are not supported by selective fetch.
2416  *
2417  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2418  * after intel_psr_compute_config(), so for now keep PSR2 selective fetch
2419  * enabled and go down the full update path.
2420  */
2421 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2422 {
2423 	if (crtc_state->scaler_state.scaler_id >= 0)
2424 		return false;
2425 
2426 	return true;
2427 }
2428 
2429 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2430 				struct intel_crtc *crtc)
2431 {
2432 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2433 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2434 	struct intel_plane_state *new_plane_state, *old_plane_state;
2435 	struct intel_plane *plane;
2436 	bool full_update = false, cursor_in_su_area = false;
2437 	int i, ret;
2438 
2439 	if (!crtc_state->enable_psr2_sel_fetch)
2440 		return 0;
2441 
2442 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2443 		full_update = true;
2444 		goto skip_sel_fetch_set_loop;
2445 	}
2446 
2447 	crtc_state->psr2_su_area.x1 = 0;
2448 	crtc_state->psr2_su_area.y1 = -1;
2449 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2450 	crtc_state->psr2_su_area.y2 = -1;
2451 
2452 	/*
2453 	 * Calculate the minimal selective fetch area of each plane and
2454 	 * calculate the pipe damaged area.
2455 	 * In the next loop the plane selective fetch area will actually be set
2456 	 * using the whole pipe damaged area.
2457 	 */
2458 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2459 					     new_plane_state, i) {
2460 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2461 						      .x2 = INT_MAX };
2462 
2463 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2464 			continue;
2465 
2466 		if (!new_plane_state->uapi.visible &&
2467 		    !old_plane_state->uapi.visible)
2468 			continue;
2469 
2470 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2471 			full_update = true;
2472 			break;
2473 		}
2474 
2475 		/*
2476 		 * If the visibility changed or the plane moved, mark the whole
2477 		 * plane area as damaged as it needs to be completely redrawn in
2478 		 * both the new and old positions.
2479 		 */
2480 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2481 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2482 				     &old_plane_state->uapi.dst)) {
2483 			if (old_plane_state->uapi.visible) {
2484 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2485 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2486 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2487 						 &crtc_state->pipe_src);
2488 			}
2489 
2490 			if (new_plane_state->uapi.visible) {
2491 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2492 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2493 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2494 						 &crtc_state->pipe_src);
2495 			}
2496 			continue;
2497 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2498 			/* If alpha changed mark the whole plane area as damaged */
2499 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2500 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2501 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2502 					 &crtc_state->pipe_src);
2503 			continue;
2504 		}
2505 
2506 		src = drm_plane_state_src(&new_plane_state->uapi);
2507 		drm_rect_fp_to_int(&src, &src);
2508 
2509 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2510 						     &new_plane_state->uapi, &damaged_area))
2511 			continue;
2512 
2513 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2514 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2515 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2516 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2517 
2518 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2519 	}
2520 
2521 	/*
2522 	 * TODO: For now we are just using full update in case
2523 	 * selective fetch area calculation fails. To optimize this we
2524 	 * should identify cases where this happens and fix the area
2525 	 * calculation for those.
2526 	 */
2527 	if (crtc_state->psr2_su_area.y1 == -1) {
2528 		drm_info_once(&dev_priv->drm,
2529 			      "Selective fetch area calculation failed in pipe %c\n",
2530 			      pipe_name(crtc->pipe));
2531 		full_update = true;
2532 	}
2533 
2534 	if (full_update)
2535 		goto skip_sel_fetch_set_loop;
2536 
2537 	/* Wa_14014971492 */
2538 	if (!crtc_state->has_panel_replay &&
2539 	    ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2540 	      IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
2541 	    crtc_state->splitter.enable)
2542 		crtc_state->psr2_su_area.y1 = 0;
2543 
2544 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2545 	if (ret)
2546 		return ret;
2547 
2548 	/*
2549 	 * Adjust su area to cover cursor fully as necessary (early
2550 	 * transport). This needs to be done after
2551 	 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2552 	 * affected planes even when cursor is not updated by itself.
2553 	 */
2554 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2555 
2556 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2557 
2558 	/*
2559 	 * Now that we have the pipe damaged area, check if it intersects with
2560 	 * every plane; if it does, set the plane selective fetch area.
2561 	 */
2562 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2563 					     new_plane_state, i) {
2564 		struct drm_rect *sel_fetch_area, inter;
2565 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2566 
2567 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2568 		    !new_plane_state->uapi.visible)
2569 			continue;
2570 
2571 		inter = crtc_state->psr2_su_area;
2572 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2573 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2574 			sel_fetch_area->y1 = -1;
2575 			sel_fetch_area->y2 = -1;
2576 			/*
2577 			 * if plane sel fetch was previously enabled ->
2578 			 * disable it
2579 			 */
2580 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2581 				crtc_state->update_planes |= BIT(plane->id);
2582 
2583 			continue;
2584 		}
2585 
2586 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2587 			full_update = true;
2588 			break;
2589 		}
2590 
2591 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2592 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2593 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2594 		crtc_state->update_planes |= BIT(plane->id);
2595 
2596 		/*
2597 		 * Sel_fetch_area is calculated for the UV plane. Use the
2598 		 * same area for the Y plane as well.
2599 		 */
2600 		if (linked) {
2601 			struct intel_plane_state *linked_new_plane_state;
2602 			struct drm_rect *linked_sel_fetch_area;
2603 
2604 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2605 			if (IS_ERR(linked_new_plane_state))
2606 				return PTR_ERR(linked_new_plane_state);
2607 
2608 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2609 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2610 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2611 			crtc_state->update_planes |= BIT(linked->id);
2612 		}
2613 	}
2614 
2615 skip_sel_fetch_set_loop:
2616 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2617 	crtc_state->pipe_srcsz_early_tpt =
2618 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2619 	return 0;
2620 }
2621 
2622 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2623 				struct intel_crtc *crtc)
2624 {
2625 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2626 	const struct intel_crtc_state *old_crtc_state =
2627 		intel_atomic_get_old_crtc_state(state, crtc);
2628 	const struct intel_crtc_state *new_crtc_state =
2629 		intel_atomic_get_new_crtc_state(state, crtc);
2630 	struct intel_encoder *encoder;
2631 
2632 	if (!HAS_PSR(i915))
2633 		return;
2634 
2635 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2636 					     old_crtc_state->uapi.encoder_mask) {
2637 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2638 		struct intel_psr *psr = &intel_dp->psr;
2639 		bool needs_to_disable = false;
2640 
2641 		mutex_lock(&psr->lock);
2642 
2643 		/*
2644 		 * Reasons to disable:
2645 		 * - PSR disabled in new state
2646 		 * - All planes will go inactive
2647 		 * - Changing between PSR versions
2648 		 * - Region Early Transport changing
2649 		 * - Display WA #1136: skl, bxt
2650 		 */
2651 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2652 		needs_to_disable |= !new_crtc_state->has_psr;
2653 		needs_to_disable |= !new_crtc_state->active_planes;
2654 		needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
2655 		needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
2656 			psr->su_region_et_enabled;
2657 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2658 			new_crtc_state->wm_level_disabled;
2659 
2660 		if (psr->enabled && needs_to_disable)
2661 			intel_psr_disable_locked(intel_dp);
2662 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2663 			/* Wa_14015648006 */
2664 			wm_optimization_wa(intel_dp, new_crtc_state);
2665 
2666 		mutex_unlock(&psr->lock);
2667 	}
2668 }
2669 
2670 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2671 				 struct intel_crtc *crtc)
2672 {
2673 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2674 	const struct intel_crtc_state *crtc_state =
2675 		intel_atomic_get_new_crtc_state(state, crtc);
2676 	struct intel_encoder *encoder;
2677 
2678 	if (!crtc_state->has_psr)
2679 		return;
2680 
2681 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2682 					     crtc_state->uapi.encoder_mask) {
2683 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2684 		struct intel_psr *psr = &intel_dp->psr;
2685 		bool keep_disabled = false;
2686 
2687 		mutex_lock(&psr->lock);
2688 
2689 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2690 
2691 		keep_disabled |= psr->sink_not_reliable;
2692 		keep_disabled |= !crtc_state->active_planes;
2693 
2694 		/* Display WA #1136: skl, bxt */
2695 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2696 			crtc_state->wm_level_disabled;
2697 
2698 		if (!psr->enabled && !keep_disabled)
2699 			intel_psr_enable_locked(intel_dp, crtc_state);
2700 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2701 			/* Wa_14015648006 */
2702 			wm_optimization_wa(intel_dp, crtc_state);
2703 
2704 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2705 		if (crtc_state->crc_enabled && psr->enabled)
2706 			psr_force_hw_tracking_exit(intel_dp);
2707 
2708 		/*
2709 		 * Clear possible busy bits in case we have
2710 		 * invalidate -> flip -> flush sequence.
2711 		 */
2712 		intel_dp->psr.busy_frontbuffer_bits = 0;
2713 
2714 		mutex_unlock(&psr->lock);
2715 	}
2716 }
2717 
2718 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2719 {
2720 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2721 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2722 
2723 	/*
2724 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2725 	 * As all higher states have bit 4 of the PSR2 state set, we can just
2726 	 * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2727 	 */
2728 	return intel_de_wait_for_clear(dev_priv,
2729 				       EDP_PSR2_STATUS(dev_priv, cpu_transcoder),
2730 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2731 }
2732 
2733 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2734 {
2735 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2736 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2737 
2738 	/*
2739 	 * From bspec: Panel Self Refresh (BDW+)
2740 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2741 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2742 	 * defensive enough to cover everything.
2743 	 */
2744 	return intel_de_wait_for_clear(dev_priv,
2745 				       psr_status_reg(dev_priv, cpu_transcoder),
2746 				       EDP_PSR_STATUS_STATE_MASK, 50);
2747 }
2748 
2749 static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2750 {
2751 	return intel_dp_is_edp(intel_dp) ?
2752 		_psr2_ready_for_pipe_update_locked(intel_dp) :
2753 		_psr1_ready_for_pipe_update_locked(intel_dp);
2754 }
2755 
2756 /**
2757  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2758  * @new_crtc_state: new CRTC state
2759  *
2760  * This function is expected to be called from pipe_update_start() where it is
2761  * not expected to race with PSR enable or disable.
2762  */
2763 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2764 {
2765 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2766 	struct intel_encoder *encoder;
2767 
2768 	if (!new_crtc_state->has_psr)
2769 		return;
2770 
2771 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2772 					     new_crtc_state->uapi.encoder_mask) {
2773 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2774 		int ret;
2775 
2776 		lockdep_assert_held(&intel_dp->psr.lock);
2777 
2778 		if (!intel_dp->psr.enabled)
2779 			continue;
2780 
2781 		if (intel_dp->psr.panel_replay_enabled)
2782 			ret = _panel_replay_ready_for_pipe_update_locked(intel_dp);
2783 		else if (intel_dp->psr.sel_update_enabled)
2784 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2785 		else
2786 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2787 
2788 		if (ret)
2789 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2790 	}
2791 }
2792 
2793 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2794 {
2795 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2796 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2797 	i915_reg_t reg;
2798 	u32 mask;
2799 	int err;
2800 
2801 	if (!intel_dp->psr.enabled)
2802 		return false;
2803 
2804 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2805 					  intel_dp->psr.panel_replay_enabled)) {
2806 		reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
2807 		mask = EDP_PSR2_STATUS_STATE_MASK;
2808 	} else {
2809 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2810 		mask = EDP_PSR_STATUS_STATE_MASK;
2811 	}
2812 
2813 	mutex_unlock(&intel_dp->psr.lock);
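	/* Drop the PSR lock across the potentially long wait for idle. */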
2814 
2815 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2816 	if (err)
2817 		drm_err(&dev_priv->drm,
2818 			"Timed out waiting for PSR Idle for re-enable\n");
2819 
2820 	/* After the unlocked wait, verify that PSR is still wanted! */
2821 	mutex_lock(&intel_dp->psr.lock);
2822 	return err == 0 && intel_dp->psr.enabled;
2823 }
2824 
2825 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2826 {
2827 	struct drm_connector_list_iter conn_iter;
2828 	struct drm_modeset_acquire_ctx ctx;
2829 	struct drm_atomic_state *state;
2830 	struct drm_connector *conn;
2831 	int err = 0;
2832 
2833 	state = drm_atomic_state_alloc(&dev_priv->drm);
2834 	if (!state)
2835 		return -ENOMEM;
2836 
2837 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2838 
2839 	state->acquire_ctx = &ctx;
2840 	to_intel_atomic_state(state)->internal = true;
2841 
2842 retry:
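	/*
	 * Mark every active eDP connector's CRTC as mode-changed so the
	 * commit below forces the PSR configuration to be recomputed.
	 */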
2843 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2844 	drm_for_each_connector_iter(conn, &conn_iter) {
2845 		struct drm_connector_state *conn_state;
2846 		struct drm_crtc_state *crtc_state;
2847 
2848 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2849 			continue;
2850 
2851 		conn_state = drm_atomic_get_connector_state(state, conn);
2852 		if (IS_ERR(conn_state)) {
2853 			err = PTR_ERR(conn_state);
2854 			break;
2855 		}
2856 
2857 		if (!conn_state->crtc)
2858 			continue;
2859 
2860 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2861 		if (IS_ERR(crtc_state)) {
2862 			err = PTR_ERR(crtc_state);
2863 			break;
2864 		}
2865 
2866 		/* Mark mode as changed to trigger a pipe->update() */
2867 		crtc_state->mode_changed = true;
2868 	}
2869 	drm_connector_list_iter_end(&conn_iter);
2870 
2871 	if (err == 0)
2872 		err = drm_atomic_commit(state);
2873 
2874 	if (err == -EDEADLK) {
2875 		drm_atomic_state_clear(state);
2876 		err = drm_modeset_backoff(&ctx);
2877 		if (!err)
2878 			goto retry;
2879 	}
2880 
2881 	drm_modeset_drop_locks(&ctx);
2882 	drm_modeset_acquire_fini(&ctx);
2883 	drm_atomic_state_put(state);
2884 
2885 	return err;
2886 }
2887 
2888 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2889 {
2890 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2891 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2892 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2893 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
2894 	u32 old_mode, old_disable_bits;
2895 	int ret;
2896 
2897 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2898 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
2899 		    I915_PSR_DEBUG_MODE_MASK) ||
2900 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2901 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2902 		return -EINVAL;
2903 	}
2904 
2905 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2906 	if (ret)
2907 		return ret;
2908 
2909 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2910 	old_disable_bits = intel_dp->psr.debug &
2911 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
2912 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
2913 
2914 	intel_dp->psr.debug = val;
2915 
2916 	/*
2917 	 * Do it right away if it's already enabled, otherwise it will be done
2918 	 * when enabling the source.
2919 	 */
2920 	if (intel_dp->psr.enabled)
2921 		psr_irq_control(intel_dp);
2922 
2923 	mutex_unlock(&intel_dp->psr.lock);
2924 
2925 	if (old_mode != mode || old_disable_bits != disable_bits)
2926 		ret = intel_psr_fastset_force(dev_priv);
2927 
2928 	return ret;
2929 }
2930 
2931 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2932 {
2933 	struct intel_psr *psr = &intel_dp->psr;
2934 
2935 	intel_psr_disable_locked(intel_dp);
2936 	psr->sink_not_reliable = true;
2937 	/* let's make sure that the sink is awake */
2938 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2939 }
2940 
2941 static void intel_psr_work(struct work_struct *work)
2942 {
2943 	struct intel_dp *intel_dp =
2944 		container_of(work, typeof(*intel_dp), psr.work);
2945 
2946 	mutex_lock(&intel_dp->psr.lock);
2947 
2948 	if (!intel_dp->psr.enabled)
2949 		goto unlock;
2950 
2951 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2952 		intel_psr_handle_irq(intel_dp);
2953 
2954 	/*
2955 	 * We have to make sure PSR is ready for re-enable,
2956 	 * otherwise it stays disabled until the next full enable/disable cycle.
2957 	 * PSR might take some time to get fully disabled
2958 	 * and be ready for re-enable.
2959 	 */
2960 	if (!__psr_wait_for_idle_locked(intel_dp))
2961 		goto unlock;
2962 
2963 	/*
2964 	 * The delayed work can race with an invalidate hence we need to
2965 	 * recheck. Since psr_flush first clears this and then reschedules we
2966 	 * won't ever miss a flush when bailing out here.
2967 	 */
2968 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2969 		goto unlock;
2970 
2971 	intel_psr_activate(intel_dp);
2972 unlock:
2973 	mutex_unlock(&intel_dp->psr.lock);
2974 }
2975 
2976 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2977 {
2978 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2979 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2980 
2981 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2982 		u32 val;
2983 
2984 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2985 			/* Send one update otherwise lag is observed in screen */
2986 			intel_de_write(dev_priv,
2987 				       CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
2988 				       0);
2989 			return;
2990 		}
2991 
2992 		val = man_trk_ctl_enable_bit_get(dev_priv) |
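		/*
		 * Switch to continuous full frame fetches until the flush
		 * side can restore selective updates.
		 */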
2993 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2994 		      man_trk_ctl_continuos_full_frame(dev_priv);
2995 		intel_de_write(dev_priv,
2996 			       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
2997 			       val);
2998 		intel_de_write(dev_priv,
2999 			       CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
3000 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3001 	} else {
3002 		intel_psr_exit(intel_dp);
3003 	}
3004 }
3005 
3006 /**
3007  * intel_psr_invalidate - Invalidate PSR
3008  * @dev_priv: i915 device
3009  * @frontbuffer_bits: frontbuffer plane tracking bits
3010  * @origin: which operation caused the invalidate
3011  *
3012  * Since the hardware frontbuffer tracking has gaps we need to integrate
3013  * with the software frontbuffer tracking. This function gets called every
3014  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3015  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3016  *
3017  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3018  */
3019 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
3020 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3021 {
3022 	struct intel_encoder *encoder;
3023 
3024 	if (origin == ORIGIN_FLIP)
3025 		return;
3026 
3027 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3028 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3029 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3030 
3031 		mutex_lock(&intel_dp->psr.lock);
3032 		if (!intel_dp->psr.enabled) {
3033 			mutex_unlock(&intel_dp->psr.lock);
3034 			continue;
3035 		}
3036 
3037 		pipe_frontbuffer_bits &=
3038 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3039 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3040 
3041 		if (pipe_frontbuffer_bits)
3042 			_psr_invalidate_handle(intel_dp);
3043 
3044 		mutex_unlock(&intel_dp->psr.lock);
3045 	}
3046 }
3047 /*
3048  * When we completely rely on PSR2 S/W tracking in the future,
3049  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
3050  * events as well; therefore tgl_dc3co_flush_locked() will need to be
3051  * changed accordingly.
3052  */
3053 static void
3054 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3055 		       enum fb_op_origin origin)
3056 {
3057 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3058 
3059 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3060 	    !intel_dp->psr.active)
3061 		return;
3062 
3063 	/*
3064 	 * Every frontbuffer flush/flip event modifies the delay of the delayed
3065 	 * work; when the delayed work finally runs, it means the display has been idle.
3066 	 */
3067 	if (!(frontbuffer_bits &
3068 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3069 		return;
3070 
3071 	tgl_psr2_enable_dc3co(intel_dp);
3072 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3073 			 intel_dp->psr.dc3co_exit_delay);
3074 }
3075 
3076 static void _psr_flush_handle(struct intel_dp *intel_dp)
3077 {
3078 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3079 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3080 
3081 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3082 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3083 			/* can we turn CFF off? */
3084 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
3085 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
3086 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
3087 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
3088 					man_trk_ctl_continuos_full_frame(dev_priv);
3089 
3090 				/*
3091 				 * Set psr2_sel_fetch_cff_enabled as false to allow selective
3092 				 * updates. Still keep cff bit enabled as we don't have proper
3093 				 * SU configuration in case update is sent for any reason after
3094 				 * sff bit gets cleared by the HW on next vblank.
3095 				 */
3096 				intel_de_write(dev_priv,
3097 					       PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
3098 					       val);
3099 				intel_de_write(dev_priv,
3100 					       CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
3101 					       0);
3102 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3103 			}
3104 		} else {
3105 			/*
3106 			 * continuous full frame is disabled, only a single full
3107 			 * frame is required
3108 			 */
3109 			psr_force_hw_tracking_exit(intel_dp);
3110 		}
3111 	} else {
3112 		psr_force_hw_tracking_exit(intel_dp);
3113 
3114 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
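		/* Re-activate PSR from the work queue once the HW is idle. */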
3115 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3116 	}
3117 }
3118 
3119 /**
3120  * intel_psr_flush - Flush PSR
3121  * @dev_priv: i915 device
3122  * @frontbuffer_bits: frontbuffer plane tracking bits
3123  * @origin: which operation caused the flush
3124  *
3125  * Since the hardware frontbuffer tracking has gaps we need to integrate
3126  * with the software frontbuffer tracking. This function gets called every
3127  * time frontbuffer rendering has completed and flushed out to memory. PSR
3128  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3129  *
3130  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3131  */
3132 void intel_psr_flush(struct drm_i915_private *dev_priv,
3133 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3134 {
3135 	struct intel_encoder *encoder;
3136 
3137 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3138 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3139 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3140 
3141 		mutex_lock(&intel_dp->psr.lock);
3142 		if (!intel_dp->psr.enabled) {
3143 			mutex_unlock(&intel_dp->psr.lock);
3144 			continue;
3145 		}
3146 
3147 		pipe_frontbuffer_bits &=
3148 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3149 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3150 
3151 		/*
3152 		 * If PSR is paused by an explicit intel_psr_pause() call,
3153 		 * we have to ensure that the PSR is not activated until
3154 		 * intel_psr_resume() is called.
3155 		 */
3156 		if (intel_dp->psr.paused)
3157 			goto unlock;
3158 
3159 		if (origin == ORIGIN_FLIP ||
3160 		    (origin == ORIGIN_CURSOR_UPDATE &&
3161 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3162 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3163 			goto unlock;
3164 		}
3165 
3166 		if (pipe_frontbuffer_bits == 0)
3167 			goto unlock;
3168 
3169 		/* By definition flush = invalidate + flush */
3170 		_psr_flush_handle(intel_dp);
3171 unlock:
3172 		mutex_unlock(&intel_dp->psr.lock);
3173 	}
3174 }
3175 
3176 /**
3177  * intel_psr_init - Init basic PSR work and mutex.
3178  * @intel_dp: Intel DP
3179  *
3180  * This function is called after the connector is initialized
3181  * (connector initialization handles the connector capabilities) and it
3182  * initializes basic PSR state for each DP encoder.
3183  */
3184 void intel_psr_init(struct intel_dp *intel_dp)
3185 {
3186 	struct intel_connector *connector = intel_dp->attached_connector;
3187 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3188 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3189 
3190 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
3191 		return;
3192 
3193 	/*
3194 	 * The HSW spec explicitly says PSR is tied to port A.
3195 	 * BDW+ platforms have an instance of the PSR registers per transcoder,
3196 	 * but BDW, GEN9 and GEN11 are not validated by the HW team on any
3197 	 * transcoder other than the eDP one.
3198 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3199 	 * so let's keep it hardcoded to PORT_A for those.
3200 	 * GEN12, however, supports an instance of the PSR registers per transcoder.
3201 	 */
3202 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
3203 		drm_dbg_kms(&dev_priv->drm,
3204 			    "PSR condition failed: Port not supported\n");
3205 		return;
3206 	}
3207 
3208 	if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
3209 	    DISPLAY_VER(dev_priv) >= 20)
3210 		intel_dp->psr.source_panel_replay_support = true;
3211 
3212 	if (HAS_PSR(dev_priv) && intel_dp_is_edp(intel_dp))
3213 		intel_dp->psr.source_support = true;
3214 
3215 	/* Set link_standby vs. link_off defaults */
3216 	if (DISPLAY_VER(dev_priv) < 12)
3217 		/* For platforms before TGL, respect the VBT setting again */
3218 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3219 
3220 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3221 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3222 	mutex_init(&intel_dp->psr.lock);
3223 }
3224 
3225 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3226 					   u8 *status, u8 *error_status)
3227 {
3228 	struct drm_dp_aux *aux = &intel_dp->aux;
3229 	int ret;
3230 	unsigned int offset;
3231 
3232 	offset = intel_dp->psr.panel_replay_enabled ?
3233 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3234 
3235 	ret = drm_dp_dpcd_readb(aux, offset, status);
3236 	if (ret != 1)
3237 		return ret;
3238 
3239 	offset = intel_dp->psr.panel_replay_enabled ?
3240 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3241 
3242 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3243 	if (ret != 1)
3244 		return ret;
3245 
3246 	*status = *status & DP_PSR_SINK_STATE_MASK;
3247 
3248 	return 0;
3249 }
3250 
3251 static void psr_alpm_check(struct intel_dp *intel_dp)
3252 {
3253 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3254 	struct drm_dp_aux *aux = &intel_dp->aux;
3255 	struct intel_psr *psr = &intel_dp->psr;
3256 	u8 val;
3257 	int r;
3258 
3259 	if (!psr->sel_update_enabled)
3260 		return;
3261 
3262 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3263 	if (r != 1) {
3264 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
3265 		return;
3266 	}
3267 
3268 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3269 		intel_psr_disable_locked(intel_dp);
3270 		psr->sink_not_reliable = true;
3271 		drm_dbg_kms(&dev_priv->drm,
3272 			    "ALPM lock timeout error, disabling PSR\n");
3273 
3274 		/* Clearing error */
3275 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3276 	}
3277 }
3278 
3279 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3280 {
3281 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3282 	struct intel_psr *psr = &intel_dp->psr;
3283 	u8 val;
3284 	int r;
3285 
3286 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3287 	if (r != 1) {
3288 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3289 		return;
3290 	}
3291 
3292 	if (val & DP_PSR_CAPS_CHANGE) {
3293 		intel_psr_disable_locked(intel_dp);
3294 		psr->sink_not_reliable = true;
3295 		drm_dbg_kms(&dev_priv->drm,
3296 			    "Sink PSR capability changed, disabling PSR\n");
3297 
3298 		/* Clearing it */
3299 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3300 	}
3301 }
3302 
3303 /*
3304  * For the bits common to PSR and Panel Replay:
3305  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3306  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3307  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3308  * this function relies on the PSR definitions
3309  */
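/*
 * Purely as an illustration of that assumption (not present in the
 * driver), the equivalence of the common bits could be checked at build
 * time with something like:
 *
 *	BUILD_BUG_ON(DP_PSR_RFB_STORAGE_ERROR !=
 *		     DP_PANEL_REPLAY_RFB_STORAGE_ERROR);
 *	BUILD_BUG_ON(DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR !=
 *		     DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR);
 *	BUILD_BUG_ON(DP_PSR_LINK_CRC_ERROR !=
 *		     DP_PANEL_REPLAY_LINK_CRC_ERROR);
 */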
3310 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3311 {
3312 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3313 	struct intel_psr *psr = &intel_dp->psr;
3314 	u8 status, error_status;
3315 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3316 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3317 			  DP_PSR_LINK_CRC_ERROR;
3318 
3319 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3320 		return;
3321 
3322 	mutex_lock(&psr->lock);
3323 
3324 	if (!psr->enabled)
3325 		goto exit;
3326 
3327 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3328 		drm_err(&dev_priv->drm,
3329 			"Error reading PSR status or error status\n");
3330 		goto exit;
3331 	}
3332 
3333 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3334 	    (error_status & errors)) {
3335 		intel_psr_disable_locked(intel_dp);
3336 		psr->sink_not_reliable = true;
3337 	}
3338 
3339 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3340 	    !error_status)
3341 		drm_dbg_kms(&dev_priv->drm,
3342 			    "PSR sink internal error, disabling PSR\n");
3343 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3344 		drm_dbg_kms(&dev_priv->drm,
3345 			    "PSR RFB storage error, disabling PSR\n");
3346 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3347 		drm_dbg_kms(&dev_priv->drm,
3348 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3349 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3350 		drm_dbg_kms(&dev_priv->drm,
3351 			    "PSR Link CRC error, disabling PSR\n");
3352 
3353 	if (error_status & ~errors)
3354 		drm_err(&dev_priv->drm,
3355 			"PSR_ERROR_STATUS unhandled errors %x\n",
3356 			error_status & ~errors);
3357 	/* Clear the error status register */
3358 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3359 
3360 	if (!psr->panel_replay_enabled) {
3361 		psr_alpm_check(intel_dp);
3362 		psr_capability_changed_check(intel_dp);
3363 	}
3364 
3365 exit:
3366 	mutex_unlock(&psr->lock);
3367 }
3368 
3369 bool intel_psr_enabled(struct intel_dp *intel_dp)
3370 {
3371 	bool ret;
3372 
3373 	if (!CAN_PSR(intel_dp))
3374 		return false;
3375 
3376 	mutex_lock(&intel_dp->psr.lock);
3377 	ret = intel_dp->psr.enabled;
3378 	mutex_unlock(&intel_dp->psr.lock);
3379 
3380 	return ret;
3381 }
3382 
3383 /**
3384  * intel_psr_lock - grab PSR lock
3385  * @crtc_state: the crtc state
3386  *
3387  * This is initially meant to be used around the CRTC update, when
3388  * vblank sensitive registers are updated and we need to grab the lock
3389  * before the vblank evasion critical section starts.
3390  */
3391 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3392 {
3393 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3394 	struct intel_encoder *encoder;
3395 
3396 	if (!crtc_state->has_psr)
3397 		return;
3398 
3399 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3400 					     crtc_state->uapi.encoder_mask) {
3401 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3402 
3403 		mutex_lock(&intel_dp->psr.lock);
3404 		break;
3405 	}
3406 }
3407 
3408 /**
3409  * intel_psr_unlock - release PSR lock
3410  * @crtc_state: the crtc state
3411  *
3412  * Release the PSR lock that was held during pipe update.
3413  */
3414 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3415 {
3416 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3417 	struct intel_encoder *encoder;
3418 
3419 	if (!crtc_state->has_psr)
3420 		return;
3421 
3422 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3423 					     crtc_state->uapi.encoder_mask) {
3424 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3425 
3426 		mutex_unlock(&intel_dp->psr.lock);
3427 		break;
3428 	}
3429 }
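
/*
 * Sketch of the intended pairing (assumed, simplified from the actual
 * pipe update path): the lock is taken before the vblank evasion critical
 * section and dropped once the vblank sensitive registers have been
 * written, e.g.:
 *
 *	intel_psr_lock(new_crtc_state);
 *	intel_pipe_update_start(state, crtc);
 *	... write double buffered / vblank sensitive registers ...
 *	intel_pipe_update_end(state, crtc);
 *	intel_psr_unlock(new_crtc_state);
 */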
3430 
3431 static void
3432 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3433 {
3434 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3435 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3436 	const char *status = "unknown";
3437 	u32 val, status_val;
3438 
3439 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3440 					  intel_dp->psr.panel_replay_enabled)) {
3441 		static const char * const live_status[] = {
3442 			"IDLE",
3443 			"CAPTURE",
3444 			"CAPTURE_FS",
3445 			"SLEEP",
3446 			"BUFON_FW",
3447 			"ML_UP",
3448 			"SU_STANDBY",
3449 			"FAST_SLEEP",
3450 			"DEEP_SLEEP",
3451 			"BUF_ON",
3452 			"TG_ON"
3453 		};
3454 		val = intel_de_read(dev_priv,
3455 				    EDP_PSR2_STATUS(dev_priv, cpu_transcoder));
3456 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3457 		if (status_val < ARRAY_SIZE(live_status))
3458 			status = live_status[status_val];
3459 	} else {
3460 		static const char * const live_status[] = {
3461 			"IDLE",
3462 			"SRDONACK",
3463 			"SRDENT",
3464 			"BUFOFF",
3465 			"BUFON",
3466 			"AUXACK",
3467 			"SRDOFFACK",
3468 			"SRDENT_ON",
3469 		};
3470 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3471 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3472 		if (status_val < ARRAY_SIZE(live_status))
3473 			status = live_status[status_val];
3474 	}
3475 
3476 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3477 }
3478 
3479 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3480 				      struct seq_file *m)
3481 {
3482 	struct intel_psr *psr = &intel_dp->psr;
3483 
3484 	seq_printf(m, "Sink support: PSR = %s",
3485 		   str_yes_no(psr->sink_support));
3486 
3487 	if (psr->sink_support)
3488 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3489 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3490 		seq_printf(m, " (Early Transport)");
3491 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3492 	seq_printf(m, ", Panel Replay Selective Update = %s",
3493 		   str_yes_no(psr->sink_panel_replay_su_support));
3494 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3495 		seq_printf(m, " (Early Transport)");
3496 	seq_printf(m, "\n");
3497 }
3498 
3499 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3500 				 struct seq_file *m)
3501 {
3502 	struct intel_psr *psr = &intel_dp->psr;
3503 	const char *status, *mode, *region_et;
3504 
3505 	if (psr->enabled)
3506 		status = " enabled";
3507 	else
3508 		status = "disabled";
3509 
3510 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3511 		mode = "Panel Replay Selective Update";
3512 	else if (psr->panel_replay_enabled)
3513 		mode = "Panel Replay";
3514 	else if (psr->sel_update_enabled)
3515 		mode = "PSR2";
3516 	else if (psr->enabled)
3517 		mode = "PSR1";
3518 	else
3519 		mode = "";
3520 
3521 	if (psr->su_region_et_enabled)
3522 		region_et = " (Early Transport)";
3523 	else
3524 		region_et = "";
3525 
3526 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3527 }
3528 
3529 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3530 {
3531 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3532 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3533 	struct intel_psr *psr = &intel_dp->psr;
3534 	intel_wakeref_t wakeref;
3535 	bool enabled;
3536 	u32 val, psr2_ctl;
3537 
3538 	intel_psr_sink_capability(intel_dp, m);
3539 
3540 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3541 		return 0;
3542 
3543 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3544 	mutex_lock(&psr->lock);
3545 
3546 	intel_psr_print_mode(intel_dp, m);
3547 
3548 	if (!psr->enabled) {
3549 		seq_printf(m, "PSR sink not reliable: %s\n",
3550 			   str_yes_no(psr->sink_not_reliable));
3551 
3552 		goto unlock;
3553 	}
3554 
3555 	if (psr->panel_replay_enabled) {
3556 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3557 
3558 		if (intel_dp_is_edp(intel_dp))
3559 			psr2_ctl = intel_de_read(dev_priv,
3560 						 EDP_PSR2_CTL(dev_priv,
3561 							      cpu_transcoder));
3562 
3563 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3564 	} else if (psr->sel_update_enabled) {
3565 		val = intel_de_read(dev_priv,
3566 				    EDP_PSR2_CTL(dev_priv, cpu_transcoder));
3567 		enabled = val & EDP_PSR2_ENABLE;
3568 	} else {
3569 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3570 		enabled = val & EDP_PSR_ENABLE;
3571 	}
3572 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3573 		   str_enabled_disabled(enabled), val);
3574 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
3575 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
3576 			   psr2_ctl);
3577 	psr_source_status(intel_dp, m);
3578 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3579 		   psr->busy_frontbuffer_bits);
3580 
3581 	/*
3582 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3583 	 */
3584 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3585 	seq_printf(m, "Performance counter: %u\n",
3586 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3587 
3588 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3589 		seq_printf(m, "Last attempted entry at: %lld\n",
3590 			   psr->last_entry_attempt);
3591 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3592 	}
3593 
3594 	if (psr->sel_update_enabled) {
3595 		u32 su_frames_val[3];
3596 		int frame;
3597 
3598 		/*
3599 		 * Read all 3 registers beforehand to minimize the chance of
3600 		 * crossing a frame boundary between the register reads
3601 		 */
3602 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3603 			val = intel_de_read(dev_priv,
3604 					    PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame));
3605 			su_frames_val[frame / 3] = val;
3606 		}
3607 
3608 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3609 
3610 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3611 			u32 su_blocks;
3612 
3613 			su_blocks = su_frames_val[frame / 3] &
3614 				    PSR2_SU_STATUS_MASK(frame);
3615 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3616 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3617 		}
3618 
3619 		seq_printf(m, "PSR2 selective fetch: %s\n",
3620 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3621 	}
3622 
3623 unlock:
3624 	mutex_unlock(&psr->lock);
3625 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3626 
3627 	return 0;
3628 }
3629 
3630 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3631 {
3632 	struct drm_i915_private *dev_priv = m->private;
3633 	struct intel_dp *intel_dp = NULL;
3634 	struct intel_encoder *encoder;
3635 
3636 	if (!HAS_PSR(dev_priv))
3637 		return -ENODEV;
3638 
3639 	/* Find the first EDP which supports PSR */
3640 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3641 		intel_dp = enc_to_intel_dp(encoder);
3642 		break;
3643 	}
3644 
3645 	if (!intel_dp)
3646 		return -ENODEV;
3647 
3648 	return intel_psr_status(m, intel_dp);
3649 }
3650 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3651 
3652 static int
3653 i915_edp_psr_debug_set(void *data, u64 val)
3654 {
3655 	struct drm_i915_private *dev_priv = data;
3656 	struct intel_encoder *encoder;
3657 	intel_wakeref_t wakeref;
3658 	int ret = -ENODEV;
3659 
3660 	if (!HAS_PSR(dev_priv))
3661 		return ret;
3662 
3663 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3664 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3665 
3666 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3667 
3668 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3669 
3670 		// TODO: split to each transcoder's PSR debug state
3671 		ret = intel_psr_debug_set(intel_dp, val);
3672 
3673 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3674 	}
3675 
3676 	return ret;
3677 }
3678 
3679 static int
3680 i915_edp_psr_debug_get(void *data, u64 *val)
3681 {
3682 	struct drm_i915_private *dev_priv = data;
3683 	struct intel_encoder *encoder;
3684 
3685 	if (!HAS_PSR(dev_priv))
3686 		return -ENODEV;
3687 
3688 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3689 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3690 
3691 		// TODO: split to each transcoder's PSR debug state
3692 		*val = READ_ONCE(intel_dp->psr.debug);
3693 		return 0;
3694 	}
3695 
3696 	return -ENODEV;
3697 }
3698 
3699 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3700 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3701 			"%llu\n");
3702 
3703 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3704 {
3705 	struct drm_minor *minor = i915->drm.primary;
3706 
3707 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3708 			    i915, &i915_edp_psr_debug_fops);
3709 
3710 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3711 			    i915, &i915_edp_psr_status_fops);
3712 }
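
/*
 * For reference, assuming the usual debugfs mount point and DRM minor 0,
 * the files registered above are typically exercised from userspace as:
 *
 *	# cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *	# echo 0x1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * where the written value is a mask of I915_PSR_DEBUG_* flags.
 */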
3713 
3714 static const char *psr_mode_str(struct intel_dp *intel_dp)
3715 {
3716 	if (intel_dp->psr.panel_replay_enabled)
3717 		return "PANEL-REPLAY";
3718 	else if (intel_dp->psr.enabled)
3719 		return "PSR";
3720 
3721 	return "unknown";
3722 }
3723 
3724 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3725 {
3726 	struct intel_connector *connector = m->private;
3727 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3728 	static const char * const sink_status[] = {
3729 		"inactive",
3730 		"transition to active, capture and display",
3731 		"active, display from RFB",
3732 		"active, capture and display on sink device timings",
3733 		"transition to inactive, capture and display, timing re-sync",
3734 		"reserved",
3735 		"reserved",
3736 		"sink internal error",
3737 	};
3738 	const char *str;
3739 	int ret;
3740 	u8 status, error_status;
3741 
3742 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3743 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3744 		return -ENODEV;
3745 	}
3746 
3747 	if (connector->base.status != connector_status_connected)
3748 		return -ENODEV;
3749 
3750 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3751 	if (ret)
3752 		return ret;
3753 
3754 	status &= DP_PSR_SINK_STATE_MASK;
3755 	if (status < ARRAY_SIZE(sink_status))
3756 		str = sink_status[status];
3757 	else
3758 		str = "unknown";
3759 
3760 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3761 
3762 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3763 
3764 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3765 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3766 			    DP_PSR_LINK_CRC_ERROR))
3767 		seq_puts(m, ":\n");
3768 	else
3769 		seq_puts(m, "\n");
3770 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3771 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3772 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3773 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3774 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3775 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3776 
3777 	return ret;
3778 }
3779 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3780 
3781 static int i915_psr_status_show(struct seq_file *m, void *data)
3782 {
3783 	struct intel_connector *connector = m->private;
3784 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3785 
3786 	return intel_psr_status(m, intel_dp);
3787 }
3788 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3789 
3790 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3791 {
3792 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3793 	struct dentry *root = connector->base.debugfs_entry;
3794 
3795 	/* TODO: Add support for MST connectors as well. */
3796 	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3797 	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3798 	    connector->mst_port)
3799 		return;
3800 
3801 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3802 			    connector, &i915_psr_sink_status_fops);
3803 
3804 	if (HAS_PSR(i915) || HAS_DP20(i915))
3805 		debugfs_create_file("i915_psr_status", 0444, root,
3806 				    connector, &i915_psr_status_fops);
3807 }
3808