xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 88872790923e2d80edf29a00b4e440f1473fa8f5)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_de.h"
33 #include "intel_display_types.h"
34 #include "intel_dp.h"
35 #include "intel_dp_aux.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_hdmi.h"
38 #include "intel_psr.h"
39 #include "intel_psr_regs.h"
40 #include "intel_snps_phy.h"
41 #include "skl_universal_plane.h"
42 
43 /**
44  * DOC: Panel Self Refresh (PSR/SRD)
45  *
46  * Since Haswell the display controller supports Panel Self-Refresh on display
47  * panels which have a remote frame buffer (RFB) implemented according to the
48  * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
49  * when the system is idle but the display is on, as it completely eliminates
50  * display refresh requests to DDR memory as long as the frame buffer for that
51  * display is unchanged.
52  *
53  * Panel Self Refresh must be supported by both Hardware (source) and
54  * Panel (sink).
55  *
56  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
57  * to power down the link and memory controller. For DSI panels the same idea
58  * is called "manual mode".
59  *
60  * The implementation uses the hardware-based PSR support which automatically
61  * enters/exits self-refresh mode. The hardware takes care of sending the
62  * required DP aux message and could even retrain the link (that part isn't
63  * enabled yet though). The hardware also keeps track of any frontbuffer
64  * changes to know when to exit self-refresh mode again. Unfortunately that
65  * part doesn't work too well, hence why the i915 PSR support uses the
66  * software frontbuffer tracking to make sure it doesn't miss a screen
67  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
68  * get called by the frontbuffer tracking code. Note that because of locking
69  * issues the self-refresh re-enable code is done from a work queue, which
70  * must be correctly synchronized/cancelled when shutting down the pipe.
71  *
72  * DC3CO (DC3 clock off)
73  *
74  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
75  * the clock off automatically during the PSR2 idle state.
76  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
77  * entry/exit allows the HW to enter a low-power state even when page flipping
78  * periodically (for instance a 30fps video playback scenario).
79  *
80  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
81  * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
82  * frames. If no other flip occurs and that work is executed, DC3CO is
83  * disabled and PSR2 is configured to enter deep sleep again, resetting in case
84  * of another flip.
85  * Front buffer modifications do not trigger DC3CO activation on purpose, as that
86  * would bring a lot of complexity and most modern systems will only
87  * use page flips.
88  */
89 
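/*
 * Illustrative sketch (not part of the driver): roughly how the DC3CO flow
 * described above plays out around a page flip, using helpers defined later
 * in this file. The exact scheduling call shown is an assumption for
 * illustration only, and timings are simplified.
 *
 *	// page flip arrives while PSR2 is active:
 *	tgl_psr2_enable_dc3co(intel_dp);	// 0 idle frames, DC3CO target
 *	mod_delayed_work(dev_priv->unordered_wq, &intel_dp->psr.dc3co_work,
 *			 intel_dp->psr.dc3co_exit_delay);	// ~6 frames
 *
 *	// if no further flip arrives before the work runs,
 *	// tgl_dc3co_disable_work() calls tgl_psr2_disable_dc3co() so that
 *	// PSR2 deep sleep is allowed again.
 */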
90 /*
91  * Description of PSR mask bits:
92  *
93  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
94  *
95  *  When unmasked (nearly) all display register writes (e.g. even
96  *  SWF) trigger a PSR exit. Some registers are excluded from this
97  *  and they have a more specific mask (described below). On icl+
98  *  this bit no longer exists and is effectively always set.
99  *
100  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
101  *
102  *  When unmasked (nearly) all pipe/plane register writes
103  *  trigger a PSR exit. Some plane registers are excluded from this
104  *  and they have a more specific mask (described below).
105  *
106  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
107  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
108  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
109  *
110  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
111  *  SPR_SURF/CURBASE are not included in this and instead are
112  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
113  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
114  *
115  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
116  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
117  *
118  *  When unmasked PSR is blocked as long as the sprite
119  *  plane is enabled. skl+ with their universal planes no
120  *  longer have a mask bit like this, and no plane being
121  *  enabled blocks PSR.
122  *
123  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
124  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
125  *
126  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
127  *  this doesn't exist but CURPOS is included in the
128  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
129  *
130  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
131  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
132  *
133  *  When unmasked PSR is blocked as long as vblank and/or vsync
134  *  interrupt is unmasked in IMR *and* enabled in IER.
135  *
136  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
137  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
138  *
139  *  Selects whether PSR exit generates an extra vblank before
140  *  the first frame is transmitted. Also note the opposite polarity
141  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
142  *  unmasked==do not generate the extra vblank).
143  *
144  *  With DC states enabled the extra vblank happens after link training,
145  *  with DC states disabled it happens immediately upon PSR exit trigger.
146  *  No idea as of now why there is a difference. HSW/BDW (which don't
147  *  even have DMC) always generate it after link training. Go figure.
148  *
149  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
150  *  and thus won't latch until the first vblank. So with DC states
151  *  enabled the register effectively uses the reset value during DC5
152  *  exit+PSR exit sequence, and thus the bit does nothing until
153  *  latched by the vblank that it was trying to prevent from being
154  *  generated in the first place. So we should probably call this
155  *  one a chicken/egg bit instead on skl+.
156  *
157  *  In standby mode (as opposed to link-off) this makes no difference
158  *  as the timing generator keeps running the whole time generating
159  *  normal periodic vblanks.
160  *
161  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
162  *  and doing so makes the behaviour match the skl+ reset value.
163  *
164  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
165  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
166  *
167  *  On BDW without this bit set no vblanks whatsoever are
168  *  generated after PSR exit. On HSW this has no apparent effect.
169  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
170  *
171  * The rest of the bits are more self-explanatory and/or
172  * irrelevant for normal operation.
173  */
174 
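/*
 * Illustrative sketch (not part of the driver): the mask bits described above
 * are typically flipped with a read-modify-write. E.g. applying
 * WaPsrDPAMaskVBlankInSRD on hsw/bdw would look roughly like:
 *
 *	intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
 *		     0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
 *
 * The call above is only meant to show the pattern; the workaround itself is
 * applied elsewhere in the driver (clock gating init), not from this file.
 */
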
175 static bool psr_global_enabled(struct intel_dp *intel_dp)
176 {
177 	struct intel_connector *connector = intel_dp->attached_connector;
178 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
179 
180 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
181 	case I915_PSR_DEBUG_DEFAULT:
182 		if (i915->params.enable_psr == -1)
183 			return connector->panel.vbt.psr.enable;
184 		return i915->params.enable_psr;
185 	case I915_PSR_DEBUG_DISABLE:
186 		return false;
187 	default:
188 		return true;
189 	}
190 }
191 
192 static bool psr2_global_enabled(struct intel_dp *intel_dp)
193 {
194 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
195 
196 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
197 	case I915_PSR_DEBUG_DISABLE:
198 	case I915_PSR_DEBUG_FORCE_PSR1:
199 		return false;
200 	default:
201 		if (i915->params.enable_psr == 1)
202 			return false;
203 		return true;
204 	}
205 }
206 
207 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
208 {
209 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
210 
211 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
212 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
213 }
214 
215 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
216 {
217 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
218 
219 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
220 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
221 }
222 
223 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
224 {
225 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
226 
227 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
228 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
229 }
230 
231 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
232 {
233 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
234 
235 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
236 		EDP_PSR_MASK(intel_dp->psr.transcoder);
237 }
238 
239 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
240 			      enum transcoder cpu_transcoder)
241 {
242 	if (DISPLAY_VER(dev_priv) >= 8)
243 		return EDP_PSR_CTL(cpu_transcoder);
244 	else
245 		return HSW_SRD_CTL;
246 }
247 
248 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
249 				enum transcoder cpu_transcoder)
250 {
251 	if (DISPLAY_VER(dev_priv) >= 8)
252 		return EDP_PSR_DEBUG(cpu_transcoder);
253 	else
254 		return HSW_SRD_DEBUG;
255 }
256 
257 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
258 				   enum transcoder cpu_transcoder)
259 {
260 	if (DISPLAY_VER(dev_priv) >= 8)
261 		return EDP_PSR_PERF_CNT(cpu_transcoder);
262 	else
263 		return HSW_SRD_PERF_CNT;
264 }
265 
266 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
267 				 enum transcoder cpu_transcoder)
268 {
269 	if (DISPLAY_VER(dev_priv) >= 8)
270 		return EDP_PSR_STATUS(cpu_transcoder);
271 	else
272 		return HSW_SRD_STATUS;
273 }
274 
275 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
276 			      enum transcoder cpu_transcoder)
277 {
278 	if (DISPLAY_VER(dev_priv) >= 12)
279 		return TRANS_PSR_IMR(cpu_transcoder);
280 	else
281 		return EDP_PSR_IMR;
282 }
283 
284 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
285 			      enum transcoder cpu_transcoder)
286 {
287 	if (DISPLAY_VER(dev_priv) >= 12)
288 		return TRANS_PSR_IIR(cpu_transcoder);
289 	else
290 		return EDP_PSR_IIR;
291 }
292 
293 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
294 				  enum transcoder cpu_transcoder)
295 {
296 	if (DISPLAY_VER(dev_priv) >= 8)
297 		return EDP_PSR_AUX_CTL(cpu_transcoder);
298 	else
299 		return HSW_SRD_AUX_CTL;
300 }
301 
302 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
303 				   enum transcoder cpu_transcoder, int i)
304 {
305 	if (DISPLAY_VER(dev_priv) >= 8)
306 		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
307 	else
308 		return HSW_SRD_AUX_DATA(i);
309 }
310 
311 static void psr_irq_control(struct intel_dp *intel_dp)
312 {
313 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
314 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
315 	u32 mask;
316 
317 	mask = psr_irq_psr_error_bit_get(intel_dp);
318 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
319 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
320 			psr_irq_pre_entry_bit_get(intel_dp);
321 
322 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
323 		     psr_irq_mask_get(intel_dp), ~mask);
324 }
325 
326 static void psr_event_print(struct drm_i915_private *i915,
327 			    u32 val, bool psr2_enabled)
328 {
329 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
330 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
331 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
332 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
333 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
334 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
335 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
336 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
337 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
338 	if (val & PSR_EVENT_GRAPHICS_RESET)
339 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
340 	if (val & PSR_EVENT_PCH_INTERRUPT)
341 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
342 	if (val & PSR_EVENT_MEMORY_UP)
343 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
344 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
345 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
346 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
347 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
348 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
349 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
350 	if (val & PSR_EVENT_REGISTER_UPDATE)
351 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
352 	if (val & PSR_EVENT_HDCP_ENABLE)
353 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
354 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
355 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
356 	if (val & PSR_EVENT_VBI_ENABLE)
357 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
358 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
359 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
360 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
361 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
362 }
363 
364 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
365 {
366 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
367 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
368 	ktime_t time_ns =  ktime_get();
369 
370 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
371 		intel_dp->psr.last_entry_attempt = time_ns;
372 		drm_dbg_kms(&dev_priv->drm,
373 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
374 			    transcoder_name(cpu_transcoder));
375 	}
376 
377 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
378 		intel_dp->psr.last_exit = time_ns;
379 		drm_dbg_kms(&dev_priv->drm,
380 			    "[transcoder %s] PSR exit completed\n",
381 			    transcoder_name(cpu_transcoder));
382 
383 		if (DISPLAY_VER(dev_priv) >= 9) {
384 			u32 val;
385 
386 			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
387 
388 			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
389 		}
390 	}
391 
392 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
393 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
394 			 transcoder_name(cpu_transcoder));
395 
396 		intel_dp->psr.irq_aux_error = true;
397 
398 		/*
399 		 * If this interrupt is not masked it will keep firing
400 		 * so fast that it prevents the scheduled work from
401 		 * running.
402 		 * Also, after a PSR error we don't want to arm PSR
403 		 * again, so we don't care about unmasking the
404 		 * interrupt or clearing irq_aux_error.
405 		 */
406 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
407 			     0, psr_irq_psr_error_bit_get(intel_dp));
408 
409 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
410 	}
411 }
412 
413 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
414 {
415 	u8 alpm_caps = 0;
416 
417 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
418 			      &alpm_caps) != 1)
419 		return false;
420 	return alpm_caps & DP_ALPM_CAP;
421 }
422 
423 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
424 {
425 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
426 	u8 val = 8; /* assume the worst if we can't read the value */
427 
428 	if (drm_dp_dpcd_readb(&intel_dp->aux,
429 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
430 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
431 	else
432 		drm_dbg_kms(&i915->drm,
433 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
434 	return val;
435 }
436 
437 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
438 {
439 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
440 	ssize_t r;
441 	u16 w;
442 	u8 y;
443 
444 	/* If the sink doesn't have specific granularity requirements, set legacy ones */
445 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
446 		/* As PSR2 HW sends full lines, we do not care about x granularity */
447 		w = 4;
448 		y = 4;
449 		goto exit;
450 	}
451 
452 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
453 	if (r != 2)
454 		drm_dbg_kms(&i915->drm,
455 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
456 	/*
457 	 * Spec says that if the value read is 0 the default granularity should
458 	 * be used instead.
459 	 */
460 	if (r != 2 || w == 0)
461 		w = 4;
462 
463 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
464 	if (r != 1) {
465 		drm_dbg_kms(&i915->drm,
466 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
467 		y = 4;
468 	}
469 	if (y == 0)
470 		y = 1;
471 
472 exit:
473 	intel_dp->psr.su_w_granularity = w;
474 	intel_dp->psr.su_y_granularity = y;
475 }
476 
477 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
478 {
479 	struct drm_i915_private *dev_priv =
480 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
481 
482 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
483 			 sizeof(intel_dp->psr_dpcd));
484 
485 	if (!intel_dp->psr_dpcd[0])
486 		return;
487 	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
488 		    intel_dp->psr_dpcd[0]);
489 
490 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
491 		drm_dbg_kms(&dev_priv->drm,
492 			    "PSR support not currently available for this panel\n");
493 		return;
494 	}
495 
496 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
497 		drm_dbg_kms(&dev_priv->drm,
498 			    "Panel lacks power state control, PSR cannot be enabled\n");
499 		return;
500 	}
501 
502 	intel_dp->psr.sink_support = true;
503 	intel_dp->psr.sink_sync_latency =
504 		intel_dp_get_sink_sync_latency(intel_dp);
505 
506 	if (DISPLAY_VER(dev_priv) >= 9 &&
507 	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
508 		bool y_req = intel_dp->psr_dpcd[1] &
509 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
510 		bool alpm = intel_dp_get_alpm_status(intel_dp);
511 
512 		/*
513 		 * All panels that support PSR version 03h (PSR2 +
514 		 * Y-coordinate) can handle Y-coordinates in the VSC but we
515 		 * are only sure that it is going to be used when required
516 		 * by the panel. This way the panel is capable of doing
517 		 * selective updates without an AUX frame sync.
518 		 *
519 		 * To support PSR version 02h and PSR version 03h panels
520 		 * without the Y-coordinate requirement we would need to
521 		 * enable GTC first.
522 		 */
523 		intel_dp->psr.sink_psr2_support = y_req && alpm;
524 		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
525 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
526 
527 		if (intel_dp->psr.sink_psr2_support) {
528 			intel_dp->psr.colorimetry_support =
529 				intel_dp_get_colorimetry_status(intel_dp);
530 			intel_dp_get_su_granularity(intel_dp);
531 		}
532 	}
533 }
534 
535 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
536 {
537 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
538 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
539 	u32 aux_clock_divider, aux_ctl;
540 	/* write DP_SET_POWER=D0 */
541 	static const u8 aux_msg[] = {
542 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
543 		[1] = (DP_SET_POWER >> 8) & 0xff,
544 		[2] = DP_SET_POWER & 0xff,
545 		[3] = 1 - 1,
546 		[4] = DP_SET_POWER_D0,
547 	};
548 	int i;
549 
550 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
551 	for (i = 0; i < sizeof(aux_msg); i += 4)
552 		intel_de_write(dev_priv,
553 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
554 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
555 
556 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
557 
558 	/* Start with bits set for DDI_AUX_CTL register */
559 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
560 					     aux_clock_divider);
561 
562 	/* Select only valid bits for SRD_AUX_CTL */
563 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
564 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
565 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
566 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
567 
568 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
569 		       aux_ctl);
570 }
571 
572 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
573 {
574 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
575 	u8 dpcd_val = DP_PSR_ENABLE;
576 
577 	/* Enable ALPM at sink for psr2 */
578 	if (intel_dp->psr.psr2_enabled) {
579 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
580 				   DP_ALPM_ENABLE |
581 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
582 
583 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
584 	} else {
585 		if (intel_dp->psr.link_standby)
586 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
587 
588 		if (DISPLAY_VER(dev_priv) >= 8)
589 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
590 	}
591 
592 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
593 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
594 
595 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
596 
597 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
598 }
599 
600 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
601 {
602 	struct intel_connector *connector = intel_dp->attached_connector;
603 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
604 	u32 val = 0;
605 
606 	if (DISPLAY_VER(dev_priv) >= 11)
607 		val |= EDP_PSR_TP4_TIME_0us;
608 
609 	if (dev_priv->params.psr_safest_params) {
610 		val |= EDP_PSR_TP1_TIME_2500us;
611 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
612 		goto check_tp3_sel;
613 	}
614 
615 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
616 		val |= EDP_PSR_TP1_TIME_0us;
617 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
618 		val |= EDP_PSR_TP1_TIME_100us;
619 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
620 		val |= EDP_PSR_TP1_TIME_500us;
621 	else
622 		val |= EDP_PSR_TP1_TIME_2500us;
623 
624 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
625 		val |= EDP_PSR_TP2_TP3_TIME_0us;
626 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
627 		val |= EDP_PSR_TP2_TP3_TIME_100us;
628 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
629 		val |= EDP_PSR_TP2_TP3_TIME_500us;
630 	else
631 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
632 
633 	/*
634 	 * WA 0479: hsw,bdw
635 	 * "Do not skip both TP1 and TP2/TP3"
636 	 */
637 	if (DISPLAY_VER(dev_priv) < 9 &&
638 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
639 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
640 		val |= EDP_PSR_TP2_TP3_TIME_100us;
641 
642 check_tp3_sel:
643 	if (intel_dp_source_supports_tps3(dev_priv) &&
644 	    drm_dp_tps3_supported(intel_dp->dpcd))
645 		val |= EDP_PSR_TP_TP1_TP3;
646 	else
647 		val |= EDP_PSR_TP_TP1_TP2;
648 
649 	return val;
650 }
651 
652 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
653 {
654 	struct intel_connector *connector = intel_dp->attached_connector;
655 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
656 	int idle_frames;
657 
658 	/* Let's use 6 as the minimum to cover all known cases including the
659 	 * off-by-one issue that HW has in some cases.
660 	 */
661 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
662 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
663 
664 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
665 		idle_frames = 0xf;
666 
667 	return idle_frames;
668 }
669 
670 static void hsw_activate_psr1(struct intel_dp *intel_dp)
671 {
672 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
673 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
674 	u32 max_sleep_time = 0x1f;
675 	u32 val = EDP_PSR_ENABLE;
676 
677 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
678 
679 	val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
680 	if (IS_HASWELL(dev_priv))
681 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
682 
683 	if (intel_dp->psr.link_standby)
684 		val |= EDP_PSR_LINK_STANDBY;
685 
686 	val |= intel_psr1_get_tp_time(intel_dp);
687 
688 	if (DISPLAY_VER(dev_priv) >= 8)
689 		val |= EDP_PSR_CRC_ENABLE;
690 
691 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
692 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
693 }
694 
695 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
696 {
697 	struct intel_connector *connector = intel_dp->attached_connector;
698 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
699 	u32 val = 0;
700 
701 	if (dev_priv->params.psr_safest_params)
702 		return EDP_PSR2_TP2_TIME_2500us;
703 
704 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
705 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
706 		val |= EDP_PSR2_TP2_TIME_50us;
707 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
708 		val |= EDP_PSR2_TP2_TIME_100us;
709 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
710 		val |= EDP_PSR2_TP2_TIME_500us;
711 	else
712 		val |= EDP_PSR2_TP2_TIME_2500us;
713 
714 	return val;
715 }
716 
717 static int psr2_block_count_lines(struct intel_dp *intel_dp)
718 {
719 	return intel_dp->psr.io_wake_lines < 9 &&
720 		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
721 }
722 
723 static int psr2_block_count(struct intel_dp *intel_dp)
724 {
725 	return psr2_block_count_lines(intel_dp) / 4;
726 }
727 
728 static void hsw_activate_psr2(struct intel_dp *intel_dp)
729 {
730 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
732 	u32 val = EDP_PSR2_ENABLE;
733 
734 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
735 
736 	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
737 		val |= EDP_SU_TRACK_ENABLE;
738 
739 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
740 		val |= EDP_Y_COORDINATE_ENABLE;
741 
742 	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
743 	val |= intel_psr2_get_tp_time(intel_dp);
744 
745 	if (DISPLAY_VER(dev_priv) >= 12) {
746 		if (psr2_block_count(intel_dp) > 2)
747 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
748 		else
749 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
750 	}
751 
752 	/* Wa_22012278275:adl-p */
753 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
754 		static const u8 map[] = {
755 			2, /* 5 lines */
756 			1, /* 6 lines */
757 			0, /* 7 lines */
758 			3, /* 8 lines */
759 			6, /* 9 lines */
760 			5, /* 10 lines */
761 			4, /* 11 lines */
762 			7, /* 12 lines */
763 		};
764 		/*
765 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
766 		 * comments below for more information
767 		 */
768 		int tmp;
769 
770 		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
771 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
772 
773 		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
774 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
775 	} else if (DISPLAY_VER(dev_priv) >= 12) {
776 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
777 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
778 	} else if (DISPLAY_VER(dev_priv) >= 9) {
779 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
780 		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
781 	}
782 
783 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
784 		val |= EDP_PSR2_SU_SDP_SCANLINE;
785 
786 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
787 		u32 tmp;
788 
789 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
790 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
791 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
792 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
793 	}
794 
795 	/*
796 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
797 	 * recommends keeping this bit unset while PSR2 is enabled.
798 	 */
799 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
800 
801 	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
802 }
803 
804 static bool
805 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
806 {
807 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
808 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
809 	else if (DISPLAY_VER(dev_priv) >= 12)
810 		return cpu_transcoder == TRANSCODER_A;
811 	else if (DISPLAY_VER(dev_priv) >= 9)
812 		return cpu_transcoder == TRANSCODER_EDP;
813 	else
814 		return false;
815 }
816 
817 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
818 {
819 	if (!cstate || !cstate->hw.active)
820 		return 0;
821 
822 	return DIV_ROUND_UP(1000 * 1000,
823 			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
824 }
825 
826 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
827 				     u32 idle_frames)
828 {
829 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
830 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
831 
832 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
833 		     EDP_PSR2_IDLE_FRAMES_MASK,
834 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
835 }
836 
837 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
838 {
839 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
840 
841 	psr2_program_idle_frames(intel_dp, 0);
842 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
843 }
844 
845 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
846 {
847 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
848 
849 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
850 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
851 }
852 
853 static void tgl_dc3co_disable_work(struct work_struct *work)
854 {
855 	struct intel_dp *intel_dp =
856 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
857 
858 	mutex_lock(&intel_dp->psr.lock);
859 	/* If delayed work is pending, it is not idle */
860 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
861 		goto unlock;
862 
863 	tgl_psr2_disable_dc3co(intel_dp);
864 unlock:
865 	mutex_unlock(&intel_dp->psr.lock);
866 }
867 
868 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
869 {
870 	if (!intel_dp->psr.dc3co_exitline)
871 		return;
872 
873 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
874 	/* Before PSR2 exit disallow DC3CO */
875 	tgl_psr2_disable_dc3co(intel_dp);
876 }
877 
878 static bool
879 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
880 			      struct intel_crtc_state *crtc_state)
881 {
882 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
883 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
884 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
885 	enum port port = dig_port->base.port;
886 
887 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
888 		return pipe <= PIPE_B && port <= PORT_B;
889 	else
890 		return pipe == PIPE_A && port == PORT_A;
891 }
892 
893 static void
894 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
895 				  struct intel_crtc_state *crtc_state)
896 {
897 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
898 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
899 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
900 	u32 exit_scanlines;
901 
902 	/*
903 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
904 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
905 	 * is applied. B.Specs:49196
906 	 */
907 	return;
908 
909 	/*
910 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
911 	 * TODO: when the issue is addressed, this restriction should be removed.
912 	 */
913 	if (crtc_state->enable_psr2_sel_fetch)
914 		return;
915 
916 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
917 		return;
918 
919 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
920 		return;
921 
922 	/* Wa_16011303918:adl-p */
923 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
924 		return;
925 
926 	/*
927 	 * DC3CO Exit time 200us B.Spec 49196
928 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
929 	 */
930 	exit_scanlines =
931 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
932 
933 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
934 		return;
935 
936 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
937 }
938 
939 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
940 					      struct intel_crtc_state *crtc_state)
941 {
942 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
943 
944 	if (!dev_priv->params.enable_psr2_sel_fetch &&
945 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
946 		drm_dbg_kms(&dev_priv->drm,
947 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
948 		return false;
949 	}
950 
951 	if (crtc_state->uapi.async_flip) {
952 		drm_dbg_kms(&dev_priv->drm,
953 			    "PSR2 sel fetch not enabled, async flip enabled\n");
954 		return false;
955 	}
956 
957 	return crtc_state->enable_psr2_sel_fetch = true;
958 }
959 
960 static bool psr2_granularity_check(struct intel_dp *intel_dp,
961 				   struct intel_crtc_state *crtc_state)
962 {
963 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
964 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
965 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
966 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
967 	u16 y_granularity = 0;
968 
969 	/* PSR2 HW only sends full lines so we only need to validate the width */
970 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
971 		return false;
972 
973 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
974 		return false;
975 
976 	/* HW tracking is only aligned to 4 lines */
977 	if (!crtc_state->enable_psr2_sel_fetch)
978 		return intel_dp->psr.su_y_granularity == 4;
979 
980 	/*
981 	 * adl_p and mtl platforms have 1 line granularity.
982 	 * For other platforms with SW tracking we can adjust the y coordinates
983 	 * coordinates to match the sink requirement if it is a multiple of 4.
984 	 */
985 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
986 		y_granularity = intel_dp->psr.su_y_granularity;
987 	else if (intel_dp->psr.su_y_granularity <= 2)
988 		y_granularity = 4;
989 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
990 		y_granularity = intel_dp->psr.su_y_granularity;
991 
992 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
993 		return false;
994 
995 	if (crtc_state->dsc.compression_enable &&
996 	    vdsc_cfg->slice_height % y_granularity)
997 		return false;
998 
999 	crtc_state->su_y_granularity = y_granularity;
1000 	return true;
1001 }
1002 
1003 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1004 							struct intel_crtc_state *crtc_state)
1005 {
1006 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1007 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1008 	u32 hblank_total, hblank_ns, req_ns;
1009 
1010 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1011 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1012 
1013 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1014 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
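	/*
	 * Worked example (illustration only): with 4 lanes at HBR2
	 * (port_clock = 270000 kHz, i.e. a 270 MHz symbol clock),
	 * req_ns = ((60 / 4) + 11) * 1000 / 270 = 96 ns.
	 */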
1015 
1016 	if ((hblank_ns - req_ns) > 100)
1017 		return true;
1018 
1019 	/* Not supported <13 / Wa_22012279113:adl-p */
1020 	if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1021 		return false;
1022 
1023 	crtc_state->req_psr2_sdp_prior_scanline = true;
1024 	return true;
1025 }
1026 
1027 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1028 				     struct intel_crtc_state *crtc_state)
1029 {
1030 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1031 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1032 	u8 max_wake_lines;
1033 
1034 	if (DISPLAY_VER(i915) >= 12) {
1035 		io_wake_time = 42;
1036 		/*
1037 		 * According to Bspec it's 42us, but based on testing
1038 		 * it is not enough -> use 45 us.
1039 		 */
1040 		fast_wake_time = 45;
1041 		max_wake_lines = 12;
1042 	} else {
1043 		io_wake_time = 50;
1044 		fast_wake_time = 32;
1045 		max_wake_lines = 8;
1046 	}
1047 
1048 	io_wake_lines = intel_usecs_to_scanlines(
1049 		&crtc_state->hw.adjusted_mode, io_wake_time);
1050 	fast_wake_lines = intel_usecs_to_scanlines(
1051 		&crtc_state->hw.adjusted_mode, fast_wake_time);
1052 
1053 	if (io_wake_lines > max_wake_lines ||
1054 	    fast_wake_lines > max_wake_lines)
1055 		return false;
1056 
1057 	if (i915->params.psr_safest_params)
1058 		io_wake_lines = fast_wake_lines = max_wake_lines;
1059 
1060 	/* According to Bspec the lower limit should be set to 7 lines. */
1061 	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1062 	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1063 
1064 	return true;
1065 }
1066 
1067 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1068 				    struct intel_crtc_state *crtc_state)
1069 {
1070 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1071 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1072 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1073 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1074 
1075 	if (!intel_dp->psr.sink_psr2_support)
1076 		return false;
1077 
1078 	/* JSL and EHL only support eDP 1.3 */
1079 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1080 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1081 		return false;
1082 	}
1083 
1084 	/* Wa_16011181250 */
1085 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1086 	    IS_DG2(dev_priv)) {
1087 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1088 		return false;
1089 	}
1090 
1091 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1092 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1093 		return false;
1094 	}
1095 
1096 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1097 		drm_dbg_kms(&dev_priv->drm,
1098 			    "PSR2 not supported in transcoder %s\n",
1099 			    transcoder_name(crtc_state->cpu_transcoder));
1100 		return false;
1101 	}
1102 
1103 	if (!psr2_global_enabled(intel_dp)) {
1104 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1105 		return false;
1106 	}
1107 
1108 	/*
1109 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1110 	 * resolution requires DSC to be enabled, priority is given to DSC
1111 	 * over PSR2.
1112 	 */
1113 	if (crtc_state->dsc.compression_enable &&
1114 	    (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1115 		drm_dbg_kms(&dev_priv->drm,
1116 			    "PSR2 cannot be enabled since DSC is enabled\n");
1117 		return false;
1118 	}
1119 
1120 	if (crtc_state->crc_enabled) {
1121 		drm_dbg_kms(&dev_priv->drm,
1122 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1123 		return false;
1124 	}
1125 
1126 	if (DISPLAY_VER(dev_priv) >= 12) {
1127 		psr_max_h = 5120;
1128 		psr_max_v = 3200;
1129 		max_bpp = 30;
1130 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1131 		psr_max_h = 4096;
1132 		psr_max_v = 2304;
1133 		max_bpp = 24;
1134 	} else if (DISPLAY_VER(dev_priv) == 9) {
1135 		psr_max_h = 3640;
1136 		psr_max_v = 2304;
1137 		max_bpp = 24;
1138 	}
1139 
1140 	if (crtc_state->pipe_bpp > max_bpp) {
1141 		drm_dbg_kms(&dev_priv->drm,
1142 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1143 			    crtc_state->pipe_bpp, max_bpp);
1144 		return false;
1145 	}
1146 
1147 	/* Wa_16011303918:adl-p */
1148 	if (crtc_state->vrr.enable &&
1149 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1150 		drm_dbg_kms(&dev_priv->drm,
1151 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1152 		return false;
1153 	}
1154 
1155 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1156 		drm_dbg_kms(&dev_priv->drm,
1157 			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1158 		return false;
1159 	}
1160 
1161 	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1162 		drm_dbg_kms(&dev_priv->drm,
1163 			    "PSR2 not enabled, Unable to use long enough wake times\n");
1164 		return false;
1165 	}
1166 
1167 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1168 	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1169 	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
1170 	    psr2_block_count_lines(intel_dp)) {
1171 		drm_dbg_kms(&dev_priv->drm,
1172 			    "PSR2 not enabled, too short vblank time\n");
1173 		return false;
1174 	}
1175 
1176 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1177 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1178 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
1179 			drm_dbg_kms(&dev_priv->drm,
1180 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1181 			return false;
1182 		}
1183 	}
1184 
1185 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1186 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1187 		goto unsupported;
1188 	}
1189 
1190 	if (!crtc_state->enable_psr2_sel_fetch &&
1191 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1192 		drm_dbg_kms(&dev_priv->drm,
1193 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1194 			    crtc_hdisplay, crtc_vdisplay,
1195 			    psr_max_h, psr_max_v);
1196 		goto unsupported;
1197 	}
1198 
1199 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1200 	return true;
1201 
1202 unsupported:
1203 	crtc_state->enable_psr2_sel_fetch = false;
1204 	return false;
1205 }
1206 
1207 void intel_psr_compute_config(struct intel_dp *intel_dp,
1208 			      struct intel_crtc_state *crtc_state,
1209 			      struct drm_connector_state *conn_state)
1210 {
1211 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1212 	const struct drm_display_mode *adjusted_mode =
1213 		&crtc_state->hw.adjusted_mode;
1214 	int psr_setup_time;
1215 
1216 	/*
1217 	 * Current PSR panels don't work reliably with VRR enabled, so
1218 	 * if VRR is enabled, do not enable PSR.
1219 	 */
1220 	if (crtc_state->vrr.enable)
1221 		return;
1222 
1223 	if (!CAN_PSR(intel_dp))
1224 		return;
1225 
1226 	if (!psr_global_enabled(intel_dp)) {
1227 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1228 		return;
1229 	}
1230 
1231 	if (intel_dp->psr.sink_not_reliable) {
1232 		drm_dbg_kms(&dev_priv->drm,
1233 			    "PSR sink implementation is not reliable\n");
1234 		return;
1235 	}
1236 
1237 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1238 		drm_dbg_kms(&dev_priv->drm,
1239 			    "PSR condition failed: Interlaced mode enabled\n");
1240 		return;
1241 	}
1242 
1243 	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1244 	if (psr_setup_time < 0) {
1245 		drm_dbg_kms(&dev_priv->drm,
1246 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1247 			    intel_dp->psr_dpcd[1]);
1248 		return;
1249 	}
1250 
1251 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1252 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1253 		drm_dbg_kms(&dev_priv->drm,
1254 			    "PSR condition failed: PSR setup time (%d us) too long\n",
1255 			    psr_setup_time);
1256 		return;
1257 	}
1258 
1259 	crtc_state->has_psr = true;
1260 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1261 
1262 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1263 	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1264 				     &crtc_state->psr_vsc);
1265 }
1266 
1267 void intel_psr_get_config(struct intel_encoder *encoder,
1268 			  struct intel_crtc_state *pipe_config)
1269 {
1270 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1271 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1272 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1273 	struct intel_dp *intel_dp;
1274 	u32 val;
1275 
1276 	if (!dig_port)
1277 		return;
1278 
1279 	intel_dp = &dig_port->dp;
1280 	if (!CAN_PSR(intel_dp))
1281 		return;
1282 
1283 	mutex_lock(&intel_dp->psr.lock);
1284 	if (!intel_dp->psr.enabled)
1285 		goto unlock;
1286 
1287 	/*
1288 	 * Not possible to rely on reading EDP_PSR/PSR2_CTL here, as PSR is
1289 	 * dynamically enabled/disabled by frontbuffer tracking and others.
1290 	 */
1291 	pipe_config->has_psr = true;
1292 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1293 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1294 
1295 	if (!intel_dp->psr.psr2_enabled)
1296 		goto unlock;
1297 
1298 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1299 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1300 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1301 			pipe_config->enable_psr2_sel_fetch = true;
1302 	}
1303 
1304 	if (DISPLAY_VER(dev_priv) >= 12) {
1305 		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1306 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1307 	}
1308 unlock:
1309 	mutex_unlock(&intel_dp->psr.lock);
1310 }
1311 
1312 static void intel_psr_activate(struct intel_dp *intel_dp)
1313 {
1314 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1315 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1316 
1317 	drm_WARN_ON(&dev_priv->drm,
1318 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1319 		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1320 
1321 	drm_WARN_ON(&dev_priv->drm,
1322 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1323 
1324 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1325 
1326 	lockdep_assert_held(&intel_dp->psr.lock);
1327 
1328 	/* psr1 and psr2 are mutually exclusive. */
1329 	if (intel_dp->psr.psr2_enabled)
1330 		hsw_activate_psr2(intel_dp);
1331 	else
1332 		hsw_activate_psr1(intel_dp);
1333 
1334 	intel_dp->psr.active = true;
1335 }
1336 
1337 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1338 {
1339 	switch (intel_dp->psr.pipe) {
1340 	case PIPE_A:
1341 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1342 	case PIPE_B:
1343 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1344 	case PIPE_C:
1345 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1346 	case PIPE_D:
1347 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1348 	default:
1349 		MISSING_CASE(intel_dp->psr.pipe);
1350 		return 0;
1351 	}
1352 }
1353 
1354 /*
1355  * Wa_16013835468
1356  * Wa_14015648006
1357  */
1358 static void wm_optimization_wa(struct intel_dp *intel_dp,
1359 			       const struct intel_crtc_state *crtc_state)
1360 {
1361 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1362 	bool set_wa_bit = false;
1363 
1364 	/* Wa_14015648006 */
1365 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1366 		set_wa_bit |= crtc_state->wm_level_disabled;
1367 
1368 	/* Wa_16013835468 */
1369 	if (DISPLAY_VER(dev_priv) == 12)
1370 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1371 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1372 
1373 	if (set_wa_bit)
1374 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1375 			     0, wa_16013835468_bit_get(intel_dp));
1376 	else
1377 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1378 			     wa_16013835468_bit_get(intel_dp), 0);
1379 }
1380 
1381 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1382 				    const struct intel_crtc_state *crtc_state)
1383 {
1384 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1385 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1386 	u32 mask;
1387 
1388 	/*
1389 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1390 	 * SKL+ use hardcoded values for PSR AUX transactions.
1391 	 */
1392 	if (DISPLAY_VER(dev_priv) < 9)
1393 		hsw_psr_setup_aux(intel_dp);
1394 
1395 	/*
1396 	 * Per spec: Avoid continuous PSR exit by masking MEMUP and HPD.
1397 	 * Also mask LPSP to avoid a dependency on other drivers that might
1398 	 * block runtime_pm, besides preventing other HW tracking issues,
1399 	 * now that we can rely on frontbuffer tracking.
1400 	 */
1401 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1402 	       EDP_PSR_DEBUG_MASK_HPD |
1403 	       EDP_PSR_DEBUG_MASK_LPSP |
1404 	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1405 
1406 	/*
1407 	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1408 	 * registers in order to keep the CURSURFLIVE tricks working :(
1409 	 */
1410 	if (IS_DISPLAY_VER(dev_priv, 9, 10))
1411 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1412 
1413 	/* allow PSR with sprite enabled */
1414 	if (IS_HASWELL(dev_priv))
1415 		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1416 
1417 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1418 
1419 	psr_irq_control(intel_dp);
1420 
1421 	/*
1422 	 * TODO: if future platforms support DC3CO in more than one
1423 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1424 	 */
1425 	if (intel_dp->psr.dc3co_exitline)
1426 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1427 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1428 
1429 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1430 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1431 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1432 			     IGNORE_PSR2_HW_TRACKING : 0);
1433 
1434 	/*
1435 	 * Wa_16013835468
1436 	 * Wa_14015648006
1437 	 */
1438 	wm_optimization_wa(intel_dp, crtc_state);
1439 
1440 	if (intel_dp->psr.psr2_enabled) {
1441 		if (DISPLAY_VER(dev_priv) == 9)
1442 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1443 				     PSR2_VSC_ENABLE_PROG_HEADER |
1444 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1445 
1446 		/*
1447 		 * Wa_16014451276:adlp,mtl[a0,b0]
1448 		 * All supported adlp panels have 1-based X granularity; this may
1449 		 * cause issues if non-supported panels are used.
1450 		 */
1451 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1452 			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1453 				     ADLP_1_BASED_X_GRANULARITY);
1454 		else if (IS_ALDERLAKE_P(dev_priv))
1455 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1456 				     ADLP_1_BASED_X_GRANULARITY);
1457 
1458 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1459 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1460 			intel_de_rmw(dev_priv,
1461 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1462 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1463 		else if (IS_ALDERLAKE_P(dev_priv))
1464 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1465 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1466 	}
1467 }
1468 
1469 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1470 {
1471 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1472 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1473 	u32 val;
1474 
1475 	/*
1476 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1477 	 * will still keep the error set even after the reset done in the
1478 	 * irq_preinstall and irq_uninstall hooks.
1479 	 * And enabling in this situation causes the screen to freeze the
1480 	 * first time that PSR HW tries to activate, so let's keep PSR disabled
1481 	 * to avoid any rendering problems.
1482 	 */
1483 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1484 	val &= psr_irq_psr_error_bit_get(intel_dp);
1485 	if (val) {
1486 		intel_dp->psr.sink_not_reliable = true;
1487 		drm_dbg_kms(&dev_priv->drm,
1488 			    "PSR interruption error set, not enabling PSR\n");
1489 		return false;
1490 	}
1491 
1492 	return true;
1493 }
1494 
1495 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1496 				    const struct intel_crtc_state *crtc_state)
1497 {
1498 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1499 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1500 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1501 	struct intel_encoder *encoder = &dig_port->base;
1502 	u32 val;
1503 
1504 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1505 
1506 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1507 	intel_dp->psr.busy_frontbuffer_bits = 0;
1508 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1509 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1510 	/* DC5/DC6 requires at least 6 idle frames */
1511 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1512 	intel_dp->psr.dc3co_exit_delay = val;
1513 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1514 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1515 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1516 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1517 		crtc_state->req_psr2_sdp_prior_scanline;
1518 
1519 	if (!psr_interrupt_error_check(intel_dp))
1520 		return;
1521 
1522 	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1523 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1524 	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1525 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1526 	intel_psr_enable_sink(intel_dp);
1527 	intel_psr_enable_source(intel_dp, crtc_state);
1528 	intel_dp->psr.enabled = true;
1529 	intel_dp->psr.paused = false;
1530 
1531 	intel_psr_activate(intel_dp);
1532 }
1533 
1534 static void intel_psr_exit(struct intel_dp *intel_dp)
1535 {
1536 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1537 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1538 	u32 val;
1539 
1540 	if (!intel_dp->psr.active) {
1541 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1542 			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1543 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1544 		}
1545 
1546 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1547 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1548 
1549 		return;
1550 	}
1551 
1552 	if (intel_dp->psr.psr2_enabled) {
1553 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1554 
1555 		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1556 				   EDP_PSR2_ENABLE, 0);
1557 
1558 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1559 	} else {
1560 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1561 				   EDP_PSR_ENABLE, 0);
1562 
1563 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1564 	}
1565 	intel_dp->psr.active = false;
1566 }
1567 
1568 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1569 {
1570 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1571 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1572 	i915_reg_t psr_status;
1573 	u32 psr_status_mask;
1574 
1575 	if (intel_dp->psr.psr2_enabled) {
1576 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1577 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1578 	} else {
1579 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1580 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1581 	}
1582 
1583 	/* Wait till PSR is idle */
1584 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1585 				    psr_status_mask, 2000))
1586 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1587 }
1588 
1589 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1590 {
1591 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1592 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1593 	enum phy phy = intel_port_to_phy(dev_priv,
1594 					 dp_to_dig_port(intel_dp)->base.port);
1595 
1596 	lockdep_assert_held(&intel_dp->psr.lock);
1597 
1598 	if (!intel_dp->psr.enabled)
1599 		return;
1600 
1601 	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1602 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1603 
1604 	intel_psr_exit(intel_dp);
1605 	intel_psr_wait_exit_locked(intel_dp);
1606 
1607 	/*
1608 	 * Wa_16013835468
1609 	 * Wa_14015648006
1610 	 */
1611 	if (DISPLAY_VER(dev_priv) >= 11)
1612 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1613 			     wa_16013835468_bit_get(intel_dp), 0);
1614 
1615 	if (intel_dp->psr.psr2_enabled) {
1616 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1617 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1618 			intel_de_rmw(dev_priv,
1619 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1620 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1621 		else if (IS_ALDERLAKE_P(dev_priv))
1622 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1623 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1624 	}
1625 
1626 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1627 
1628 	/* Disable PSR on Sink */
1629 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1630 
1631 	if (intel_dp->psr.psr2_enabled)
1632 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1633 
1634 	intel_dp->psr.enabled = false;
1635 	intel_dp->psr.psr2_enabled = false;
1636 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1637 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1638 }
1639 
1640 /**
1641  * intel_psr_disable - Disable PSR
1642  * @intel_dp: Intel DP
1643  * @old_crtc_state: old CRTC state
1644  *
1645  * This function needs to be called before disabling the pipe.
1646  */
1647 void intel_psr_disable(struct intel_dp *intel_dp,
1648 		       const struct intel_crtc_state *old_crtc_state)
1649 {
1650 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1651 
1652 	if (!old_crtc_state->has_psr)
1653 		return;
1654 
1655 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1656 		return;
1657 
1658 	mutex_lock(&intel_dp->psr.lock);
1659 
1660 	intel_psr_disable_locked(intel_dp);
1661 
1662 	mutex_unlock(&intel_dp->psr.lock);
1663 	cancel_work_sync(&intel_dp->psr.work);
1664 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1665 }
1666 
1667 /**
1668  * intel_psr_pause - Pause PSR
1669  * @intel_dp: Intel DP
1670  *
1671  * This function needs to be called after enabling PSR.
1672  */
1673 void intel_psr_pause(struct intel_dp *intel_dp)
1674 {
1675 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1676 	struct intel_psr *psr = &intel_dp->psr;
1677 
1678 	if (!CAN_PSR(intel_dp))
1679 		return;
1680 
1681 	mutex_lock(&psr->lock);
1682 
1683 	if (!psr->enabled) {
1684 		mutex_unlock(&psr->lock);
1685 		return;
1686 	}
1687 
1688 	/* If we ever hit this, we will need to add refcount to pause/resume */
1689 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1690 
1691 	intel_psr_exit(intel_dp);
1692 	intel_psr_wait_exit_locked(intel_dp);
1693 	psr->paused = true;
1694 
1695 	mutex_unlock(&psr->lock);
1696 
1697 	cancel_work_sync(&psr->work);
1698 	cancel_delayed_work_sync(&psr->dc3co_work);
1699 }
1700 
1701 /**
1702  * intel_psr_resume - Resume PSR
1703  * @intel_dp: Intel DP
1704  *
1705  * This function needs to be called after pausing PSR.
1706  */
1707 void intel_psr_resume(struct intel_dp *intel_dp)
1708 {
1709 	struct intel_psr *psr = &intel_dp->psr;
1710 
1711 	if (!CAN_PSR(intel_dp))
1712 		return;
1713 
1714 	mutex_lock(&psr->lock);
1715 
1716 	if (!psr->paused)
1717 		goto unlock;
1718 
1719 	psr->paused = false;
1720 	intel_psr_activate(intel_dp);
1721 
1722 unlock:
1723 	mutex_unlock(&psr->lock);
1724 }
1725 
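/*
 * ADL-P and display version 14+ use a different PSR2_MAN_TRK_CTL bit layout,
 * hence these helpers returning the right bit(s) for the running platform.
 */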
1726 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1727 {
1728 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1729 		PSR2_MAN_TRK_CTL_ENABLE;
1730 }
1731 
1732 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1733 {
1734 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1735 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1736 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1737 }
1738 
1739 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1740 {
1741 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1742 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1743 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1744 }
1745 
1746 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1747 {
1748 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1749 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1750 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1751 }
1752 
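/*
 * Make the hardware frontbuffer tracking exit PSR without a full
 * disable/re-enable cycle: program a single + continuous full frame fetch
 * when selective fetch is enabled, and in any case poke CURSURFLIVE (see the
 * WA note below).
 */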
1753 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1754 {
1755 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1756 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1757 
1758 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1759 		intel_de_write(dev_priv,
1760 			       PSR2_MAN_TRK_CTL(cpu_transcoder),
1761 			       man_trk_ctl_enable_bit_get(dev_priv) |
1762 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1763 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1764 			       man_trk_ctl_continuos_full_frame(dev_priv));
1765 
1766 	/*
1767 	 * Display WA #0884: skl+
1768 	 * This WA, documented for bxt, can be safely applied
1769 	 * broadly, so we can force HW tracking to exit PSR
1770 	 * instead of disabling and re-enabling it.
1771 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1772 	 * but it makes more sense to write to the currently active
1773 	 * pipe.
1774 	 *
1775 	 * This workaround does not exist for platforms with display version 10
1776 	 * or newer, but testing proved that it works up to display version 13;
1777 	 * for anything newer further testing will be needed.
1778 	 */
1779 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1780 }
1781 
1782 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1783 					    const struct intel_crtc_state *crtc_state)
1784 {
1785 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1786 	enum pipe pipe = plane->pipe;
1787 
1788 	if (!crtc_state->enable_psr2_sel_fetch)
1789 		return;
1790 
1791 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1792 }
1793 
1794 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1795 					    const struct intel_crtc_state *crtc_state,
1796 					    const struct intel_plane_state *plane_state)
1797 {
1798 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1799 	enum pipe pipe = plane->pipe;
1800 
1801 	if (!crtc_state->enable_psr2_sel_fetch)
1802 		return;
1803 
1804 	if (plane->id == PLANE_CURSOR)
1805 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1806 				  plane_state->ctl);
1807 	else
1808 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1809 				  PLANE_SEL_FETCH_CTL_ENABLE);
1810 }
1811 
1812 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1813 					      const struct intel_crtc_state *crtc_state,
1814 					      const struct intel_plane_state *plane_state,
1815 					      int color_plane)
1816 {
1817 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1818 	enum pipe pipe = plane->pipe;
1819 	const struct drm_rect *clip;
1820 	u32 val;
1821 	int x, y;
1822 
1823 	if (!crtc_state->enable_psr2_sel_fetch)
1824 		return;
1825 
1826 	if (plane->id == PLANE_CURSOR)
1827 		return;
1828 
1829 	clip = &plane_state->psr2_sel_fetch_area;
1830 
1831 	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1832 	val |= plane_state->uapi.dst.x1;
1833 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1834 
1835 	x = plane_state->view.color_plane[color_plane].x;
1836 
1837 	/*
1838 	 * From Bspec: UV surface Start Y Position = half of Y plane Y
1839 	 * start position.
1840 	 */
1841 	if (!color_plane)
1842 		y = plane_state->view.color_plane[color_plane].y + clip->y1;
1843 	else
1844 		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1845 
1846 	val = y << 16 | x;
1847 
1848 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1849 			  val);
1850 
1851 	/* Sizes are 0 based */
1852 	val = (drm_rect_height(clip) - 1) << 16;
1853 	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1854 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1855 }
1856 
1857 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1858 {
1859 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1860 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1861 	struct intel_encoder *encoder;
1862 
1863 	if (!crtc_state->enable_psr2_sel_fetch)
1864 		return;
1865 
1866 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1867 					     crtc_state->uapi.encoder_mask) {
1868 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1869 
1870 		lockdep_assert_held(&intel_dp->psr.lock);
1871 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1872 			return;
1873 		break;
1874 	}
1875 
1876 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1877 		       crtc_state->psr2_man_track_ctl);
1878 }
1879 
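/*
 * Encode the computed selective update region (or a full frame request) into
 * the PSR2_MAN_TRK_CTL value stored in the crtc state. Note that the
 * pre-ADL-P encoding works with a 4 line granularity, hence the division by 4.
 */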
1880 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1881 				  struct drm_rect *clip, bool full_update)
1882 {
1883 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1884 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1885 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1886 
1887 	/* SF partial frame enable has to be set even on full update */
1888 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1889 
1890 	if (full_update) {
1891 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1892 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
1893 		goto exit;
1894 	}
1895 
1896 	if (clip->y1 == -1)
1897 		goto exit;
1898 
1899 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1900 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1901 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1902 	} else {
1903 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1904 
1905 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1906 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1907 	}
1908 exit:
1909 	crtc_state->psr2_man_track_ctl = val;
1910 }
1911 
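/*
 * Merge another damaged rectangle into the overlapping damage area, after
 * clipping it against the pipe source rectangle. A y1 of -1 means the
 * overlap area has not been set yet.
 */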
1912 static void clip_area_update(struct drm_rect *overlap_damage_area,
1913 			     struct drm_rect *damage_area,
1914 			     struct drm_rect *pipe_src)
1915 {
1916 	if (!drm_rect_intersect(damage_area, pipe_src))
1917 		return;
1918 
1919 	if (overlap_damage_area->y1 == -1) {
1920 		overlap_damage_area->y1 = damage_area->y1;
1921 		overlap_damage_area->y2 = damage_area->y2;
1922 		return;
1923 	}
1924 
1925 	if (damage_area->y1 < overlap_damage_area->y1)
1926 		overlap_damage_area->y1 = damage_area->y1;
1927 
1928 	if (damage_area->y2 > overlap_damage_area->y2)
1929 		overlap_damage_area->y2 = damage_area->y2;
1930 }
1931 
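/*
 * Expand the selective update region so that y1/y2 are aligned to the
 * granularity required by the sink (or to the DSC slice height when
 * compression is enabled).
 */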
1932 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1933 						struct drm_rect *pipe_clip)
1934 {
1935 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1936 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1937 	u16 y_alignment;
1938 
1939 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1940 	if (crtc_state->dsc.compression_enable &&
1941 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1942 		y_alignment = vdsc_cfg->slice_height;
1943 	else
1944 		y_alignment = crtc_state->su_y_granularity;
1945 
1946 	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1947 	if (pipe_clip->y2 % y_alignment)
1948 		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1949 }
1950 
1951 /*
1952  * TODO: It is not clear how to handle planes with a negative position;
1953  * also, planes are not updated if they have a negative X
1954  * position, so for now do a full update in these cases.
1955  *
1956  * Plane scaling and rotation are not supported by selective fetch and both
1957  * properties can change without a modeset, so they need to be checked at
1958  * every atomic commit.
1959  */
1960 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1961 {
1962 	if (plane_state->uapi.dst.y1 < 0 ||
1963 	    plane_state->uapi.dst.x1 < 0 ||
1964 	    plane_state->scaler_id >= 0 ||
1965 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1966 		return false;
1967 
1968 	return true;
1969 }
1970 
1971 /*
1972  * Check for pipe properties that are not supported by selective fetch.
1973  *
1974  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1975  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1976  * enabled and going to the full update path.
1977  */
1978 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1979 {
1980 	if (crtc_state->scaler_state.scaler_id >= 0)
1981 		return false;
1982 
1983 	return true;
1984 }
1985 
1986 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1987 				struct intel_crtc *crtc)
1988 {
1989 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1990 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1991 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1992 	struct intel_plane_state *new_plane_state, *old_plane_state;
1993 	struct intel_plane *plane;
1994 	bool full_update = false;
1995 	int i, ret;
1996 
1997 	if (!crtc_state->enable_psr2_sel_fetch)
1998 		return 0;
1999 
2000 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2001 		full_update = true;
2002 		goto skip_sel_fetch_set_loop;
2003 	}
2004 
2005 	/*
2006 	 * Calculate the minimal selective fetch area of each plane and the
2007 	 * whole pipe damaged area.
2008 	 * In the next loop the plane selective fetch area will actually be set
2009 	 * using the whole pipe damaged area.
2010 	 */
2011 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2012 					     new_plane_state, i) {
2013 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2014 						      .x2 = INT_MAX };
2015 
2016 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2017 			continue;
2018 
2019 		if (!new_plane_state->uapi.visible &&
2020 		    !old_plane_state->uapi.visible)
2021 			continue;
2022 
2023 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2024 			full_update = true;
2025 			break;
2026 		}
2027 
2028 		/*
2029 		 * If the plane's visibility changed or the plane moved, mark the
2030 		 * whole plane area as damaged as it needs a complete redraw in
2031 		 * both the old and the new position.
2032 		 */
2033 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2034 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2035 				     &old_plane_state->uapi.dst)) {
2036 			if (old_plane_state->uapi.visible) {
2037 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2038 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2039 				clip_area_update(&pipe_clip, &damaged_area,
2040 						 &crtc_state->pipe_src);
2041 			}
2042 
2043 			if (new_plane_state->uapi.visible) {
2044 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2045 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2046 				clip_area_update(&pipe_clip, &damaged_area,
2047 						 &crtc_state->pipe_src);
2048 			}
2049 			continue;
2050 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2051 			/* If alpha changed mark the whole plane area as damaged */
2052 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2053 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2054 			clip_area_update(&pipe_clip, &damaged_area,
2055 					 &crtc_state->pipe_src);
2056 			continue;
2057 		}
2058 
2059 		src = drm_plane_state_src(&new_plane_state->uapi);
2060 		drm_rect_fp_to_int(&src, &src);
2061 
2062 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2063 						     &new_plane_state->uapi, &damaged_area))
2064 			continue;
2065 
2066 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2067 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2068 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2069 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2070 
2071 		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2072 	}
2073 
2074 	/*
2075 	 * TODO: For now we are just using a full update in case the
2076 	 * selective fetch area calculation fails. To optimize this we
2077 	 * should identify cases where this happens and fix the area
2078 	 * calculation for those.
2079 	 */
2080 	if (pipe_clip.y1 == -1) {
2081 		drm_info_once(&dev_priv->drm,
2082 			      "Selective fetch area calculation failed in pipe %c\n",
2083 			      pipe_name(crtc->pipe));
2084 		full_update = true;
2085 	}
2086 
2087 	if (full_update)
2088 		goto skip_sel_fetch_set_loop;
2089 
2090 	/* Wa_14014971492 */
2091 	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2092 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2093 	    crtc_state->splitter.enable)
2094 		pipe_clip.y1 = 0;
2095 
2096 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2097 	if (ret)
2098 		return ret;
2099 
2100 	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2101 
2102 	/*
2103 	 * Now that we have the pipe damaged area, check if it intersects with
2104 	 * each plane; if it does, set the plane selective fetch area.
2105 	 */
2106 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2107 					     new_plane_state, i) {
2108 		struct drm_rect *sel_fetch_area, inter;
2109 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2110 
2111 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2112 		    !new_plane_state->uapi.visible)
2113 			continue;
2114 
2115 		inter = pipe_clip;
2116 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2117 			continue;
2118 
2119 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2120 			full_update = true;
2121 			break;
2122 		}
2123 
2124 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2125 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2126 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2127 		crtc_state->update_planes |= BIT(plane->id);
2128 
2129 		/*
2130 		 * sel_fetch_area is calculated for the UV plane. Use the
2131 		 * same area for the Y plane as well.
2132 		 */
2133 		if (linked) {
2134 			struct intel_plane_state *linked_new_plane_state;
2135 			struct drm_rect *linked_sel_fetch_area;
2136 
2137 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2138 			if (IS_ERR(linked_new_plane_state))
2139 				return PTR_ERR(linked_new_plane_state);
2140 
2141 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2142 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2143 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2144 			crtc_state->update_planes |= BIT(linked->id);
2145 		}
2146 	}
2147 
2148 skip_sel_fetch_set_loop:
2149 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2150 	return 0;
2151 }
2152 
2153 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2154 				struct intel_crtc *crtc)
2155 {
2156 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2157 	const struct intel_crtc_state *old_crtc_state =
2158 		intel_atomic_get_old_crtc_state(state, crtc);
2159 	const struct intel_crtc_state *new_crtc_state =
2160 		intel_atomic_get_new_crtc_state(state, crtc);
2161 	struct intel_encoder *encoder;
2162 
2163 	if (!HAS_PSR(i915))
2164 		return;
2165 
2166 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2167 					     old_crtc_state->uapi.encoder_mask) {
2168 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2169 		struct intel_psr *psr = &intel_dp->psr;
2170 		bool needs_to_disable = false;
2171 
2172 		mutex_lock(&psr->lock);
2173 
2174 		/*
2175 		 * Reasons to disable:
2176 		 * - PSR disabled in new state
2177 		 * - All planes will go inactive
2178 		 * - Changing between PSR versions
2179 		 * - Display WA #1136: skl, bxt
2180 		 */
2181 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2182 		needs_to_disable |= !new_crtc_state->has_psr;
2183 		needs_to_disable |= !new_crtc_state->active_planes;
2184 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2185 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2186 			new_crtc_state->wm_level_disabled;
2187 
2188 		if (psr->enabled && needs_to_disable)
2189 			intel_psr_disable_locked(intel_dp);
2190 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2191 			/* Wa_14015648006 */
2192 			wm_optimization_wa(intel_dp, new_crtc_state);
2193 
2194 		mutex_unlock(&psr->lock);
2195 	}
2196 }
2197 
2198 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
2199 					 const struct intel_crtc_state *crtc_state)
2200 {
2201 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2202 	struct intel_encoder *encoder;
2203 
2204 	if (!crtc_state->has_psr)
2205 		return;
2206 
2207 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2208 					     crtc_state->uapi.encoder_mask) {
2209 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2210 		struct intel_psr *psr = &intel_dp->psr;
2211 		bool keep_disabled = false;
2212 
2213 		mutex_lock(&psr->lock);
2214 
2215 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2216 
2217 		keep_disabled |= psr->sink_not_reliable;
2218 		keep_disabled |= !crtc_state->active_planes;
2219 
2220 		/* Display WA #1136: skl, bxt */
2221 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2222 			crtc_state->wm_level_disabled;
2223 
2224 		if (!psr->enabled && !keep_disabled)
2225 			intel_psr_enable_locked(intel_dp, crtc_state);
2226 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2227 			/* Wa_14015648006 */
2228 			wm_optimization_wa(intel_dp, crtc_state);
2229 
2230 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2231 		if (crtc_state->crc_enabled && psr->enabled)
2232 			psr_force_hw_tracking_exit(intel_dp);
2233 
2234 		/*
2235 		 * Clear possible busy bits in case we have an
2236 		 * invalidate -> flip -> flush sequence.
2237 		 */
2238 		intel_dp->psr.busy_frontbuffer_bits = 0;
2239 
2240 		mutex_unlock(&psr->lock);
2241 	}
2242 }
2243 
2244 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2245 {
2246 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2247 	struct intel_crtc_state *crtc_state;
2248 	struct intel_crtc *crtc;
2249 	int i;
2250 
2251 	if (!HAS_PSR(dev_priv))
2252 		return;
2253 
2254 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2255 		_intel_psr_post_plane_update(state, crtc_state);
2256 }
2257 
2258 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2259 {
2260 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2261 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2262 
2263 	/*
2264 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2265 	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2266 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2267 	 */
2268 	return intel_de_wait_for_clear(dev_priv,
2269 				       EDP_PSR2_STATUS(cpu_transcoder),
2270 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2271 }
2272 
2273 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2274 {
2275 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2276 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2277 
2278 	/*
2279 	 * From bspec: Panel Self Refresh (BDW+)
2280 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2281 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2282 	 * defensive enough to cover everything.
2283 	 */
2284 	return intel_de_wait_for_clear(dev_priv,
2285 				       psr_status_reg(dev_priv, cpu_transcoder),
2286 				       EDP_PSR_STATUS_STATE_MASK, 50);
2287 }
2288 
2289 /**
2290  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2291  * @new_crtc_state: new CRTC state
2292  *
2293  * This function is expected to be called from pipe_update_start() where it is
2294  * not expected to race with PSR enable or disable.
2295  */
2296 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2297 {
2298 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2299 	struct intel_encoder *encoder;
2300 
2301 	if (!new_crtc_state->has_psr)
2302 		return;
2303 
2304 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2305 					     new_crtc_state->uapi.encoder_mask) {
2306 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2307 		int ret;
2308 
2309 		lockdep_assert_held(&intel_dp->psr.lock);
2310 
2311 		if (!intel_dp->psr.enabled)
2312 			continue;
2313 
2314 		if (intel_dp->psr.psr2_enabled)
2315 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2316 		else
2317 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2318 
2319 		if (ret)
2320 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2321 	}
2322 }
2323 
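/*
 * Wait for the PSR hardware to go idle with the PSR lock temporarily
 * dropped, then re-take the lock and report whether PSR is still enabled
 * and can therefore be re-activated.
 */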
2324 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2325 {
2326 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2327 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2328 	i915_reg_t reg;
2329 	u32 mask;
2330 	int err;
2331 
2332 	if (!intel_dp->psr.enabled)
2333 		return false;
2334 
2335 	if (intel_dp->psr.psr2_enabled) {
2336 		reg = EDP_PSR2_STATUS(cpu_transcoder);
2337 		mask = EDP_PSR2_STATUS_STATE_MASK;
2338 	} else {
2339 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2340 		mask = EDP_PSR_STATUS_STATE_MASK;
2341 	}
2342 
2343 	mutex_unlock(&intel_dp->psr.lock);
2344 
2345 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2346 	if (err)
2347 		drm_err(&dev_priv->drm,
2348 			"Timed out waiting for PSR Idle for re-enable\n");
2349 
2350 	/* After the unlocked wait, verify that PSR is still wanted! */
2351 	mutex_lock(&intel_dp->psr.lock);
2352 	return err == 0 && intel_dp->psr.enabled;
2353 }
2354 
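/*
 * Force an atomic commit with mode_changed set on every eDP connector so
 * that a PSR debug mode change actually takes effect, retrying on -EDEADLK
 * as usual for modeset locking.
 */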
2355 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2356 {
2357 	struct drm_connector_list_iter conn_iter;
2358 	struct drm_modeset_acquire_ctx ctx;
2359 	struct drm_atomic_state *state;
2360 	struct drm_connector *conn;
2361 	int err = 0;
2362 
2363 	state = drm_atomic_state_alloc(&dev_priv->drm);
2364 	if (!state)
2365 		return -ENOMEM;
2366 
2367 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2368 
2369 	state->acquire_ctx = &ctx;
2370 	to_intel_atomic_state(state)->internal = true;
2371 
2372 retry:
2373 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2374 	drm_for_each_connector_iter(conn, &conn_iter) {
2375 		struct drm_connector_state *conn_state;
2376 		struct drm_crtc_state *crtc_state;
2377 
2378 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2379 			continue;
2380 
2381 		conn_state = drm_atomic_get_connector_state(state, conn);
2382 		if (IS_ERR(conn_state)) {
2383 			err = PTR_ERR(conn_state);
2384 			break;
2385 		}
2386 
2387 		if (!conn_state->crtc)
2388 			continue;
2389 
2390 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2391 		if (IS_ERR(crtc_state)) {
2392 			err = PTR_ERR(crtc_state);
2393 			break;
2394 		}
2395 
2396 		/* Mark mode as changed to trigger a pipe->update() */
2397 		crtc_state->mode_changed = true;
2398 	}
2399 	drm_connector_list_iter_end(&conn_iter);
2400 
2401 	if (err == 0)
2402 		err = drm_atomic_commit(state);
2403 
2404 	if (err == -EDEADLK) {
2405 		drm_atomic_state_clear(state);
2406 		err = drm_modeset_backoff(&ctx);
2407 		if (!err)
2408 			goto retry;
2409 	}
2410 
2411 	drm_modeset_drop_locks(&ctx);
2412 	drm_modeset_acquire_fini(&ctx);
2413 	drm_atomic_state_put(state);
2414 
2415 	return err;
2416 }
2417 
2418 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2419 {
2420 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2421 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2422 	u32 old_mode;
2423 	int ret;
2424 
2425 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2426 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2427 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2428 		return -EINVAL;
2429 	}
2430 
2431 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2432 	if (ret)
2433 		return ret;
2434 
2435 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2436 	intel_dp->psr.debug = val;
2437 
2438 	/*
2439 	 * Do it right away if it's already enabled, otherwise it will be done
2440 	 * when enabling the source.
2441 	 */
2442 	if (intel_dp->psr.enabled)
2443 		psr_irq_control(intel_dp);
2444 
2445 	mutex_unlock(&intel_dp->psr.lock);
2446 
2447 	if (old_mode != mode)
2448 		ret = intel_psr_fastset_force(dev_priv);
2449 
2450 	return ret;
2451 }
2452 
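/*
 * Handle a previously flagged AUX error: disable PSR, mark the sink as not
 * reliable and wake the sink back up with a DPCD power write.
 */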
2453 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2454 {
2455 	struct intel_psr *psr = &intel_dp->psr;
2456 
2457 	intel_psr_disable_locked(intel_dp);
2458 	psr->sink_not_reliable = true;
2459 	/* let's make sure that the sink is awake */
2460 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2461 }
2462 
2463 static void intel_psr_work(struct work_struct *work)
2464 {
2465 	struct intel_dp *intel_dp =
2466 		container_of(work, typeof(*intel_dp), psr.work);
2467 
2468 	mutex_lock(&intel_dp->psr.lock);
2469 
2470 	if (!intel_dp->psr.enabled)
2471 		goto unlock;
2472 
2473 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2474 		intel_psr_handle_irq(intel_dp);
2475 
2476 	/*
2477 	 * We have to make sure PSR is ready for re-enable,
2478 	 * otherwise it stays disabled until the next full enable/disable cycle.
2479 	 * PSR might take some time to get fully disabled
2480 	 * and be ready for re-enable.
2481 	 */
2482 	if (!__psr_wait_for_idle_locked(intel_dp))
2483 		goto unlock;
2484 
2485 	/*
2486 	 * The delayed work can race with an invalidate hence we need to
2487 	 * recheck. Since psr_flush first clears this and then reschedules we
2488 	 * won't ever miss a flush when bailing out here.
2489 	 */
2490 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2491 		goto unlock;
2492 
2493 	intel_psr_activate(intel_dp);
2494 unlock:
2495 	mutex_unlock(&intel_dp->psr.lock);
2496 }
2497 
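/*
 * Invalidate handling: with selective fetch, switch to continuous full frame
 * fetching (kept until the flush path clears it); without selective fetch,
 * simply exit PSR until a later flush re-activates it.
 */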
2498 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2499 {
2500 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2501 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2502 
2503 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2504 		u32 val;
2505 
2506 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2507 			/* Send one update otherwise lag is observed on screen */
2508 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2509 			return;
2510 		}
2511 
2512 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2513 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2514 		      man_trk_ctl_continuos_full_frame(dev_priv);
2515 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2516 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2517 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2518 	} else {
2519 		intel_psr_exit(intel_dp);
2520 	}
2521 }
2522 
2523 /**
2524  * intel_psr_invalidate - Invalidate PSR
2525  * @dev_priv: i915 device
2526  * @frontbuffer_bits: frontbuffer plane tracking bits
2527  * @origin: which operation caused the invalidate
2528  *
2529  * Since the hardware frontbuffer tracking has gaps we need to integrate
2530  * with the software frontbuffer tracking. This function gets called every
2531  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2532  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2533  *
2534  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2535  */
2536 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2537 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2538 {
2539 	struct intel_encoder *encoder;
2540 
2541 	if (origin == ORIGIN_FLIP)
2542 		return;
2543 
2544 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2545 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2546 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2547 
2548 		mutex_lock(&intel_dp->psr.lock);
2549 		if (!intel_dp->psr.enabled) {
2550 			mutex_unlock(&intel_dp->psr.lock);
2551 			continue;
2552 		}
2553 
2554 		pipe_frontbuffer_bits &=
2555 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2556 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2557 
2558 		if (pipe_frontbuffer_bits)
2559 			_psr_invalidate_handle(intel_dp);
2560 
2561 		mutex_unlock(&intel_dp->psr.lock);
2562 	}
2563 }
2564 /*
2565  * When we completely rely on PSR2 S/W tracking in the future,
2566  * intel_psr_flush() will also invalidate and flush PSR for the ORIGIN_FLIP
2567  * event, and therefore tgl_dc3co_flush_locked() will need to be changed
2568  * accordingly.
2569  */
2570 static void
2571 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2572 		       enum fb_op_origin origin)
2573 {
2574 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2575 
2576 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2577 	    !intel_dp->psr.active)
2578 		return;
2579 
2580 	/*
2581 	 * At every frontbuffer flush (flip) event, modify the delay of the
2582 	 * delayed work; when the delayed work finally runs, the display has been idle.
2583 	 */
2584 	if (!(frontbuffer_bits &
2585 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2586 		return;
2587 
2588 	tgl_psr2_enable_dc3co(intel_dp);
2589 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2590 			 intel_dp->psr.dc3co_exit_delay);
2591 }
2592 
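/*
 * Flush handling: with selective fetch, disarm continuous full frame
 * fetching once no busy frontbuffer bits remain, or force a single full
 * frame via the HW tracking exit; without selective fetch, force the HW
 * tracking exit and schedule the work that re-activates PSR.
 */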
2593 static void _psr_flush_handle(struct intel_dp *intel_dp)
2594 {
2595 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2596 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2597 
2598 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2599 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2600 			/* can we turn CFF off? */
2601 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2602 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2603 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2604 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2605 					man_trk_ctl_continuos_full_frame(dev_priv);
2606 
2607 				/*
2608 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2609 				 * updates. Still keep the CFF bit enabled as we don't have a proper
2610 				 * SU configuration in case an update is sent for any reason after
2611 				 * the SFF bit gets cleared by the HW on the next vblank.
2612 				 */
2613 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2614 					       val);
2615 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2616 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2617 			}
2618 		} else {
2619 			/*
2620 			 * continuous full frame is disabled, only a single full
2621 			 * frame is required
2622 			 */
2623 			psr_force_hw_tracking_exit(intel_dp);
2624 		}
2625 	} else {
2626 		psr_force_hw_tracking_exit(intel_dp);
2627 
2628 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2629 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2630 	}
2631 }
2632 
2633 /**
2634  * intel_psr_flush - Flush PSR
2635  * @dev_priv: i915 device
2636  * @frontbuffer_bits: frontbuffer plane tracking bits
2637  * @origin: which operation caused the flush
2638  *
2639  * Since the hardware frontbuffer tracking has gaps we need to integrate
2640  * with the software frontbuffer tracking. This function gets called every
2641  * time frontbuffer rendering has completed and flushed out to memory. PSR
2642  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2643  *
2644  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2645  */
2646 void intel_psr_flush(struct drm_i915_private *dev_priv,
2647 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2648 {
2649 	struct intel_encoder *encoder;
2650 
2651 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2652 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2653 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2654 
2655 		mutex_lock(&intel_dp->psr.lock);
2656 		if (!intel_dp->psr.enabled) {
2657 			mutex_unlock(&intel_dp->psr.lock);
2658 			continue;
2659 		}
2660 
2661 		pipe_frontbuffer_bits &=
2662 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2663 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2664 
2665 		/*
2666 		 * If PSR is paused by an explicit intel_psr_pause() call,
2667 		 * we have to ensure that the PSR is not activated until
2668 		 * intel_psr_resume() is called.
2669 		 */
2670 		if (intel_dp->psr.paused)
2671 			goto unlock;
2672 
2673 		if (origin == ORIGIN_FLIP ||
2674 		    (origin == ORIGIN_CURSOR_UPDATE &&
2675 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2676 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2677 			goto unlock;
2678 		}
2679 
2680 		if (pipe_frontbuffer_bits == 0)
2681 			goto unlock;
2682 
2683 		/* By definition flush = invalidate + flush */
2684 		_psr_flush_handle(intel_dp);
2685 unlock:
2686 		mutex_unlock(&intel_dp->psr.lock);
2687 	}
2688 }
2689 
2690 /**
2691  * intel_psr_init - Init basic PSR work and mutex.
2692  * @intel_dp: Intel DP
2693  *
2694  * This function is called after initializing the connector
2695  * (connector initialization takes care of handling the connector capabilities)
2696  * and initializes basic PSR state for each DP encoder.
2697  */
2698 void intel_psr_init(struct intel_dp *intel_dp)
2699 {
2700 	struct intel_connector *connector = intel_dp->attached_connector;
2701 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2702 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2703 
2704 	if (!HAS_PSR(dev_priv))
2705 		return;
2706 
2707 	/*
2708 	 * HSW spec explicitly says PSR is tied to port A.
2709 	 * BDW+ platforms have an instance of the PSR registers per transcoder,
2710 	 * but BDW, GEN9 and GEN11 are not validated by the HW team on any
2711 	 * transcoder other than the eDP one.
2712 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2713 	 * so let's keep it hardcoded to PORT_A for those.
2714 	 * GEN12, however, supports an instance of the PSR registers per transcoder.
2715 	 */
2716 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2717 		drm_dbg_kms(&dev_priv->drm,
2718 			    "PSR condition failed: Port not supported\n");
2719 		return;
2720 	}
2721 
2722 	intel_dp->psr.source_support = true;
2723 
2724 	/* Set link_standby vs. link_off defaults */
2725 	if (DISPLAY_VER(dev_priv) < 12)
2726 		/* For platforms up to TGL, respect the VBT setting again */
2727 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2728 
2729 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2730 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2731 	mutex_init(&intel_dp->psr.lock);
2732 }
2733 
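/*
 * Read the sink PSR status and PSR error status over DPCD. Returns 0 on
 * success, with the status masked down to the sink state field.
 */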
2734 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2735 					   u8 *status, u8 *error_status)
2736 {
2737 	struct drm_dp_aux *aux = &intel_dp->aux;
2738 	int ret;
2739 
2740 	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2741 	if (ret != 1)
2742 		return ret;
2743 
2744 	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2745 	if (ret != 1)
2746 		return ret;
2747 
2748 	*status = *status & DP_PSR_SINK_STATE_MASK;
2749 
2750 	return 0;
2751 }
2752 
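/*
 * Only relevant when PSR2 is enabled: if the sink reports an ALPM lock
 * timeout error, disable PSR and mark the sink as not reliable.
 */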
2753 static void psr_alpm_check(struct intel_dp *intel_dp)
2754 {
2755 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2756 	struct drm_dp_aux *aux = &intel_dp->aux;
2757 	struct intel_psr *psr = &intel_dp->psr;
2758 	u8 val;
2759 	int r;
2760 
2761 	if (!psr->psr2_enabled)
2762 		return;
2763 
2764 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2765 	if (r != 1) {
2766 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2767 		return;
2768 	}
2769 
2770 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2771 		intel_psr_disable_locked(intel_dp);
2772 		psr->sink_not_reliable = true;
2773 		drm_dbg_kms(&dev_priv->drm,
2774 			    "ALPM lock timeout error, disabling PSR\n");
2775 
2776 		/* Clearing error */
2777 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2778 	}
2779 }
2780 
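/*
 * If the sink signals a PSR capability change through the ESI register,
 * disable PSR and mark the sink as not reliable.
 */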
2781 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2782 {
2783 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2784 	struct intel_psr *psr = &intel_dp->psr;
2785 	u8 val;
2786 	int r;
2787 
2788 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2789 	if (r != 1) {
2790 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2791 		return;
2792 	}
2793 
2794 	if (val & DP_PSR_CAPS_CHANGE) {
2795 		intel_psr_disable_locked(intel_dp);
2796 		psr->sink_not_reliable = true;
2797 		drm_dbg_kms(&dev_priv->drm,
2798 			    "Sink PSR capability changed, disabling PSR\n");
2799 
2800 		/* Clearing it */
2801 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2802 	}
2803 }
2804 
2805 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2806 {
2807 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2808 	struct intel_psr *psr = &intel_dp->psr;
2809 	u8 status, error_status;
2810 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2811 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2812 			  DP_PSR_LINK_CRC_ERROR;
2813 
2814 	if (!CAN_PSR(intel_dp))
2815 		return;
2816 
2817 	mutex_lock(&psr->lock);
2818 
2819 	if (!psr->enabled)
2820 		goto exit;
2821 
2822 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2823 		drm_err(&dev_priv->drm,
2824 			"Error reading PSR status or error status\n");
2825 		goto exit;
2826 	}
2827 
2828 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2829 		intel_psr_disable_locked(intel_dp);
2830 		psr->sink_not_reliable = true;
2831 	}
2832 
2833 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2834 		drm_dbg_kms(&dev_priv->drm,
2835 			    "PSR sink internal error, disabling PSR\n");
2836 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2837 		drm_dbg_kms(&dev_priv->drm,
2838 			    "PSR RFB storage error, disabling PSR\n");
2839 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2840 		drm_dbg_kms(&dev_priv->drm,
2841 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2842 	if (error_status & DP_PSR_LINK_CRC_ERROR)
2843 		drm_dbg_kms(&dev_priv->drm,
2844 			    "PSR Link CRC error, disabling PSR\n");
2845 
2846 	if (error_status & ~errors)
2847 		drm_err(&dev_priv->drm,
2848 			"PSR_ERROR_STATUS unhandled errors %x\n",
2849 			error_status & ~errors);
2850 	/* clear status register */
2851 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2852 
2853 	psr_alpm_check(intel_dp);
2854 	psr_capability_changed_check(intel_dp);
2855 
2856 exit:
2857 	mutex_unlock(&psr->lock);
2858 }
2859 
2860 bool intel_psr_enabled(struct intel_dp *intel_dp)
2861 {
2862 	bool ret;
2863 
2864 	if (!CAN_PSR(intel_dp))
2865 		return false;
2866 
2867 	mutex_lock(&intel_dp->psr.lock);
2868 	ret = intel_dp->psr.enabled;
2869 	mutex_unlock(&intel_dp->psr.lock);
2870 
2871 	return ret;
2872 }
2873 
2874 /**
2875  * intel_psr_lock - grab PSR lock
2876  * @crtc_state: the crtc state
2877  *
2878  * This is initially meant to be used around the CRTC update, when
2879  * vblank-sensitive registers are updated and we need to grab the lock
2880  * before that to avoid vblank evasion.
2881  */
2882 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2883 {
2884 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2885 	struct intel_encoder *encoder;
2886 
2887 	if (!crtc_state->has_psr)
2888 		return;
2889 
2890 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2891 					     crtc_state->uapi.encoder_mask) {
2892 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2893 
2894 		mutex_lock(&intel_dp->psr.lock);
2895 		break;
2896 	}
2897 }
2898 
2899 /**
2900  * intel_psr_unlock - release PSR lock
2901  * @crtc_state: the crtc state
2902  *
2903  * Release the PSR lock that was held during pipe update.
2904  */
2905 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2906 {
2907 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2908 	struct intel_encoder *encoder;
2909 
2910 	if (!crtc_state->has_psr)
2911 		return;
2912 
2913 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2914 					     crtc_state->uapi.encoder_mask) {
2915 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2916 
2917 		mutex_unlock(&intel_dp->psr.lock);
2918 		break;
2919 	}
2920 }
2921 
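/*
 * Decode the source (hardware) PSR1/PSR2 live state field into a human
 * readable string for the debugfs status output.
 */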
2922 static void
2923 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2924 {
2925 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2926 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2927 	const char *status = "unknown";
2928 	u32 val, status_val;
2929 
2930 	if (intel_dp->psr.psr2_enabled) {
2931 		static const char * const live_status[] = {
2932 			"IDLE",
2933 			"CAPTURE",
2934 			"CAPTURE_FS",
2935 			"SLEEP",
2936 			"BUFON_FW",
2937 			"ML_UP",
2938 			"SU_STANDBY",
2939 			"FAST_SLEEP",
2940 			"DEEP_SLEEP",
2941 			"BUF_ON",
2942 			"TG_ON"
2943 		};
2944 		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2945 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2946 		if (status_val < ARRAY_SIZE(live_status))
2947 			status = live_status[status_val];
2948 	} else {
2949 		static const char * const live_status[] = {
2950 			"IDLE",
2951 			"SRDONACK",
2952 			"SRDENT",
2953 			"BUFOFF",
2954 			"BUFON",
2955 			"AUXACK",
2956 			"SRDOFFACK",
2957 			"SRDENT_ON",
2958 		};
2959 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
2960 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2961 		if (status_val < ARRAY_SIZE(live_status))
2962 			status = live_status[status_val];
2963 	}
2964 
2965 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2966 }
2967 
2968 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2969 {
2970 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2971 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2972 	struct intel_psr *psr = &intel_dp->psr;
2973 	intel_wakeref_t wakeref;
2974 	const char *status;
2975 	bool enabled;
2976 	u32 val;
2977 
2978 	seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2979 	if (psr->sink_support)
2980 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2981 	seq_puts(m, "\n");
2982 
2983 	if (!psr->sink_support)
2984 		return 0;
2985 
2986 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2987 	mutex_lock(&psr->lock);
2988 
2989 	if (psr->enabled)
2990 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2991 	else
2992 		status = "disabled";
2993 	seq_printf(m, "PSR mode: %s\n", status);
2994 
2995 	if (!psr->enabled) {
2996 		seq_printf(m, "PSR sink not reliable: %s\n",
2997 			   str_yes_no(psr->sink_not_reliable));
2998 
2999 		goto unlock;
3000 	}
3001 
3002 	if (psr->psr2_enabled) {
3003 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3004 		enabled = val & EDP_PSR2_ENABLE;
3005 	} else {
3006 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3007 		enabled = val & EDP_PSR_ENABLE;
3008 	}
3009 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
3010 		   str_enabled_disabled(enabled), val);
3011 	psr_source_status(intel_dp, m);
3012 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3013 		   psr->busy_frontbuffer_bits);
3014 
3015 	/*
3016 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3017 	 */
3018 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3019 	seq_printf(m, "Performance counter: %u\n",
3020 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3021 
3022 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3023 		seq_printf(m, "Last attempted entry at: %lld\n",
3024 			   psr->last_entry_attempt);
3025 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3026 	}
3027 
3028 	if (psr->psr2_enabled) {
3029 		u32 su_frames_val[3];
3030 		int frame;
3031 
3032 		/*
3033 		 * Read all 3 registers beforehand to minimize the chance of crossing a
3034 		 * frame boundary between the register reads.
3035 		 */
3036 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3037 			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3038 			su_frames_val[frame / 3] = val;
3039 		}
3040 
3041 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3042 
3043 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3044 			u32 su_blocks;
3045 
3046 			su_blocks = su_frames_val[frame / 3] &
3047 				    PSR2_SU_STATUS_MASK(frame);
3048 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3049 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3050 		}
3051 
3052 		seq_printf(m, "PSR2 selective fetch: %s\n",
3053 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3054 	}
3055 
3056 unlock:
3057 	mutex_unlock(&psr->lock);
3058 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3059 
3060 	return 0;
3061 }
3062 
3063 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3064 {
3065 	struct drm_i915_private *dev_priv = m->private;
3066 	struct intel_dp *intel_dp = NULL;
3067 	struct intel_encoder *encoder;
3068 
3069 	if (!HAS_PSR(dev_priv))
3070 		return -ENODEV;
3071 
3072 	/* Find the first eDP which supports PSR */
3073 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3074 		intel_dp = enc_to_intel_dp(encoder);
3075 		break;
3076 	}
3077 
3078 	if (!intel_dp)
3079 		return -ENODEV;
3080 
3081 	return intel_psr_status(m, intel_dp);
3082 }
3083 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3084 
3085 static int
3086 i915_edp_psr_debug_set(void *data, u64 val)
3087 {
3088 	struct drm_i915_private *dev_priv = data;
3089 	struct intel_encoder *encoder;
3090 	intel_wakeref_t wakeref;
3091 	int ret = -ENODEV;
3092 
3093 	if (!HAS_PSR(dev_priv))
3094 		return ret;
3095 
3096 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3097 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3098 
3099 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3100 
3101 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3102 
3103 		// TODO: split to each transcoder's PSR debug state
3104 		ret = intel_psr_debug_set(intel_dp, val);
3105 
3106 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3107 	}
3108 
3109 	return ret;
3110 }
3111 
3112 static int
3113 i915_edp_psr_debug_get(void *data, u64 *val)
3114 {
3115 	struct drm_i915_private *dev_priv = data;
3116 	struct intel_encoder *encoder;
3117 
3118 	if (!HAS_PSR(dev_priv))
3119 		return -ENODEV;
3120 
3121 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3122 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3123 
3124 		// TODO: split to each transcoder's PSR debug state
3125 		*val = READ_ONCE(intel_dp->psr.debug);
3126 		return 0;
3127 	}
3128 
3129 	return -ENODEV;
3130 }
3131 
3132 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3133 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3134 			"%llu\n");
3135 
3136 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3137 {
3138 	struct drm_minor *minor = i915->drm.primary;
3139 
3140 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3141 			    i915, &i915_edp_psr_debug_fops);
3142 
3143 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3144 			    i915, &i915_edp_psr_status_fops);
3145 }
3146 
3147 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3148 {
3149 	struct intel_connector *connector = m->private;
3150 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3151 	static const char * const sink_status[] = {
3152 		"inactive",
3153 		"transition to active, capture and display",
3154 		"active, display from RFB",
3155 		"active, capture and display on sink device timings",
3156 		"transition to inactive, capture and display, timing re-sync",
3157 		"reserved",
3158 		"reserved",
3159 		"sink internal error",
3160 	};
3161 	const char *str;
3162 	int ret;
3163 	u8 status, error_status;
3164 
3165 	if (!CAN_PSR(intel_dp)) {
3166 		seq_puts(m, "PSR Unsupported\n");
3167 		return -ENODEV;
3168 	}
3169 
3170 	if (connector->base.status != connector_status_connected)
3171 		return -ENODEV;
3172 
3173 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3174 	if (ret)
3175 		return ret;
3176 
3177 	status &= DP_PSR_SINK_STATE_MASK;
3178 	if (status < ARRAY_SIZE(sink_status))
3179 		str = sink_status[status];
3180 	else
3181 		str = "unknown";
3182 
3183 	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str);
3184 
3185 	seq_printf(m, "Sink PSR error status: 0x%x", error_status);
3186 
3187 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3188 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3189 			    DP_PSR_LINK_CRC_ERROR))
3190 		seq_puts(m, ":\n");
3191 	else
3192 		seq_puts(m, "\n");
3193 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3194 		seq_puts(m, "\tPSR RFB storage error\n");
3195 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3196 		seq_puts(m, "\tPSR VSC SDP uncorrectable error\n");
3197 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3198 		seq_puts(m, "\tPSR Link CRC error\n");
3199 
3200 	return ret;
3201 }
3202 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3203 
3204 static int i915_psr_status_show(struct seq_file *m, void *data)
3205 {
3206 	struct intel_connector *connector = m->private;
3207 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3208 
3209 	return intel_psr_status(m, intel_dp);
3210 }
3211 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3212 
3213 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3214 {
3215 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3216 	struct dentry *root = connector->base.debugfs_entry;
3217 
3218 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3219 		return;
3220 
3221 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3222 			    connector, &i915_psr_sink_status_fops);
3223 
3224 	if (HAS_PSR(i915))
3225 		debugfs_create_file("i915_psr_status", 0444, root,
3226 				    connector, &i915_psr_status_fops);
3227 }
3228