xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 3cc808e3239cf566b3d3b15cf2beee066b60f241)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
33 #include "intel_de.h"
34 #include "intel_display_types.h"
35 #include "intel_dp.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
43 
44 /**
45  * DOC: Panel Self Refresh (PSR/SRD)
46  *
47  * Since Haswell the display controller supports Panel Self-Refresh on display
48  * panels which have a remote frame buffer (RFB) implemented according to the
49  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
50  * standby states when the system is idle but the display is on, as it
51  * completely eliminates display refresh requests to DDR memory as long as the
52  * frame buffer for that display is unchanged.
53  *
54  * Panel Self Refresh must be supported by both Hardware (source) and
55  * Panel (sink).
56  *
57  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58  * to power down the link and memory controller. For DSI panels the same idea
59  * is called "manual mode".
60  *
61  * The implementation uses the hardware-based PSR support which automatically
62  * enters/exits self-refresh mode. The hardware takes care of sending the
63  * required DP aux message and could even retrain the link (that part isn't
64  * enabled yet though). The hardware also keeps track of any frontbuffer
65  * changes to know when to exit self-refresh mode again. Unfortunately that
66  * part doesn't work too well, hence why the i915 PSR support uses the
67  * software frontbuffer tracking to make sure it doesn't miss a screen
68  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69  * get called by the frontbuffer tracking code. Note that because of locking
70  * issues the self-refresh re-enable code is done from a work queue, which
71  * must be correctly synchronized/cancelled when shutting down the pipe.
72  *
73  * DC3CO (DC3 clock off)
74  *
75  * On top of PSR2, GEN12 adds an intermediate power savings state that turns
76  * the clock off automatically during PSR2 idle state.
77  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
78  * entry/exit allows the HW to enter a low-power state even when page flipping
79  * periodically (for instance a 30fps video playback scenario).
80  *
81  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
82  * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
83  * after 6 frames. If no other flip occurs and that work function executes,
84  * DC3CO is disabled and PSR2 is configured to enter deep sleep again, the
85  * whole cycle restarting on the next flip.
86  * Front buffer modifications do not trigger DC3CO activation on purpose, as it
87  * would bring a lot of complexity and most modern systems will only
88  * use page flips.
89  */
90 
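/*
 * Rough usage sketch (illustrative only, assuming the intel_psr_invalidate()/
 * intel_psr_flush() prototypes from intel_psr.h): the frontbuffer tracking
 * code brackets CPU rendering roughly as
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU writes to the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 *
 * where the flush side re-arms PSR from the work queue mentioned above.
 */
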
91 /*
92  * Description of PSR mask bits:
93  *
94  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
95  *
96  *  When unmasked (nearly) all display register writes (eg. even
97  *  SWF) trigger a PSR exit. Some registers are excluded from this
98  *  and they have a more specific mask (described below). On icl+
99  *  this bit no longer exists and is effectively always set.
100  *
101  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
102  *
103  *  When unmasked (nearly) all pipe/plane register writes
104  *  trigger a PSR exit. Some plane registers are excluded from this
105  *  and they have a more specific mask (described below).
106  *
107  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
110  *
111  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112  *  SPR_SURF/CURBASE are not included in this and instead are
113  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
115  *
116  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
118  *
119  *  When unmasked PSR is blocked as long as the sprite
120  *  plane is enabled. skl+ with their universal planes no
121  *  longer have a mask bit like this, and no plane being
122  *  enabled blocks PSR.
123  *
124  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
126  *
127  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
128  *  this doesn't exist but CURPOS is included in the
129  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
130  *
131  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
133  *
134  *  When unmasked PSR is blocked as long as vblank and/or vsync
135  *  interrupt is unmasked in IMR *and* enabled in IER.
136  *
137  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
139  *
140  *  Selects whether PSR exit generates an extra vblank before
141  *  the first frame is transmitted. Also note the opposite polarity
142  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143  *  unmasked==do not generate the extra vblank).
144  *
145  *  With DC states enabled the extra vblank happens after link training,
146  *  with DC states disabled it happens immediately upon PSR exit trigger.
147  *  No idea as of now why there is a difference. HSW/BDW (which don't
148  *  even have DMC) always generate it after link training. Go figure.
149  *
150  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
151  *  and thus won't latch until the first vblank. So with DC states
152  *  enabled the register effectively uses the reset value during DC5
153  *  exit+PSR exit sequence, and thus the bit does nothing until
154  *  latched by the vblank that it was trying to prevent from being
155  *  generated in the first place. So we should probably call this
156  *  one a chicken/egg bit instead on skl+.
157  *
158  *  In standby mode (as opposed to link-off) this makes no difference
159  *  as the timing generator keeps running the whole time generating
160  *  normal periodic vblanks.
161  *
162  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163  *  and doing so makes the behaviour match the skl+ reset value.
164  *
165  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
167  *
168  *  On BDW without this bit set no vblanks whatsoever are
169  *  generated after PSR exit. On HSW this has no apparent effect.
170  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
171  *
172  * The rest of the bits are more self-explanatory and/or
173  * irrelevant for normal operation.
174  */
175 
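/*
 * Illustrative example (not code from this file): applying a workaround such
 * as WaPsrDPAMaskVBlankInSRD amounts to setting the corresponding chicken
 * bit, e.g. something along the lines of
 *
 *	intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
 *
 * with the register/bit names taken from the list above; the actual
 * workaround is applied elsewhere in the driver.
 */
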
176 bool intel_encoder_can_psr(struct intel_encoder *encoder)
177 {
178 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
179 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
180 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
181 	else
182 		return false;
183 }
184 
185 static bool psr_global_enabled(struct intel_dp *intel_dp)
186 {
187 	struct intel_connector *connector = intel_dp->attached_connector;
188 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
189 
190 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
191 	case I915_PSR_DEBUG_DEFAULT:
192 		if (i915->display.params.enable_psr == -1)
193 			return connector->panel.vbt.psr.enable;
194 		return i915->display.params.enable_psr;
195 	case I915_PSR_DEBUG_DISABLE:
196 		return false;
197 	default:
198 		return true;
199 	}
200 }
201 
202 static bool psr2_global_enabled(struct intel_dp *intel_dp)
203 {
204 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
205 
206 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
207 	case I915_PSR_DEBUG_DISABLE:
208 	case I915_PSR_DEBUG_FORCE_PSR1:
209 		return false;
210 	default:
211 		if (i915->display.params.enable_psr == 1)
212 			return false;
213 		return true;
214 	}
215 }
216 
217 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
218 {
219 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
220 
221 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
222 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
223 }
224 
225 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
226 {
227 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
228 
229 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
230 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
231 }
232 
233 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
234 {
235 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
236 
237 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
238 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
239 }
240 
241 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
242 {
243 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
244 
245 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
246 		EDP_PSR_MASK(intel_dp->psr.transcoder);
247 }
248 
249 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
250 			      enum transcoder cpu_transcoder)
251 {
252 	if (DISPLAY_VER(dev_priv) >= 8)
253 		return EDP_PSR_CTL(cpu_transcoder);
254 	else
255 		return HSW_SRD_CTL;
256 }
257 
258 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
259 				enum transcoder cpu_transcoder)
260 {
261 	if (DISPLAY_VER(dev_priv) >= 8)
262 		return EDP_PSR_DEBUG(cpu_transcoder);
263 	else
264 		return HSW_SRD_DEBUG;
265 }
266 
267 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
268 				   enum transcoder cpu_transcoder)
269 {
270 	if (DISPLAY_VER(dev_priv) >= 8)
271 		return EDP_PSR_PERF_CNT(cpu_transcoder);
272 	else
273 		return HSW_SRD_PERF_CNT;
274 }
275 
276 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
277 				 enum transcoder cpu_transcoder)
278 {
279 	if (DISPLAY_VER(dev_priv) >= 8)
280 		return EDP_PSR_STATUS(cpu_transcoder);
281 	else
282 		return HSW_SRD_STATUS;
283 }
284 
285 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
286 			      enum transcoder cpu_transcoder)
287 {
288 	if (DISPLAY_VER(dev_priv) >= 12)
289 		return TRANS_PSR_IMR(cpu_transcoder);
290 	else
291 		return EDP_PSR_IMR;
292 }
293 
294 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
295 			      enum transcoder cpu_transcoder)
296 {
297 	if (DISPLAY_VER(dev_priv) >= 12)
298 		return TRANS_PSR_IIR(cpu_transcoder);
299 	else
300 		return EDP_PSR_IIR;
301 }
302 
303 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
304 				  enum transcoder cpu_transcoder)
305 {
306 	if (DISPLAY_VER(dev_priv) >= 8)
307 		return EDP_PSR_AUX_CTL(cpu_transcoder);
308 	else
309 		return HSW_SRD_AUX_CTL;
310 }
311 
312 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
313 				   enum transcoder cpu_transcoder, int i)
314 {
315 	if (DISPLAY_VER(dev_priv) >= 8)
316 		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
317 	else
318 		return HSW_SRD_AUX_DATA(i);
319 }
320 
321 static void psr_irq_control(struct intel_dp *intel_dp)
322 {
323 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
324 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
325 	u32 mask;
326 
327 	mask = psr_irq_psr_error_bit_get(intel_dp);
328 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
329 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
330 			psr_irq_pre_entry_bit_get(intel_dp);
331 
332 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
333 		     psr_irq_mask_get(intel_dp), ~mask);
334 }
335 
336 static void psr_event_print(struct drm_i915_private *i915,
337 			    u32 val, bool psr2_enabled)
338 {
339 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
340 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
341 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
342 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
343 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
344 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
345 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
346 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
347 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
348 	if (val & PSR_EVENT_GRAPHICS_RESET)
349 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
350 	if (val & PSR_EVENT_PCH_INTERRUPT)
351 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
352 	if (val & PSR_EVENT_MEMORY_UP)
353 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
354 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
355 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
356 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
357 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
358 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
359 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
360 	if (val & PSR_EVENT_REGISTER_UPDATE)
361 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
362 	if (val & PSR_EVENT_HDCP_ENABLE)
363 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
364 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
365 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
366 	if (val & PSR_EVENT_VBI_ENABLE)
367 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
368 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
369 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
370 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
371 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
372 }
373 
374 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
375 {
376 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
377 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
378 	ktime_t time_ns =  ktime_get();
379 
380 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
381 		intel_dp->psr.last_entry_attempt = time_ns;
382 		drm_dbg_kms(&dev_priv->drm,
383 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
384 			    transcoder_name(cpu_transcoder));
385 	}
386 
387 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
388 		intel_dp->psr.last_exit = time_ns;
389 		drm_dbg_kms(&dev_priv->drm,
390 			    "[transcoder %s] PSR exit completed\n",
391 			    transcoder_name(cpu_transcoder));
392 
393 		if (DISPLAY_VER(dev_priv) >= 9) {
394 			u32 val;
395 
396 			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
397 
398 			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
399 		}
400 	}
401 
402 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
403 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
404 			 transcoder_name(cpu_transcoder));
405 
406 		intel_dp->psr.irq_aux_error = true;
407 
408 		/*
409 		 * If this interrupt is not masked it will keep firing
410 		 * so fast that it prevents the scheduled work from
411 		 * running.
412 		 * Also, after a PSR error we don't want to arm PSR
413 		 * again, so we don't care about unmasking the interrupt
414 		 * or clearing irq_aux_error.
415 		 */
416 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
417 			     0, psr_irq_psr_error_bit_get(intel_dp));
418 
419 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
420 	}
421 }
422 
423 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
424 {
425 	u8 alpm_caps = 0;
426 
427 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
428 			      &alpm_caps) != 1)
429 		return false;
430 	return alpm_caps & DP_ALPM_CAP;
431 }
432 
433 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
434 {
435 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
436 	u8 val = 8; /* assume the worst if we can't read the value */
437 
438 	if (drm_dp_dpcd_readb(&intel_dp->aux,
439 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
440 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
441 	else
442 		drm_dbg_kms(&i915->drm,
443 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
444 	return val;
445 }
446 
447 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
448 {
449 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
450 	ssize_t r;
451 	u16 w;
452 	u8 y;
453 
454 	/* If the sink doesn't have specific granularity requirements, set legacy ones */
455 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
456 		/* As PSR2 HW sends full lines, we do not care about x granularity */
457 		w = 4;
458 		y = 4;
459 		goto exit;
460 	}
461 
462 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
463 	if (r != 2)
464 		drm_dbg_kms(&i915->drm,
465 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
466 	/*
467 	 * Spec says that if the value read is 0 the default granularity should
468 	 * be used instead.
469 	 */
470 	if (r != 2 || w == 0)
471 		w = 4;
472 
473 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
474 	if (r != 1) {
475 		drm_dbg_kms(&i915->drm,
476 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
477 		y = 4;
478 	}
479 	if (y == 0)
480 		y = 1;
481 
482 exit:
483 	intel_dp->psr.su_w_granularity = w;
484 	intel_dp->psr.su_y_granularity = y;
485 }
486 
487 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
488 {
489 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
490 	u8 pr_dpcd = 0;
491 
492 	intel_dp->psr.sink_panel_replay_support = false;
493 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
494 
495 	if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
496 		drm_dbg_kms(&i915->drm,
497 			    "Panel replay is not supported by panel\n");
498 		return;
499 	}
500 
501 	drm_dbg_kms(&i915->drm,
502 		    "Panel replay is supported by panel\n");
503 	intel_dp->psr.sink_panel_replay_support = true;
504 }
505 
506 static void _psr_init_dpcd(struct intel_dp *intel_dp)
507 {
508 	struct drm_i915_private *i915 =
509 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
510 
511 	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
512 		    intel_dp->psr_dpcd[0]);
513 
514 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
515 		drm_dbg_kms(&i915->drm,
516 			    "PSR support not currently available for this panel\n");
517 		return;
518 	}
519 
520 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
521 		drm_dbg_kms(&i915->drm,
522 			    "Panel lacks power state control, PSR cannot be enabled\n");
523 		return;
524 	}
525 
526 	intel_dp->psr.sink_support = true;
527 	intel_dp->psr.sink_sync_latency =
528 		intel_dp_get_sink_sync_latency(intel_dp);
529 
530 	if (DISPLAY_VER(i915) >= 9 &&
531 	    intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
532 		bool y_req = intel_dp->psr_dpcd[1] &
533 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
534 		bool alpm = intel_dp_get_alpm_status(intel_dp);
535 
536 		/*
537 		 * All panels that support PSR version 03h (PSR2 +
538 		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
539 		 * only sure that it is going to be used when required by the
540 		 * panel. This way the panel is capable of doing selective
541 		 * updates without an AUX frame sync.
542 		 *
543 		 * To support PSR version 02h and PSR version 03h panels
544 		 * without the Y-coordinate requirement we would need to
545 		 * enable GTC first.
546 		 */
547 		intel_dp->psr.sink_psr2_support = y_req && alpm;
548 		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
549 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
550 	}
551 }
552 
553 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
554 {
555 	_panel_replay_init_dpcd(intel_dp);
556 
557 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
558 			 sizeof(intel_dp->psr_dpcd));
559 
560 	if (intel_dp->psr_dpcd[0])
561 		_psr_init_dpcd(intel_dp);
562 
563 	if (intel_dp->psr.sink_psr2_support) {
564 		intel_dp->psr.colorimetry_support =
565 			intel_dp_get_colorimetry_status(intel_dp);
566 		intel_dp_get_su_granularity(intel_dp);
567 	}
568 }
569 
570 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
571 {
572 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
574 	u32 aux_clock_divider, aux_ctl;
575 	/* write DP_SET_POWER=D0 */
576 	static const u8 aux_msg[] = {
577 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
578 		[1] = (DP_SET_POWER >> 8) & 0xff,
579 		[2] = DP_SET_POWER & 0xff,
580 		[3] = 1 - 1,
581 		[4] = DP_SET_POWER_D0,
582 	};
583 	int i;
584 
585 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
586 	for (i = 0; i < sizeof(aux_msg); i += 4)
587 		intel_de_write(dev_priv,
588 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
589 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
590 
591 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
592 
593 	/* Start with bits set for DDI_AUX_CTL register */
594 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
595 					     aux_clock_divider);
596 
597 	/* Select only valid bits for SRD_AUX_CTL */
598 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
599 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
600 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
601 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
602 
603 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
604 		       aux_ctl);
605 }
606 
607 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
608 {
609 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
610 	u8 dpcd_val = DP_PSR_ENABLE;
611 
612 	if (intel_dp->psr.panel_replay_enabled)
613 		return;
614 
615 	if (intel_dp->psr.psr2_enabled) {
616 		/* Enable ALPM at sink for psr2 */
617 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
618 				   DP_ALPM_ENABLE |
619 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
620 
621 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
622 	} else {
623 		if (intel_dp->psr.link_standby)
624 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
625 
626 		if (DISPLAY_VER(dev_priv) >= 8)
627 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
628 	}
629 
630 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
631 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
632 
633 	if (intel_dp->psr.entry_setup_frames > 0)
634 		dpcd_val |= DP_PSR_FRAME_CAPTURE;
635 
636 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
637 
638 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
639 }
640 
641 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
642 {
643 	struct intel_connector *connector = intel_dp->attached_connector;
644 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
645 	u32 val = 0;
646 
647 	if (DISPLAY_VER(dev_priv) >= 11)
648 		val |= EDP_PSR_TP4_TIME_0us;
649 
650 	if (dev_priv->display.params.psr_safest_params) {
651 		val |= EDP_PSR_TP1_TIME_2500us;
652 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
653 		goto check_tp3_sel;
654 	}
655 
656 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
657 		val |= EDP_PSR_TP1_TIME_0us;
658 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
659 		val |= EDP_PSR_TP1_TIME_100us;
660 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
661 		val |= EDP_PSR_TP1_TIME_500us;
662 	else
663 		val |= EDP_PSR_TP1_TIME_2500us;
664 
665 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
666 		val |= EDP_PSR_TP2_TP3_TIME_0us;
667 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
668 		val |= EDP_PSR_TP2_TP3_TIME_100us;
669 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
670 		val |= EDP_PSR_TP2_TP3_TIME_500us;
671 	else
672 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
673 
674 	/*
675 	 * WA 0479: hsw,bdw
676 	 * "Do not skip both TP1 and TP2/TP3"
677 	 */
678 	if (DISPLAY_VER(dev_priv) < 9 &&
679 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
680 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
681 		val |= EDP_PSR_TP2_TP3_TIME_100us;
682 
683 check_tp3_sel:
684 	if (intel_dp_source_supports_tps3(dev_priv) &&
685 	    drm_dp_tps3_supported(intel_dp->dpcd))
686 		val |= EDP_PSR_TP_TP1_TP3;
687 	else
688 		val |= EDP_PSR_TP_TP1_TP2;
689 
690 	return val;
691 }
692 
693 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
694 {
695 	struct intel_connector *connector = intel_dp->attached_connector;
696 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
697 	int idle_frames;
698 
699 	/* Let's use 6 as the minimum to cover all known cases including the
700 	 * off-by-one issue that HW has in some cases.
701 	 */
702 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
703 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
704 
705 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
706 		idle_frames = 0xf;
707 
708 	return idle_frames;
709 }
710 
711 static void hsw_activate_psr1(struct intel_dp *intel_dp)
712 {
713 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
714 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
715 	u32 max_sleep_time = 0x1f;
716 	u32 val = EDP_PSR_ENABLE;
717 
718 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
719 
720 	if (DISPLAY_VER(dev_priv) < 20)
721 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
722 
723 	if (IS_HASWELL(dev_priv))
724 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
725 
726 	if (intel_dp->psr.link_standby)
727 		val |= EDP_PSR_LINK_STANDBY;
728 
729 	val |= intel_psr1_get_tp_time(intel_dp);
730 
731 	if (DISPLAY_VER(dev_priv) >= 8)
732 		val |= EDP_PSR_CRC_ENABLE;
733 
734 	if (DISPLAY_VER(dev_priv) >= 20)
735 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
736 
737 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
738 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
739 }
740 
741 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
742 {
743 	struct intel_connector *connector = intel_dp->attached_connector;
744 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
745 	u32 val = 0;
746 
747 	if (dev_priv->display.params.psr_safest_params)
748 		return EDP_PSR2_TP2_TIME_2500us;
749 
750 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
751 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
752 		val |= EDP_PSR2_TP2_TIME_50us;
753 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
754 		val |= EDP_PSR2_TP2_TIME_100us;
755 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
756 		val |= EDP_PSR2_TP2_TIME_500us;
757 	else
758 		val |= EDP_PSR2_TP2_TIME_2500us;
759 
760 	return val;
761 }
762 
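/*
 * Lines covered by the PSR2_CTL Block Count, derived from the wake line
 * counts; psr2_block_count() converts this into the 4 line units the
 * register uses (2 == BLOCK_COUNT_NUM_2, 3 == BLOCK_COUNT_NUM_3).
 */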
763 static int psr2_block_count_lines(struct intel_dp *intel_dp)
764 {
765 	return intel_dp->psr.io_wake_lines < 9 &&
766 		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
767 }
768 
769 static int psr2_block_count(struct intel_dp *intel_dp)
770 {
771 	return psr2_block_count_lines(intel_dp) / 4;
772 }
773 
774 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
775 {
776 	u8 frames_before_su_entry;
777 
778 	frames_before_su_entry = max_t(u8,
779 				       intel_dp->psr.sink_sync_latency + 1,
780 				       2);
781 
782 	/* Entry setup frames must be at least 1 less than frames before SU entry */
783 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
784 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
785 
786 	return frames_before_su_entry;
787 }
788 
789 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
790 {
791 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
792 
793 	intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
794 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
795 
796 	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
797 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
798 }
799 
800 static void hsw_activate_psr2(struct intel_dp *intel_dp)
801 {
802 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
803 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
804 	u32 val = EDP_PSR2_ENABLE;
805 	u32 psr_val = 0;
806 
807 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
808 
809 	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
810 		val |= EDP_SU_TRACK_ENABLE;
811 
812 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
813 		val |= EDP_Y_COORDINATE_ENABLE;
814 
815 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
816 
817 	val |= intel_psr2_get_tp_time(intel_dp);
818 
819 	if (DISPLAY_VER(dev_priv) >= 12) {
820 		if (psr2_block_count(intel_dp) > 2)
821 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
822 		else
823 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
824 	}
825 
826 	/* Wa_22012278275:adl-p */
827 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
828 		static const u8 map[] = {
829 			2, /* 5 lines */
830 			1, /* 6 lines */
831 			0, /* 7 lines */
832 			3, /* 8 lines */
833 			6, /* 9 lines */
834 			5, /* 10 lines */
835 			4, /* 11 lines */
836 			7, /* 12 lines */
837 		};
838 		/*
839 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
840 		 * comments below for more information
841 		 */
842 		int tmp;
843 
844 		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
845 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
846 
847 		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
848 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
849 	} else if (DISPLAY_VER(dev_priv) >= 12) {
850 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
851 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
852 	} else if (DISPLAY_VER(dev_priv) >= 9) {
853 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
854 		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
855 	}
856 
857 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
858 		val |= EDP_PSR2_SU_SDP_SCANLINE;
859 
860 	if (DISPLAY_VER(dev_priv) >= 20)
861 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
862 
863 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
864 		u32 tmp;
865 
866 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
867 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
868 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
869 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
870 	}
871 
872 	/*
873 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
874 	 * recommending to keep this bit unset while PSR2 is enabled.
875 	 */
876 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
877 
878 	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
879 }
880 
881 static bool
882 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
883 {
884 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
885 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
886 	else if (DISPLAY_VER(dev_priv) >= 12)
887 		return cpu_transcoder == TRANSCODER_A;
888 	else if (DISPLAY_VER(dev_priv) >= 9)
889 		return cpu_transcoder == TRANSCODER_EDP;
890 	else
891 		return false;
892 }
893 
894 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
895 {
896 	if (!cstate || !cstate->hw.active)
897 		return 0;
898 
899 	return DIV_ROUND_UP(1000 * 1000,
900 			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
901 }
902 
903 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
904 				     u32 idle_frames)
905 {
906 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
907 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
908 
909 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
910 		     EDP_PSR2_IDLE_FRAMES_MASK,
911 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
912 }
913 
914 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
915 {
916 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
917 
918 	psr2_program_idle_frames(intel_dp, 0);
919 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
920 }
921 
922 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
923 {
924 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
925 
926 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
927 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
928 }
929 
930 static void tgl_dc3co_disable_work(struct work_struct *work)
931 {
932 	struct intel_dp *intel_dp =
933 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
934 
935 	mutex_lock(&intel_dp->psr.lock);
936 	/* If delayed work is pending, it is not idle */
937 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
938 		goto unlock;
939 
940 	tgl_psr2_disable_dc3co(intel_dp);
941 unlock:
942 	mutex_unlock(&intel_dp->psr.lock);
943 }
944 
945 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
946 {
947 	if (!intel_dp->psr.dc3co_exitline)
948 		return;
949 
950 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
951 	/* Before PSR2 exit disallow dc3co */
952 	tgl_psr2_disable_dc3co(intel_dp);
953 }
954 
955 static bool
956 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
957 			      struct intel_crtc_state *crtc_state)
958 {
959 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
960 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
961 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
962 	enum port port = dig_port->base.port;
963 
964 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
965 		return pipe <= PIPE_B && port <= PORT_B;
966 	else
967 		return pipe == PIPE_A && port == PORT_A;
968 }
969 
970 static void
971 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
972 				  struct intel_crtc_state *crtc_state)
973 {
974 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
975 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
976 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
977 	u32 exit_scanlines;
978 
979 	/*
980 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
981 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
982 	 * is applied. B.Specs:49196
983 	 */
984 	return;
985 
986 	/*
987 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
988 	 * TODO: when the issue is addressed, this restriction should be removed.
989 	 */
990 	if (crtc_state->enable_psr2_sel_fetch)
991 		return;
992 
993 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
994 		return;
995 
996 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
997 		return;
998 
999 	/* Wa_16011303918:adl-p */
1000 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1001 		return;
1002 
1003 	/*
1004 	 * DC3CO Exit time 200us B.Spec 49196
1005 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1006 	 */
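	/*
	 * Worked example (illustrative numbers only): with ~14.8us per line
	 * (e.g. a 1920x1080@60 mode with a 148.5 MHz pixel clock and htotal
	 * 2200), ROUNDUP(200 / 14.8) + 1 = 15 exit scanlines, placing the
	 * exit line 15 lines above the end of the active area.
	 */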
1007 	exit_scanlines =
1008 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1009 
1010 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1011 		return;
1012 
1013 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1014 }
1015 
1016 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1017 					      struct intel_crtc_state *crtc_state)
1018 {
1019 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1020 
1021 	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1022 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1023 		drm_dbg_kms(&dev_priv->drm,
1024 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1025 		return false;
1026 	}
1027 
1028 	if (crtc_state->uapi.async_flip) {
1029 		drm_dbg_kms(&dev_priv->drm,
1030 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1031 		return false;
1032 	}
1033 
1034 	return crtc_state->enable_psr2_sel_fetch = true;
1035 }
1036 
1037 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1038 				   struct intel_crtc_state *crtc_state)
1039 {
1040 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1041 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1042 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1043 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1044 	u16 y_granularity = 0;
1045 
1046 	/* PSR2 HW only sends full lines so we only need to validate the width */
1047 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1048 		return false;
1049 
1050 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1051 		return false;
1052 
1053 	/* HW tracking is only aligned to 4 lines */
1054 	if (!crtc_state->enable_psr2_sel_fetch)
1055 		return intel_dp->psr.su_y_granularity == 4;
1056 
1057 	/*
1058 	 * adl_p and mtl platforms have 1 line granularity.
1059 	 * For other platforms with SW tracking we can adjust the y coordinates
1060 	 * to match the sink requirement if it is a multiple of 4.
1061 	 */
1062 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1063 		y_granularity = intel_dp->psr.su_y_granularity;
1064 	else if (intel_dp->psr.su_y_granularity <= 2)
1065 		y_granularity = 4;
1066 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1067 		y_granularity = intel_dp->psr.su_y_granularity;
1068 
1069 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1070 		return false;
1071 
1072 	if (crtc_state->dsc.compression_enable &&
1073 	    vdsc_cfg->slice_height % y_granularity)
1074 		return false;
1075 
1076 	crtc_state->su_y_granularity = y_granularity;
1077 	return true;
1078 }
1079 
1080 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1081 							struct intel_crtc_state *crtc_state)
1082 {
1083 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1084 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1085 	u32 hblank_total, hblank_ns, req_ns;
1086 
1087 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1088 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1089 
1090 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
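	/*
	 * E.g. (illustrative, not from the spec): 2 lanes on a 2.7 GHz link
	 * (port_clock == 270000) gives (60 / 2 + 11) * 1000 / 270 ~= 151 ns.
	 */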
1091 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1092 
1093 	if ((hblank_ns - req_ns) > 100)
1094 		return true;
1095 
1096 	/* Not supported <13 / Wa_22012279113:adl-p */
1097 	if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1098 		return false;
1099 
1100 	crtc_state->req_psr2_sdp_prior_scanline = true;
1101 	return true;
1102 }
1103 
1104 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1105 				     struct intel_crtc_state *crtc_state)
1106 {
1107 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1108 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1109 	u8 max_wake_lines;
1110 
1111 	if (DISPLAY_VER(i915) >= 12) {
1112 		io_wake_time = 42;
1113 		/*
1114 		 * According to Bspec it's 42us, but based on testing
1115 		 * it is not enough -> use 45 us.
1116 		 */
1117 		fast_wake_time = 45;
1118 		max_wake_lines = 12;
1119 	} else {
1120 		io_wake_time = 50;
1121 		fast_wake_time = 32;
1122 		max_wake_lines = 8;
1123 	}
1124 
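	/*
	 * Illustrative conversion (assumed 1920x1080@60 timings, ~14.8us per
	 * line): 42us rounds up to 3 lines and 45us to 4 lines, both of which
	 * are then raised to the 7 line lower limit further below.
	 */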
1125 	io_wake_lines = intel_usecs_to_scanlines(
1126 		&crtc_state->hw.adjusted_mode, io_wake_time);
1127 	fast_wake_lines = intel_usecs_to_scanlines(
1128 		&crtc_state->hw.adjusted_mode, fast_wake_time);
1129 
1130 	if (io_wake_lines > max_wake_lines ||
1131 	    fast_wake_lines > max_wake_lines)
1132 		return false;
1133 
1134 	if (i915->display.params.psr_safest_params)
1135 		io_wake_lines = fast_wake_lines = max_wake_lines;
1136 
1137 	/* According to Bspec the lower limit should be set to 7 lines. */
1138 	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1139 	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1140 
1141 	return true;
1142 }
1143 
1144 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1145 					const struct drm_display_mode *adjusted_mode)
1146 {
1147 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1148 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1149 	int entry_setup_frames = 0;
1150 
1151 	if (psr_setup_time < 0) {
1152 		drm_dbg_kms(&i915->drm,
1153 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1154 			    intel_dp->psr_dpcd[1]);
1155 		return -ETIME;
1156 	}
1157 
1158 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1159 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1160 		if (DISPLAY_VER(i915) >= 20) {
1161 			/* setup entry frames can be up to 3 frames */
1162 			entry_setup_frames = 1;
1163 			drm_dbg_kms(&i915->drm,
1164 				    "PSR setup entry frames %d\n",
1165 				    entry_setup_frames);
1166 		} else {
1167 			drm_dbg_kms(&i915->drm,
1168 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1169 				    psr_setup_time);
1170 			return -ETIME;
1171 		}
1172 	}
1173 
1174 	return entry_setup_frames;
1175 }
1176 
1177 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1178 				    struct intel_crtc_state *crtc_state)
1179 {
1180 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1181 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1182 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1183 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1184 
1185 	if (!intel_dp->psr.sink_psr2_support)
1186 		return false;
1187 
1188 	/* JSL and EHL only support eDP 1.3 */
1189 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1190 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1191 		return false;
1192 	}
1193 
1194 	/* Wa_16011181250 */
1195 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1196 	    IS_DG2(dev_priv)) {
1197 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1198 		return false;
1199 	}
1200 
1201 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1202 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1203 		return false;
1204 	}
1205 
1206 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1207 		drm_dbg_kms(&dev_priv->drm,
1208 			    "PSR2 not supported in transcoder %s\n",
1209 			    transcoder_name(crtc_state->cpu_transcoder));
1210 		return false;
1211 	}
1212 
1213 	if (!psr2_global_enabled(intel_dp)) {
1214 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1215 		return false;
1216 	}
1217 
1218 	/*
1219 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1220 	 * resolution requires DSC to be enabled, priority is given to DSC
1221 	 * over PSR2.
1222 	 */
1223 	if (crtc_state->dsc.compression_enable &&
1224 	    (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1225 		drm_dbg_kms(&dev_priv->drm,
1226 			    "PSR2 cannot be enabled since DSC is enabled\n");
1227 		return false;
1228 	}
1229 
1230 	if (crtc_state->crc_enabled) {
1231 		drm_dbg_kms(&dev_priv->drm,
1232 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1233 		return false;
1234 	}
1235 
1236 	if (DISPLAY_VER(dev_priv) >= 12) {
1237 		psr_max_h = 5120;
1238 		psr_max_v = 3200;
1239 		max_bpp = 30;
1240 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1241 		psr_max_h = 4096;
1242 		psr_max_v = 2304;
1243 		max_bpp = 24;
1244 	} else if (DISPLAY_VER(dev_priv) == 9) {
1245 		psr_max_h = 3640;
1246 		psr_max_v = 2304;
1247 		max_bpp = 24;
1248 	}
1249 
1250 	if (crtc_state->pipe_bpp > max_bpp) {
1251 		drm_dbg_kms(&dev_priv->drm,
1252 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1253 			    crtc_state->pipe_bpp, max_bpp);
1254 		return false;
1255 	}
1256 
1257 	/* Wa_16011303918:adl-p */
1258 	if (crtc_state->vrr.enable &&
1259 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1260 		drm_dbg_kms(&dev_priv->drm,
1261 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1262 		return false;
1263 	}
1264 
1265 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1266 		drm_dbg_kms(&dev_priv->drm,
1267 			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1268 		return false;
1269 	}
1270 
1271 	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1272 		drm_dbg_kms(&dev_priv->drm,
1273 			    "PSR2 not enabled, Unable to use long enough wake times\n");
1274 		return false;
1275 	}
1276 
1277 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1278 	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1279 	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
1280 	    psr2_block_count_lines(intel_dp)) {
1281 		drm_dbg_kms(&dev_priv->drm,
1282 			    "PSR2 not enabled, too short vblank time\n");
1283 		return false;
1284 	}
1285 
1286 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1287 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1288 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
1289 			drm_dbg_kms(&dev_priv->drm,
1290 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1291 			return false;
1292 		}
1293 	}
1294 
1295 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1296 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1297 		goto unsupported;
1298 	}
1299 
1300 	if (!crtc_state->enable_psr2_sel_fetch &&
1301 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1302 		drm_dbg_kms(&dev_priv->drm,
1303 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1304 			    crtc_hdisplay, crtc_vdisplay,
1305 			    psr_max_h, psr_max_v);
1306 		goto unsupported;
1307 	}
1308 
1309 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1310 	return true;
1311 
1312 unsupported:
1313 	crtc_state->enable_psr2_sel_fetch = false;
1314 	return false;
1315 }
1316 
1317 static bool _psr_compute_config(struct intel_dp *intel_dp,
1318 				struct intel_crtc_state *crtc_state)
1319 {
1320 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1321 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1322 	int entry_setup_frames;
1323 
1324 	/*
1325 	 * Current PSR panels don't work reliably with VRR enabled,
1326 	 * so if VRR is enabled, do not enable PSR.
1327 	 */
1328 	if (crtc_state->vrr.enable)
1329 		return false;
1330 
1331 	if (!CAN_PSR(intel_dp))
1332 		return false;
1333 
1334 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1335 
1336 	if (entry_setup_frames >= 0) {
1337 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1338 	} else {
1339 		drm_dbg_kms(&dev_priv->drm,
1340 			    "PSR condition failed: PSR setup timing not met\n");
1341 		return false;
1342 	}
1343 
1344 	return true;
1345 }
1346 
1347 void intel_psr_compute_config(struct intel_dp *intel_dp,
1348 			      struct intel_crtc_state *crtc_state,
1349 			      struct drm_connector_state *conn_state)
1350 {
1351 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1352 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1353 
1354 	if (!psr_global_enabled(intel_dp)) {
1355 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1356 		return;
1357 	}
1358 
1359 	if (intel_dp->psr.sink_not_reliable) {
1360 		drm_dbg_kms(&dev_priv->drm,
1361 			    "PSR sink implementation is not reliable\n");
1362 		return;
1363 	}
1364 
1365 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1366 		drm_dbg_kms(&dev_priv->drm,
1367 			    "PSR condition failed: Interlaced mode enabled\n");
1368 		return;
1369 	}
1370 
1371 	if (CAN_PANEL_REPLAY(intel_dp))
1372 		crtc_state->has_panel_replay = true;
1373 	else
1374 		crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1375 
1376 	if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1377 		return;
1378 
1379 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1380 
1381 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1382 	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1383 				     &crtc_state->psr_vsc);
1384 }
1385 
1386 void intel_psr_get_config(struct intel_encoder *encoder,
1387 			  struct intel_crtc_state *pipe_config)
1388 {
1389 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1390 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1391 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1392 	struct intel_dp *intel_dp;
1393 	u32 val;
1394 
1395 	if (!dig_port)
1396 		return;
1397 
1398 	intel_dp = &dig_port->dp;
1399 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1400 		return;
1401 
1402 	mutex_lock(&intel_dp->psr.lock);
1403 	if (!intel_dp->psr.enabled)
1404 		goto unlock;
1405 
1406 	if (intel_dp->psr.panel_replay_enabled) {
1407 		pipe_config->has_panel_replay = true;
1408 	} else {
1409 		/*
1410 		 * Not possible to rely on reading EDP_PSR/PSR2_CTL registers as
1411 		 * they get enabled/disabled by frontbuffer tracking and others.
1412 		 */
1413 		pipe_config->has_psr = true;
1414 	}
1415 
1416 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1417 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1418 
1419 	if (!intel_dp->psr.psr2_enabled)
1420 		goto unlock;
1421 
1422 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1423 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1424 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1425 			pipe_config->enable_psr2_sel_fetch = true;
1426 	}
1427 
1428 	if (DISPLAY_VER(dev_priv) >= 12) {
1429 		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1430 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1431 	}
1432 unlock:
1433 	mutex_unlock(&intel_dp->psr.lock);
1434 }
1435 
1436 static void intel_psr_activate(struct intel_dp *intel_dp)
1437 {
1438 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1439 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1440 
1441 	drm_WARN_ON(&dev_priv->drm,
1442 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1443 		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1444 
1445 	drm_WARN_ON(&dev_priv->drm,
1446 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1447 
1448 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1449 
1450 	lockdep_assert_held(&intel_dp->psr.lock);
1451 
1452 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1453 	if (intel_dp->psr.panel_replay_enabled)
1454 		dg2_activate_panel_replay(intel_dp);
1455 	else if (intel_dp->psr.psr2_enabled)
1456 		hsw_activate_psr2(intel_dp);
1457 	else
1458 		hsw_activate_psr1(intel_dp);
1459 
1460 	intel_dp->psr.active = true;
1461 }
1462 
1463 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1464 {
1465 	switch (intel_dp->psr.pipe) {
1466 	case PIPE_A:
1467 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1468 	case PIPE_B:
1469 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1470 	case PIPE_C:
1471 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1472 	case PIPE_D:
1473 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1474 	default:
1475 		MISSING_CASE(intel_dp->psr.pipe);
1476 		return 0;
1477 	}
1478 }
1479 
1480 /*
1481  * Wa_16013835468
1482  * Wa_14015648006
1483  */
1484 static void wm_optimization_wa(struct intel_dp *intel_dp,
1485 			       const struct intel_crtc_state *crtc_state)
1486 {
1487 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1488 	bool set_wa_bit = false;
1489 
1490 	/* Wa_14015648006 */
1491 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1492 		set_wa_bit |= crtc_state->wm_level_disabled;
1493 
1494 	/* Wa_16013835468 */
1495 	if (DISPLAY_VER(dev_priv) == 12)
1496 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1497 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1498 
1499 	if (set_wa_bit)
1500 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1501 			     0, wa_16013835468_bit_get(intel_dp));
1502 	else
1503 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1504 			     wa_16013835468_bit_get(intel_dp), 0);
1505 }
1506 
1507 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1508 				    const struct intel_crtc_state *crtc_state)
1509 {
1510 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1511 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1512 	u32 mask;
1513 
1514 	/*
1515 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1516 	 * SKL+ use hardcoded values for PSR AUX transactions.
1517 	 */
1518 	if (DISPLAY_VER(dev_priv) < 9)
1519 		hsw_psr_setup_aux(intel_dp);
1520 
1521 	/*
1522 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1523 	 * mask LPSP to avoid a dependency on other drivers that might block
1524 	 * runtime_pm, besides preventing other hw tracking issues, now that we
1525 	 * can rely on frontbuffer tracking.
1526 	 */
1527 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1528 	       EDP_PSR_DEBUG_MASK_HPD |
1529 	       EDP_PSR_DEBUG_MASK_LPSP;
1530 
1531 	if (DISPLAY_VER(dev_priv) < 20)
1532 		mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1533 
1534 	/*
1535 	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1536 	 * registers in order to keep the CURSURFLIVE tricks working :(
1537 	 */
1538 	if (IS_DISPLAY_VER(dev_priv, 9, 10))
1539 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1540 
1541 	/* allow PSR with sprite enabled */
1542 	if (IS_HASWELL(dev_priv))
1543 		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1544 
1545 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1546 
1547 	psr_irq_control(intel_dp);
1548 
1549 	/*
1550 	 * TODO: if future platforms support DC3CO in more than one
1551 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1552 	 */
1553 	if (intel_dp->psr.dc3co_exitline)
1554 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1555 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1556 
1557 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1558 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1559 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1560 			     IGNORE_PSR2_HW_TRACKING : 0);
1561 
1562 	/*
1563 	 * Wa_16013835468
1564 	 * Wa_14015648006
1565 	 */
1566 	wm_optimization_wa(intel_dp, crtc_state);
1567 
1568 	if (intel_dp->psr.psr2_enabled) {
1569 		if (DISPLAY_VER(dev_priv) == 9)
1570 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1571 				     PSR2_VSC_ENABLE_PROG_HEADER |
1572 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1573 
1574 		/*
1575 		 * Wa_16014451276:adlp,mtl[a0,b0]
1576 		 * All supported adlp panels have 1-based X granularity; this may
1577 		 * cause issues if non-supported panels are used.
1578 		 */
1579 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1580 		    IS_ALDERLAKE_P(dev_priv))
1581 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1582 				     0, ADLP_1_BASED_X_GRANULARITY);
1583 
1584 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1585 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1586 			intel_de_rmw(dev_priv,
1587 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1588 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1589 		else if (IS_ALDERLAKE_P(dev_priv))
1590 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1591 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1592 	}
1593 }
1594 
1595 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1596 {
1597 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1598 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1599 	u32 val;
1600 
1601 	/*
1602 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1603 	 * will still keep the error set even after the reset done in the
1604 	 * irq_preinstall and irq_uninstall hooks.
1605 	 * Enabling PSR in this situation causes the screen to freeze the
1606 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1607 	 * to avoid any rendering problems.
1608 	 */
1609 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1610 	val &= psr_irq_psr_error_bit_get(intel_dp);
1611 	if (val) {
1612 		intel_dp->psr.sink_not_reliable = true;
1613 		drm_dbg_kms(&dev_priv->drm,
1614 			    "PSR interruption error set, not enabling PSR\n");
1615 		return false;
1616 	}
1617 
1618 	return true;
1619 }
1620 
1621 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1622 				    const struct intel_crtc_state *crtc_state)
1623 {
1624 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1625 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1626 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1627 	struct intel_encoder *encoder = &dig_port->base;
1628 	u32 val;
1629 
1630 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1631 
1632 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1633 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1634 	intel_dp->psr.busy_frontbuffer_bits = 0;
1635 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1636 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1637 	/* DC5/DC6 requires at least 6 idle frames */
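	/* e.g. roughly 6 * 16.7 ms, i.e. about 100 ms, for a 60 Hz panel */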
1638 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1639 	intel_dp->psr.dc3co_exit_delay = val;
1640 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1641 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1642 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1643 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1644 		crtc_state->req_psr2_sdp_prior_scanline;
1645 
1646 	if (!psr_interrupt_error_check(intel_dp))
1647 		return;
1648 
1649 	if (intel_dp->psr.panel_replay_enabled)
1650 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1651 	else
1652 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1653 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1654 
1655 	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1656 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1657 	intel_psr_enable_sink(intel_dp);
1658 	intel_psr_enable_source(intel_dp, crtc_state);
1659 	intel_dp->psr.enabled = true;
1660 	intel_dp->psr.paused = false;
1661 
1662 	intel_psr_activate(intel_dp);
1663 }
1664 
1665 static void intel_psr_exit(struct intel_dp *intel_dp)
1666 {
1667 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1668 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1669 	u32 val;
1670 
1671 	if (!intel_dp->psr.active) {
1672 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1673 			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1674 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1675 		}
1676 
1677 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1678 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1679 
1680 		return;
1681 	}
1682 
1683 	if (intel_dp->psr.panel_replay_enabled) {
1684 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1685 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1686 	} else if (intel_dp->psr.psr2_enabled) {
1687 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1688 
1689 		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1690 				   EDP_PSR2_ENABLE, 0);
1691 
1692 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1693 	} else {
1694 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1695 				   EDP_PSR_ENABLE, 0);
1696 
1697 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1698 	}
1699 	intel_dp->psr.active = false;
1700 }
1701 
1702 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1703 {
1704 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1705 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1706 	i915_reg_t psr_status;
1707 	u32 psr_status_mask;
1708 
1709 	if (intel_dp->psr.psr2_enabled) {
1710 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1711 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1712 	} else {
1713 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1714 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1715 	}
1716 
1717 	/* Wait till PSR is idle */
1718 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1719 				    psr_status_mask, 2000))
1720 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1721 }
1722 
1723 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1724 {
1725 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1726 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1727 	enum phy phy = intel_port_to_phy(dev_priv,
1728 					 dp_to_dig_port(intel_dp)->base.port);
1729 
1730 	lockdep_assert_held(&intel_dp->psr.lock);
1731 
1732 	if (!intel_dp->psr.enabled)
1733 		return;
1734 
1735 	if (intel_dp->psr.panel_replay_enabled)
1736 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1737 	else
1738 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1739 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1740 
1741 	intel_psr_exit(intel_dp);
1742 	intel_psr_wait_exit_locked(intel_dp);
1743 
1744 	/*
1745 	 * Wa_16013835468
1746 	 * Wa_14015648006
1747 	 */
1748 	if (DISPLAY_VER(dev_priv) >= 11)
1749 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1750 			     wa_16013835468_bit_get(intel_dp), 0);
1751 
1752 	if (intel_dp->psr.psr2_enabled) {
1753 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1754 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1755 			intel_de_rmw(dev_priv,
1756 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1757 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1758 		else if (IS_ALDERLAKE_P(dev_priv))
1759 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1760 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1761 	}
1762 
1763 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1764 
1765 	/* Disable PSR on Sink */
1766 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1767 
1768 	if (intel_dp->psr.psr2_enabled)
1769 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1770 
1771 	intel_dp->psr.enabled = false;
1772 	intel_dp->psr.panel_replay_enabled = false;
1773 	intel_dp->psr.psr2_enabled = false;
1774 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1775 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1776 }
1777 
1778 /**
1779  * intel_psr_disable - Disable PSR
1780  * @intel_dp: Intel DP
1781  * @old_crtc_state: old CRTC state
1782  *
1783  * This function needs to be called before disabling pipe.
1784  */
1785 void intel_psr_disable(struct intel_dp *intel_dp,
1786 		       const struct intel_crtc_state *old_crtc_state)
1787 {
1788 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1789 
1790 	if (!old_crtc_state->has_psr)
1791 		return;
1792 
1793 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1794 		return;
1795 
1796 	mutex_lock(&intel_dp->psr.lock);
1797 
1798 	intel_psr_disable_locked(intel_dp);
1799 
1800 	mutex_unlock(&intel_dp->psr.lock);
1801 	cancel_work_sync(&intel_dp->psr.work);
1802 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1803 }
1804 
1805 /**
1806  * intel_psr_pause - Pause PSR
1807  * @intel_dp: Intel DP
1808  *
1809  * This function needs to be called after enabling PSR.
1810  */
1811 void intel_psr_pause(struct intel_dp *intel_dp)
1812 {
1813 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1814 	struct intel_psr *psr = &intel_dp->psr;
1815 
1816 	if (!CAN_PSR(intel_dp))
1817 		return;
1818 
1819 	mutex_lock(&psr->lock);
1820 
1821 	if (!psr->enabled) {
1822 		mutex_unlock(&psr->lock);
1823 		return;
1824 	}
1825 
1826 	/* If we ever hit this, we will need to add refcount to pause/resume */
1827 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1828 
1829 	intel_psr_exit(intel_dp);
1830 	intel_psr_wait_exit_locked(intel_dp);
1831 	psr->paused = true;
1832 
1833 	mutex_unlock(&psr->lock);
1834 
1835 	cancel_work_sync(&psr->work);
1836 	cancel_delayed_work_sync(&psr->dc3co_work);
1837 }
1838 
1839 /**
1840  * intel_psr_resume - Resume PSR
1841  * @intel_dp: Intel DP
1842  *
1843  * This function needs to be called after pausing PSR.
1844  */
1845 void intel_psr_resume(struct intel_dp *intel_dp)
1846 {
1847 	struct intel_psr *psr = &intel_dp->psr;
1848 
1849 	if (!CAN_PSR(intel_dp))
1850 		return;
1851 
1852 	mutex_lock(&psr->lock);
1853 
1854 	if (!psr->paused)
1855 		goto unlock;
1856 
1857 	psr->paused = false;
1858 	intel_psr_activate(intel_dp);
1859 
1860 unlock:
1861 	mutex_unlock(&psr->lock);
1862 }
1863 
1864 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1865 {
1866 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1867 		PSR2_MAN_TRK_CTL_ENABLE;
1868 }
1869 
1870 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1871 {
1872 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1873 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1874 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1875 }
1876 
1877 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1878 {
1879 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1880 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1881 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1882 }
1883 
1884 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1885 {
1886 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1887 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1888 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1889 }
1890 
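/*
 * Force the PSR HW tracking to send one full frame and exit self-refresh:
 * with selective fetch enabled a single + continuous full frame update is
 * programmed through PSR2_MAN_TRK_CTL (composed from the platform-specific
 * bit helpers above), and the CURSURFLIVE write below kicks the update
 * (Display WA #0884).
 */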
1891 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1892 {
1893 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1894 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1895 
1896 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1897 		intel_de_write(dev_priv,
1898 			       PSR2_MAN_TRK_CTL(cpu_transcoder),
1899 			       man_trk_ctl_enable_bit_get(dev_priv) |
1900 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1901 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1902 			       man_trk_ctl_continuos_full_frame(dev_priv));
1903 
1904 	/*
1905 	 * Display WA #0884: skl+
1906 	 * This documented WA for bxt can be safely applied
1907 	 * broadly so we can force HW tracking to exit PSR
1908 	 * instead of disabling and re-enabling.
1909 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1910 	 * but it makes more sense to write to the currently active
1911 	 * pipe.
1912 	 *
1913 	 * This workaround is not documented for platforms with display 10 or
1914 	 * newer, but testing proved that it works up to display 13; newer
1915 	 * platforms will need additional testing.
1916 	 */
1917 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1918 }
1919 
1920 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1921 					    const struct intel_crtc_state *crtc_state)
1922 {
1923 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1924 	enum pipe pipe = plane->pipe;
1925 
1926 	if (!crtc_state->enable_psr2_sel_fetch)
1927 		return;
1928 
1929 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1930 }
1931 
1932 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1933 					    const struct intel_crtc_state *crtc_state,
1934 					    const struct intel_plane_state *plane_state)
1935 {
1936 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1937 	enum pipe pipe = plane->pipe;
1938 
1939 	if (!crtc_state->enable_psr2_sel_fetch)
1940 		return;
1941 
1942 	if (plane->id == PLANE_CURSOR)
1943 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1944 				  plane_state->ctl);
1945 	else
1946 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1947 				  PLANE_SEL_FETCH_CTL_ENABLE);
1948 }
1949 
1950 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1951 					      const struct intel_crtc_state *crtc_state,
1952 					      const struct intel_plane_state *plane_state,
1953 					      int color_plane)
1954 {
1955 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1956 	enum pipe pipe = plane->pipe;
1957 	const struct drm_rect *clip;
1958 	u32 val;
1959 	int x, y;
1960 
1961 	if (!crtc_state->enable_psr2_sel_fetch)
1962 		return;
1963 
1964 	if (plane->id == PLANE_CURSOR)
1965 		return;
1966 
1967 	clip = &plane_state->psr2_sel_fetch_area;
1968 
1969 	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1970 	val |= plane_state->uapi.dst.x1;
1971 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1972 
1973 	x = plane_state->view.color_plane[color_plane].x;
1974 
1975 	/*
1976 	 * From Bspec: UV surface Start Y Position = half of Y plane Y
1977 	 * start position.
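	 * E.g. with clip->y1 = 8 the Y plane offset advances by 8 lines
	 * while the UV plane offset advances by only 4.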
1978 	 */
1979 	if (!color_plane)
1980 		y = plane_state->view.color_plane[color_plane].y + clip->y1;
1981 	else
1982 		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1983 
1984 	val = y << 16 | x;
1985 
1986 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1987 			  val);
1988 
1989 	/* Sizes are 0 based */
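	/* e.g. a 16 line tall, 32 pixel wide fetch area is programmed as 15 and 31 */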
1990 	val = (drm_rect_height(clip) - 1) << 16;
1991 	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1992 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1993 }
1994 
1995 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1996 {
1997 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1998 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1999 	struct intel_encoder *encoder;
2000 
2001 	if (!crtc_state->enable_psr2_sel_fetch)
2002 		return;
2003 
2004 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2005 					     crtc_state->uapi.encoder_mask) {
2006 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2007 
2008 		lockdep_assert_held(&intel_dp->psr.lock);
2009 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2010 			return;
2011 		break;
2012 	}
2013 
2014 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2015 		       crtc_state->psr2_man_track_ctl);
2016 }
2017 
2018 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2019 				  struct drm_rect *clip, bool full_update)
2020 {
2021 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2022 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2023 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2024 
2025 	/* SF partial frame enable has to be set even on full update */
2026 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2027 
2028 	if (full_update) {
2029 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2030 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
2031 		goto exit;
2032 	}
2033 
2034 	if (clip->y1 == -1)
2035 		goto exit;
2036 
2037 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2038 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
2039 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
2040 	} else {
2041 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
2042 
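		/*
		 * Pre-ADL-P hardware programs the SU region in 4-line
		 * granularity, e.g. clip->y1 = 8 and clip->y2 = 24 yield a
		 * start address of 3 and an end address of 7.
		 */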
2043 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
2044 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
2045 	}
2046 exit:
2047 	crtc_state->psr2_man_track_ctl = val;
2048 }
2049 
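/*
 * Grow the accumulated damaged area (tracked as a y range) to also cover
 * @damage_area, after clipping it against @pipe_src. E.g. merging the y
 * ranges [10, 20] and [30, 40] yields [10, 40]; y1 == -1 means nothing has
 * been accumulated yet.
 */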
2050 static void clip_area_update(struct drm_rect *overlap_damage_area,
2051 			     struct drm_rect *damage_area,
2052 			     struct drm_rect *pipe_src)
2053 {
2054 	if (!drm_rect_intersect(damage_area, pipe_src))
2055 		return;
2056 
2057 	if (overlap_damage_area->y1 == -1) {
2058 		overlap_damage_area->y1 = damage_area->y1;
2059 		overlap_damage_area->y2 = damage_area->y2;
2060 		return;
2061 	}
2062 
2063 	if (damage_area->y1 < overlap_damage_area->y1)
2064 		overlap_damage_area->y1 = damage_area->y1;
2065 
2066 	if (damage_area->y2 > overlap_damage_area->y2)
2067 		overlap_damage_area->y2 = damage_area->y2;
2068 }
2069 
2070 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
2071 						struct drm_rect *pipe_clip)
2072 {
2073 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2074 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2075 	u16 y_alignment;
2076 
2077 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2078 	if (crtc_state->dsc.compression_enable &&
2079 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2080 		y_alignment = vdsc_cfg->slice_height;
2081 	else
2082 		y_alignment = crtc_state->su_y_granularity;
2083 
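	/*
	 * Round y1 down and y2 up to the alignment, e.g. with a 4 line
	 * alignment a clip of y1 = 5, y2 = 10 becomes y1 = 4, y2 = 12.
	 */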
2084 	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
2085 	if (pipe_clip->y2 % y_alignment)
2086 		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
2087 }
2088 
2089 /*
2090  * TODO: Not clear how to handle planes with negative position;
2091  * also, planes are not updated if they have a negative X
2092  * position, so for now do a full update in these cases.
2093  *
2094  * Plane scaling and rotation are not supported by selective fetch and both
2095  * properties can change without a modeset, so they need to be checked at
2096  * every atomic commit.
2097  */
2098 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2099 {
2100 	if (plane_state->uapi.dst.y1 < 0 ||
2101 	    plane_state->uapi.dst.x1 < 0 ||
2102 	    plane_state->scaler_id >= 0 ||
2103 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2104 		return false;
2105 
2106 	return true;
2107 }
2108 
2109 /*
2110  * Check for pipe properties that are not supported by selective fetch.
2111  *
2112  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2113  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2114  * enabled and going to the full update path.
2115  */
2116 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2117 {
2118 	if (crtc_state->scaler_state.scaler_id >= 0)
2119 		return false;
2120 
2121 	return true;
2122 }
2123 
2124 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2125 				struct intel_crtc *crtc)
2126 {
2127 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2128 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2129 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
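	/* y1 == -1 means no damaged area has been accumulated yet */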
2130 	struct intel_plane_state *new_plane_state, *old_plane_state;
2131 	struct intel_plane *plane;
2132 	bool full_update = false;
2133 	int i, ret;
2134 
2135 	if (!crtc_state->enable_psr2_sel_fetch)
2136 		return 0;
2137 
2138 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2139 		full_update = true;
2140 		goto skip_sel_fetch_set_loop;
2141 	}
2142 
2143 	/*
2144 	 * Calculate the minimal selective fetch area of each plane and the
2145 	 * pipe damaged area.
2146 	 * In the next loop the plane selective fetch area will actually be set
2147 	 * using the whole pipe damaged area.
2148 	 */
2149 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2150 					     new_plane_state, i) {
2151 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2152 						      .x2 = INT_MAX };
2153 
2154 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2155 			continue;
2156 
2157 		if (!new_plane_state->uapi.visible &&
2158 		    !old_plane_state->uapi.visible)
2159 			continue;
2160 
2161 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2162 			full_update = true;
2163 			break;
2164 		}
2165 
2166 		/*
2167 		 * If the visibility changed or the plane moved, mark the whole
2168 		 * plane area as damaged as it needs to be completely redrawn in
2169 		 * both the old and new positions.
2170 		 */
2171 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2172 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2173 				     &old_plane_state->uapi.dst)) {
2174 			if (old_plane_state->uapi.visible) {
2175 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2176 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2177 				clip_area_update(&pipe_clip, &damaged_area,
2178 						 &crtc_state->pipe_src);
2179 			}
2180 
2181 			if (new_plane_state->uapi.visible) {
2182 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2183 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2184 				clip_area_update(&pipe_clip, &damaged_area,
2185 						 &crtc_state->pipe_src);
2186 			}
2187 			continue;
2188 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2189 			/* If alpha changed mark the whole plane area as damaged */
2190 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2191 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2192 			clip_area_update(&pipe_clip, &damaged_area,
2193 					 &crtc_state->pipe_src);
2194 			continue;
2195 		}
2196 
2197 		src = drm_plane_state_src(&new_plane_state->uapi);
2198 		drm_rect_fp_to_int(&src, &src);
2199 
2200 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2201 						     &new_plane_state->uapi, &damaged_area))
2202 			continue;
2203 
2204 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2205 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2206 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2207 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2208 
2209 		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2210 	}
2211 
2212 	/*
2213 	 * TODO: For now we are just using full update in case
2214 	 * selective fetch area calculation fails. To optimize this we
2215 	 * should identify cases where this happens and fix the area
2216 	 * calculation for those.
2217 	 */
2218 	if (pipe_clip.y1 == -1) {
2219 		drm_info_once(&dev_priv->drm,
2220 			      "Selective fetch area calculation failed in pipe %c\n",
2221 			      pipe_name(crtc->pipe));
2222 		full_update = true;
2223 	}
2224 
2225 	if (full_update)
2226 		goto skip_sel_fetch_set_loop;
2227 
2228 	/* Wa_14014971492 */
2229 	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2230 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2231 	    crtc_state->splitter.enable)
2232 		pipe_clip.y1 = 0;
2233 
2234 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2235 	if (ret)
2236 		return ret;
2237 
2238 	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2239 
2240 	/*
2241 	 * Now that we have the pipe damaged area, check if it intersects with
2242 	 * each plane; if it does, set the plane selective fetch area.
2243 	 */
2244 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2245 					     new_plane_state, i) {
2246 		struct drm_rect *sel_fetch_area, inter;
2247 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2248 
2249 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2250 		    !new_plane_state->uapi.visible)
2251 			continue;
2252 
2253 		inter = pipe_clip;
2254 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2255 			continue;
2256 
2257 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2258 			full_update = true;
2259 			break;
2260 		}
2261 
2262 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2263 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2264 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2265 		crtc_state->update_planes |= BIT(plane->id);
2266 
2267 		/*
2268 		 * The sel fetch area is calculated for the UV plane. Use the
2269 		 * same area for the Y plane as well.
2270 		 */
2271 		if (linked) {
2272 			struct intel_plane_state *linked_new_plane_state;
2273 			struct drm_rect *linked_sel_fetch_area;
2274 
2275 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2276 			if (IS_ERR(linked_new_plane_state))
2277 				return PTR_ERR(linked_new_plane_state);
2278 
2279 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2280 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2281 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2282 			crtc_state->update_planes |= BIT(linked->id);
2283 		}
2284 	}
2285 
2286 skip_sel_fetch_set_loop:
2287 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2288 	return 0;
2289 }
2290 
2291 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2292 				struct intel_crtc *crtc)
2293 {
2294 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2295 	const struct intel_crtc_state *old_crtc_state =
2296 		intel_atomic_get_old_crtc_state(state, crtc);
2297 	const struct intel_crtc_state *new_crtc_state =
2298 		intel_atomic_get_new_crtc_state(state, crtc);
2299 	struct intel_encoder *encoder;
2300 
2301 	if (!HAS_PSR(i915))
2302 		return;
2303 
2304 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2305 					     old_crtc_state->uapi.encoder_mask) {
2306 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2307 		struct intel_psr *psr = &intel_dp->psr;
2308 		bool needs_to_disable = false;
2309 
2310 		mutex_lock(&psr->lock);
2311 
2312 		/*
2313 		 * Reasons to disable:
2314 		 * - PSR disabled in new state
2315 		 * - All planes will go inactive
2316 		 * - Changing between PSR versions
2317 		 * - Display WA #1136: skl, bxt
2318 		 */
2319 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2320 		needs_to_disable |= !new_crtc_state->has_psr;
2321 		needs_to_disable |= !new_crtc_state->active_planes;
2322 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2323 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2324 			new_crtc_state->wm_level_disabled;
2325 
2326 		if (psr->enabled && needs_to_disable)
2327 			intel_psr_disable_locked(intel_dp);
2328 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2329 			/* Wa_14015648006 */
2330 			wm_optimization_wa(intel_dp, new_crtc_state);
2331 
2332 		mutex_unlock(&psr->lock);
2333 	}
2334 }
2335 
2336 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2337 				 struct intel_crtc *crtc)
2338 {
2339 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2340 	const struct intel_crtc_state *crtc_state =
2341 		intel_atomic_get_new_crtc_state(state, crtc);
2342 	struct intel_encoder *encoder;
2343 
2344 	if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2345 		return;
2346 
2347 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2348 					     crtc_state->uapi.encoder_mask) {
2349 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2350 		struct intel_psr *psr = &intel_dp->psr;
2351 		bool keep_disabled = false;
2352 
2353 		mutex_lock(&psr->lock);
2354 
2355 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2356 
2357 		keep_disabled |= psr->sink_not_reliable;
2358 		keep_disabled |= !crtc_state->active_planes;
2359 
2360 		/* Display WA #1136: skl, bxt */
2361 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2362 			crtc_state->wm_level_disabled;
2363 
2364 		if (!psr->enabled && !keep_disabled)
2365 			intel_psr_enable_locked(intel_dp, crtc_state);
2366 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2367 			/* Wa_14015648006 */
2368 			wm_optimization_wa(intel_dp, crtc_state);
2369 
2370 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2371 		if (crtc_state->crc_enabled && psr->enabled)
2372 			psr_force_hw_tracking_exit(intel_dp);
2373 
2374 		/*
2375 		 * Clear possible busy bits in case we have an
2376 		 * invalidate -> flip -> flush sequence.
2377 		 */
2378 		intel_dp->psr.busy_frontbuffer_bits = 0;
2379 
2380 		mutex_unlock(&psr->lock);
2381 	}
2382 }
2383 
2384 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2385 {
2386 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2387 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2388 
2389 	/*
2390 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2391 	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2392 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2393 	 */
2394 	return intel_de_wait_for_clear(dev_priv,
2395 				       EDP_PSR2_STATUS(cpu_transcoder),
2396 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2397 }
2398 
2399 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2400 {
2401 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2402 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2403 
2404 	/*
2405 	 * From bspec: Panel Self Refresh (BDW+)
2406 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2407 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2408 	 * defensive enough to cover everything.
2409 	 */
2410 	return intel_de_wait_for_clear(dev_priv,
2411 				       psr_status_reg(dev_priv, cpu_transcoder),
2412 				       EDP_PSR_STATUS_STATE_MASK, 50);
2413 }
2414 
2415 /**
2416  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2417  * @new_crtc_state: new CRTC state
2418  *
2419  * This function is expected to be called from pipe_update_start() where it is
2420  * not expected to race with PSR enable or disable.
2421  */
2422 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2423 {
2424 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2425 	struct intel_encoder *encoder;
2426 
2427 	if (!new_crtc_state->has_psr)
2428 		return;
2429 
2430 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2431 					     new_crtc_state->uapi.encoder_mask) {
2432 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2433 		int ret;
2434 
2435 		lockdep_assert_held(&intel_dp->psr.lock);
2436 
2437 		if (!intel_dp->psr.enabled)
2438 			continue;
2439 
2440 		if (intel_dp->psr.psr2_enabled)
2441 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2442 		else
2443 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2444 
2445 		if (ret)
2446 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2447 	}
2448 }
2449 
2450 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2451 {
2452 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2453 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2454 	i915_reg_t reg;
2455 	u32 mask;
2456 	int err;
2457 
2458 	if (!intel_dp->psr.enabled)
2459 		return false;
2460 
2461 	if (intel_dp->psr.psr2_enabled) {
2462 		reg = EDP_PSR2_STATUS(cpu_transcoder);
2463 		mask = EDP_PSR2_STATUS_STATE_MASK;
2464 	} else {
2465 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2466 		mask = EDP_PSR_STATUS_STATE_MASK;
2467 	}
2468 
2469 	mutex_unlock(&intel_dp->psr.lock);
2470 
2471 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2472 	if (err)
2473 		drm_err(&dev_priv->drm,
2474 			"Timed out waiting for PSR Idle for re-enable\n");
2475 
2476 	/* After the unlocked wait, verify that PSR is still wanted! */
2477 	mutex_lock(&intel_dp->psr.lock);
2478 	return err == 0 && intel_dp->psr.enabled;
2479 }
2480 
2481 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2482 {
2483 	struct drm_connector_list_iter conn_iter;
2484 	struct drm_modeset_acquire_ctx ctx;
2485 	struct drm_atomic_state *state;
2486 	struct drm_connector *conn;
2487 	int err = 0;
2488 
2489 	state = drm_atomic_state_alloc(&dev_priv->drm);
2490 	if (!state)
2491 		return -ENOMEM;
2492 
2493 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2494 
2495 	state->acquire_ctx = &ctx;
2496 	to_intel_atomic_state(state)->internal = true;
2497 
2498 retry:
2499 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2500 	drm_for_each_connector_iter(conn, &conn_iter) {
2501 		struct drm_connector_state *conn_state;
2502 		struct drm_crtc_state *crtc_state;
2503 
2504 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2505 			continue;
2506 
2507 		conn_state = drm_atomic_get_connector_state(state, conn);
2508 		if (IS_ERR(conn_state)) {
2509 			err = PTR_ERR(conn_state);
2510 			break;
2511 		}
2512 
2513 		if (!conn_state->crtc)
2514 			continue;
2515 
2516 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2517 		if (IS_ERR(crtc_state)) {
2518 			err = PTR_ERR(crtc_state);
2519 			break;
2520 		}
2521 
2522 		/* Mark mode as changed to trigger a pipe->update() */
2523 		crtc_state->mode_changed = true;
2524 	}
2525 	drm_connector_list_iter_end(&conn_iter);
2526 
2527 	if (err == 0)
2528 		err = drm_atomic_commit(state);
2529 
2530 	if (err == -EDEADLK) {
2531 		drm_atomic_state_clear(state);
2532 		err = drm_modeset_backoff(&ctx);
2533 		if (!err)
2534 			goto retry;
2535 	}
2536 
2537 	drm_modeset_drop_locks(&ctx);
2538 	drm_modeset_acquire_fini(&ctx);
2539 	drm_atomic_state_put(state);
2540 
2541 	return err;
2542 }
2543 
2544 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2545 {
2546 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2547 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2548 	u32 old_mode;
2549 	int ret;
2550 
2551 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2552 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2553 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2554 		return -EINVAL;
2555 	}
2556 
2557 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2558 	if (ret)
2559 		return ret;
2560 
2561 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2562 	intel_dp->psr.debug = val;
2563 
2564 	/*
2565 	 * Do it right away if it's already enabled, otherwise it will be done
2566 	 * when enabling the source.
2567 	 */
2568 	if (intel_dp->psr.enabled)
2569 		psr_irq_control(intel_dp);
2570 
2571 	mutex_unlock(&intel_dp->psr.lock);
2572 
2573 	if (old_mode != mode)
2574 		ret = intel_psr_fastset_force(dev_priv);
2575 
2576 	return ret;
2577 }
2578 
2579 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2580 {
2581 	struct intel_psr *psr = &intel_dp->psr;
2582 
2583 	intel_psr_disable_locked(intel_dp);
2584 	psr->sink_not_reliable = true;
2585 	/* let's make sure that the sink is awake */
2586 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2587 }
2588 
2589 static void intel_psr_work(struct work_struct *work)
2590 {
2591 	struct intel_dp *intel_dp =
2592 		container_of(work, typeof(*intel_dp), psr.work);
2593 
2594 	mutex_lock(&intel_dp->psr.lock);
2595 
2596 	if (!intel_dp->psr.enabled)
2597 		goto unlock;
2598 
2599 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2600 		intel_psr_handle_irq(intel_dp);
2601 
2602 	/*
2603 	 * We have to make sure PSR is ready for re-enable,
2604 	 * otherwise it stays disabled until the next full enable/disable cycle.
2605 	 * PSR might take some time to get fully disabled
2606 	 * and be ready for re-enable.
2607 	 */
2608 	if (!__psr_wait_for_idle_locked(intel_dp))
2609 		goto unlock;
2610 
2611 	/*
2612 	 * The delayed work can race with an invalidate hence we need to
2613 	 * recheck. Since psr_flush first clears this and then reschedules we
2614 	 * won't ever miss a flush when bailing out here.
2615 	 */
2616 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2617 		goto unlock;
2618 
2619 	intel_psr_activate(intel_dp);
2620 unlock:
2621 	mutex_unlock(&intel_dp->psr.lock);
2622 }
2623 
2624 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2625 {
2626 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2627 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2628 
2629 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2630 		u32 val;
2631 
2632 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2633 			/* Send one update, otherwise lag is observed on screen */
2634 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2635 			return;
2636 		}
2637 
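		/*
		 * Switch the HW to continuous full frame fetches while the
		 * frontbuffer is busy; _psr_flush_handle() clears the CFF
		 * configuration again once the busy bits are gone.
		 */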
2638 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2639 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2640 		      man_trk_ctl_continuos_full_frame(dev_priv);
2641 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2642 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2643 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2644 	} else {
2645 		intel_psr_exit(intel_dp);
2646 	}
2647 }
2648 
2649 /**
2650  * intel_psr_invalidate - Invalidate PSR
2651  * @dev_priv: i915 device
2652  * @frontbuffer_bits: frontbuffer plane tracking bits
2653  * @origin: which operation caused the invalidate
2654  *
2655  * Since the hardware frontbuffer tracking has gaps we need to integrate
2656  * with the software frontbuffer tracking. This function gets called every
2657  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2658  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2659  *
2660  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2661  */
2662 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2663 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2664 {
2665 	struct intel_encoder *encoder;
2666 
2667 	if (origin == ORIGIN_FLIP)
2668 		return;
2669 
2670 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2671 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2672 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2673 
2674 		mutex_lock(&intel_dp->psr.lock);
2675 		if (!intel_dp->psr.enabled) {
2676 			mutex_unlock(&intel_dp->psr.lock);
2677 			continue;
2678 		}
2679 
2680 		pipe_frontbuffer_bits &=
2681 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2682 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2683 
2684 		if (pipe_frontbuffer_bits)
2685 			_psr_invalidate_handle(intel_dp);
2686 
2687 		mutex_unlock(&intel_dp->psr.lock);
2688 	}
2689 }
2690 /*
2691  * When we completely rely on PSR2 S/W tracking in the future,
2692  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
2693  * events as well, so tgl_dc3co_flush_locked() will need to be changed
2694  * accordingly.
2695  */
2696 static void
2697 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2698 		       enum fb_op_origin origin)
2699 {
2700 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2701 
2702 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2703 	    !intel_dp->psr.active)
2704 		return;
2705 
2706 	/*
2707 	 * Every frontbuffer flush flip event modifies the delay of the delayed
2708 	 * work; when it finally runs it means the display has been idle.
2709 	 */
2710 	if (!(frontbuffer_bits &
2711 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2712 		return;
2713 
2714 	tgl_psr2_enable_dc3co(intel_dp);
2715 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2716 			 intel_dp->psr.dc3co_exit_delay);
2717 }
2718 
2719 static void _psr_flush_handle(struct intel_dp *intel_dp)
2720 {
2721 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2722 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2723 
2724 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2725 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2726 			/* can we turn CFF off? */
2727 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2728 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2729 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2730 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2731 					man_trk_ctl_continuos_full_frame(dev_priv);
2732 
2733 				/*
2734 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2735 				 * updates. Still keep the CFF bit enabled as we don't have a
2736 				 * proper SU configuration in case an update is sent for any
2737 				 * reason after the SFF bit gets cleared by the HW on the next vblank.
2738 				 */
2739 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2740 					       val);
2741 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2742 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2743 			}
2744 		} else {
2745 			/*
2746 			 * Continuous full frame is disabled; only a single full
2747 			 * frame is required.
2748 			 */
2749 			psr_force_hw_tracking_exit(intel_dp);
2750 		}
2751 	} else {
2752 		psr_force_hw_tracking_exit(intel_dp);
2753 
2754 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2755 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2756 	}
2757 }
2758 
2759 /**
2760  * intel_psr_flush - Flush PSR
2761  * @dev_priv: i915 device
2762  * @frontbuffer_bits: frontbuffer plane tracking bits
2763  * @origin: which operation caused the flush
2764  *
2765  * Since the hardware frontbuffer tracking has gaps we need to integrate
2766  * with the software frontbuffer tracking. This function gets called every
2767  * time frontbuffer rendering has completed and flushed out to memory. PSR
2768  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2769  *
2770  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2771  */
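 *
 * A typical sequence is: intel_psr_invalidate() marks the frontbuffer as
 * busy and exits PSR (or switches selective fetch to continuous full frame
 * mode), rendering completes, and intel_psr_flush() clears the busy bits
 * and either forces a HW tracking exit or schedules psr.work to re-activate
 * PSR once the hardware reports idle again.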
2772 void intel_psr_flush(struct drm_i915_private *dev_priv,
2773 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2774 {
2775 	struct intel_encoder *encoder;
2776 
2777 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2778 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2779 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2780 
2781 		mutex_lock(&intel_dp->psr.lock);
2782 		if (!intel_dp->psr.enabled) {
2783 			mutex_unlock(&intel_dp->psr.lock);
2784 			continue;
2785 		}
2786 
2787 		pipe_frontbuffer_bits &=
2788 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2789 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2790 
2791 		/*
2792 		 * If the PSR is paused by an explicit intel_psr_pause() call,
2793 		 * we have to ensure that the PSR is not activated until
2794 		 * intel_psr_resume() is called.
2795 		 */
2796 		if (intel_dp->psr.paused)
2797 			goto unlock;
2798 
2799 		if (origin == ORIGIN_FLIP ||
2800 		    (origin == ORIGIN_CURSOR_UPDATE &&
2801 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2802 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2803 			goto unlock;
2804 		}
2805 
2806 		if (pipe_frontbuffer_bits == 0)
2807 			goto unlock;
2808 
2809 		/* By definition flush = invalidate + flush */
2810 		_psr_flush_handle(intel_dp);
2811 unlock:
2812 		mutex_unlock(&intel_dp->psr.lock);
2813 	}
2814 }
2815 
2816 /**
2817  * intel_psr_init - Init basic PSR work and mutex.
2818  * @intel_dp: Intel DP
2819  *
2820  * This function is called after initializing the connector
2821  * (connector initialization handles the connector capabilities)
2822  * and it initializes basic PSR state for each DP encoder.
2823  */
2824 void intel_psr_init(struct intel_dp *intel_dp)
2825 {
2826 	struct intel_connector *connector = intel_dp->attached_connector;
2827 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2828 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2829 
2830 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2831 		return;
2832 
2833 	if (!intel_dp_is_edp(intel_dp))
2834 		intel_psr_init_dpcd(intel_dp);
2835 
2836 	/*
2837 	 * HSW spec explicitly says PSR is tied to port A.
2838 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2839 	 * on BDW, GEN9 and GEN11 the HW team has only validated the eDP
2840 	 * transcoder.
2841 	 * For now we only support one instance of PSR on BDW, GEN9 and GEN11,
2842 	 * so let's keep it hardcoded to PORT_A there.
2843 	 * GEN12 supports an instance of PSR registers per transcoder.
2844 	 */
2845 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2846 		drm_dbg_kms(&dev_priv->drm,
2847 			    "PSR condition failed: Port not supported\n");
2848 		return;
2849 	}
2850 
2851 	if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2852 		intel_dp->psr.source_panel_replay_support = true;
2853 	else
2854 		intel_dp->psr.source_support = true;
2855 
2856 	/* Set link_standby vs. link_off defaults */
2857 	if (DISPLAY_VER(dev_priv) < 12)
2858 		/* For platforms up to TGL let's respect the VBT again */
2859 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2860 
2861 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2862 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2863 	mutex_init(&intel_dp->psr.lock);
2864 }
2865 
2866 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2867 					   u8 *status, u8 *error_status)
2868 {
2869 	struct drm_dp_aux *aux = &intel_dp->aux;
2870 	int ret;
2871 	unsigned int offset;
2872 
2873 	offset = intel_dp->psr.panel_replay_enabled ?
2874 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2875 
2876 	ret = drm_dp_dpcd_readb(aux, offset, status);
2877 	if (ret != 1)
2878 		return ret;
2879 
2880 	offset = intel_dp->psr.panel_replay_enabled ?
2881 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2882 
2883 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
2884 	if (ret != 1)
2885 		return ret;
2886 
2887 	*status = *status & DP_PSR_SINK_STATE_MASK;
2888 
2889 	return 0;
2890 }
2891 
2892 static void psr_alpm_check(struct intel_dp *intel_dp)
2893 {
2894 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2895 	struct drm_dp_aux *aux = &intel_dp->aux;
2896 	struct intel_psr *psr = &intel_dp->psr;
2897 	u8 val;
2898 	int r;
2899 
2900 	if (!psr->psr2_enabled)
2901 		return;
2902 
2903 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2904 	if (r != 1) {
2905 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2906 		return;
2907 	}
2908 
2909 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2910 		intel_psr_disable_locked(intel_dp);
2911 		psr->sink_not_reliable = true;
2912 		drm_dbg_kms(&dev_priv->drm,
2913 			    "ALPM lock timeout error, disabling PSR\n");
2914 
2915 		/* Clearing error */
2916 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2917 	}
2918 }
2919 
2920 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2921 {
2922 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2923 	struct intel_psr *psr = &intel_dp->psr;
2924 	u8 val;
2925 	int r;
2926 
2927 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2928 	if (r != 1) {
2929 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2930 		return;
2931 	}
2932 
2933 	if (val & DP_PSR_CAPS_CHANGE) {
2934 		intel_psr_disable_locked(intel_dp);
2935 		psr->sink_not_reliable = true;
2936 		drm_dbg_kms(&dev_priv->drm,
2937 			    "Sink PSR capability changed, disabling PSR\n");
2938 
2939 		/* Clearing it */
2940 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2941 	}
2942 }
2943 
2944 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2945 {
2946 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2947 	struct intel_psr *psr = &intel_dp->psr;
2948 	u8 status, error_status;
2949 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2950 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2951 			  DP_PSR_LINK_CRC_ERROR;
2952 
2953 	if (!CAN_PSR(intel_dp))
2954 		return;
2955 
2956 	mutex_lock(&psr->lock);
2957 
2958 	if (!psr->enabled)
2959 		goto exit;
2960 
2961 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2962 		drm_err(&dev_priv->drm,
2963 			"Error reading PSR status or error status\n");
2964 		goto exit;
2965 	}
2966 
2967 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2968 		intel_psr_disable_locked(intel_dp);
2969 		psr->sink_not_reliable = true;
2970 	}
2971 
2972 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2973 		drm_dbg_kms(&dev_priv->drm,
2974 			    "PSR sink internal error, disabling PSR\n");
2975 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2976 		drm_dbg_kms(&dev_priv->drm,
2977 			    "PSR RFB storage error, disabling PSR\n");
2978 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2979 		drm_dbg_kms(&dev_priv->drm,
2980 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2981 	if (error_status & DP_PSR_LINK_CRC_ERROR)
2982 		drm_dbg_kms(&dev_priv->drm,
2983 			    "PSR Link CRC error, disabling PSR\n");
2984 
2985 	if (error_status & ~errors)
2986 		drm_err(&dev_priv->drm,
2987 			"PSR_ERROR_STATUS unhandled errors %x\n",
2988 			error_status & ~errors);
2989 	/* clear status register */
2990 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2991 
2992 	psr_alpm_check(intel_dp);
2993 	psr_capability_changed_check(intel_dp);
2994 
2995 exit:
2996 	mutex_unlock(&psr->lock);
2997 }
2998 
2999 bool intel_psr_enabled(struct intel_dp *intel_dp)
3000 {
3001 	bool ret;
3002 
3003 	if (!CAN_PSR(intel_dp))
3004 		return false;
3005 
3006 	mutex_lock(&intel_dp->psr.lock);
3007 	ret = intel_dp->psr.enabled;
3008 	mutex_unlock(&intel_dp->psr.lock);
3009 
3010 	return ret;
3011 }
3012 
3013 /**
3014  * intel_psr_lock - grab PSR lock
3015  * @crtc_state: the crtc state
3016  *
3017  * This is initially meant to be used around the CRTC update, when
3018  * vblank-sensitive registers are updated and we need to grab the lock
3019  * before it to avoid vblank evasion.
3020  */
3021 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3022 {
3023 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3024 	struct intel_encoder *encoder;
3025 
3026 	if (!crtc_state->has_psr)
3027 		return;
3028 
3029 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3030 					     crtc_state->uapi.encoder_mask) {
3031 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3032 
3033 		mutex_lock(&intel_dp->psr.lock);
3034 		break;
3035 	}
3036 }
3037 
3038 /**
3039  * intel_psr_unlock - release PSR lock
3040  * @crtc_state: the crtc state
3041  *
3042  * Release the PSR lock that was held during pipe update.
3043  */
3044 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3045 {
3046 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3047 	struct intel_encoder *encoder;
3048 
3049 	if (!crtc_state->has_psr)
3050 		return;
3051 
3052 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3053 					     crtc_state->uapi.encoder_mask) {
3054 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3055 
3056 		mutex_unlock(&intel_dp->psr.lock);
3057 		break;
3058 	}
3059 }
3060 
3061 static void
3062 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3063 {
3064 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3065 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3066 	const char *status = "unknown";
3067 	u32 val, status_val;
3068 
3069 	if (intel_dp->psr.psr2_enabled) {
3070 		static const char * const live_status[] = {
3071 			"IDLE",
3072 			"CAPTURE",
3073 			"CAPTURE_FS",
3074 			"SLEEP",
3075 			"BUFON_FW",
3076 			"ML_UP",
3077 			"SU_STANDBY",
3078 			"FAST_SLEEP",
3079 			"DEEP_SLEEP",
3080 			"BUF_ON",
3081 			"TG_ON"
3082 		};
3083 		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3084 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3085 		if (status_val < ARRAY_SIZE(live_status))
3086 			status = live_status[status_val];
3087 	} else {
3088 		static const char * const live_status[] = {
3089 			"IDLE",
3090 			"SRDONACK",
3091 			"SRDENT",
3092 			"BUFOFF",
3093 			"BUFON",
3094 			"AUXACK",
3095 			"SRDOFFACK",
3096 			"SRDENT_ON",
3097 		};
3098 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3099 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3100 		if (status_val < ARRAY_SIZE(live_status))
3101 			status = live_status[status_val];
3102 	}
3103 
3104 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3105 }
3106 
3107 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3108 {
3109 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3110 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3111 	struct intel_psr *psr = &intel_dp->psr;
3112 	intel_wakeref_t wakeref;
3113 	const char *status;
3114 	bool enabled;
3115 	u32 val;
3116 
3117 	seq_printf(m, "Sink support: PSR = %s",
3118 		   str_yes_no(psr->sink_support));
3119 
3120 	if (psr->sink_support)
3121 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3122 	seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3123 
3124 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3125 		return 0;
3126 
3127 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3128 	mutex_lock(&psr->lock);
3129 
3130 	if (psr->panel_replay_enabled)
3131 		status = "Panel Replay Enabled";
3132 	else if (psr->enabled)
3133 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3134 	else
3135 		status = "disabled";
3136 	seq_printf(m, "PSR mode: %s\n", status);
3137 
3138 	if (!psr->enabled) {
3139 		seq_printf(m, "PSR sink not reliable: %s\n",
3140 			   str_yes_no(psr->sink_not_reliable));
3141 
3142 		goto unlock;
3143 	}
3144 
3145 	if (psr->panel_replay_enabled) {
3146 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3147 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3148 	} else if (psr->psr2_enabled) {
3149 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3150 		enabled = val & EDP_PSR2_ENABLE;
3151 	} else {
3152 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3153 		enabled = val & EDP_PSR_ENABLE;
3154 	}
3155 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3156 		   str_enabled_disabled(enabled), val);
3157 	psr_source_status(intel_dp, m);
3158 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3159 		   psr->busy_frontbuffer_bits);
3160 
3161 	/*
3162 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3163 	 */
3164 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3165 	seq_printf(m, "Performance counter: %u\n",
3166 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3167 
3168 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3169 		seq_printf(m, "Last attempted entry at: %lld\n",
3170 			   psr->last_entry_attempt);
3171 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3172 	}
3173 
3174 	if (psr->psr2_enabled) {
3175 		u32 su_frames_val[3];
3176 		int frame;
3177 
3178 		/*
3179 		 * Read all 3 registers beforehand to minimize crossing a
3180 		 * frame boundary between register reads
3181 		 */
3182 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3183 			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3184 			su_frames_val[frame / 3] = val;
3185 		}
3186 
3187 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3188 
3189 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3190 			u32 su_blocks;
3191 
3192 			su_blocks = su_frames_val[frame / 3] &
3193 				    PSR2_SU_STATUS_MASK(frame);
3194 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3195 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3196 		}
3197 
3198 		seq_printf(m, "PSR2 selective fetch: %s\n",
3199 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3200 	}
3201 
3202 unlock:
3203 	mutex_unlock(&psr->lock);
3204 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3205 
3206 	return 0;
3207 }
3208 
3209 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3210 {
3211 	struct drm_i915_private *dev_priv = m->private;
3212 	struct intel_dp *intel_dp = NULL;
3213 	struct intel_encoder *encoder;
3214 
3215 	if (!HAS_PSR(dev_priv))
3216 		return -ENODEV;
3217 
3218 	/* Find the first EDP which supports PSR */
3219 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3220 		intel_dp = enc_to_intel_dp(encoder);
3221 		break;
3222 	}
3223 
3224 	if (!intel_dp)
3225 		return -ENODEV;
3226 
3227 	return intel_psr_status(m, intel_dp);
3228 }
3229 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3230 
3231 static int
3232 i915_edp_psr_debug_set(void *data, u64 val)
3233 {
3234 	struct drm_i915_private *dev_priv = data;
3235 	struct intel_encoder *encoder;
3236 	intel_wakeref_t wakeref;
3237 	int ret = -ENODEV;
3238 
3239 	if (!HAS_PSR(dev_priv))
3240 		return ret;
3241 
3242 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3243 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3244 
3245 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3246 
3247 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3248 
3249 		/* TODO: split to each transcoder's PSR debug state */
3250 		ret = intel_psr_debug_set(intel_dp, val);
3251 
3252 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3253 	}
3254 
3255 	return ret;
3256 }
3257 
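/*
 * The read side reports only the first PSR-capable encoder's debug value;
 * it is sampled with READ_ONCE() rather than under psr->lock.
 */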
3258 static int
3259 i915_edp_psr_debug_get(void *data, u64 *val)
3260 {
3261 	struct drm_i915_private *dev_priv = data;
3262 	struct intel_encoder *encoder;
3263 
3264 	if (!HAS_PSR(dev_priv))
3265 		return -ENODEV;
3266 
3267 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3268 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3269 
3270 		/* TODO: split to each transcoder's PSR debug state */
3271 		*val = READ_ONCE(intel_dp->psr.debug);
3272 		return 0;
3273 	}
3274 
3275 	return -ENODEV;
3276 }
3277 
3278 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3279 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3280 			"%llu\n");
3281 
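/*
 * Register the device-level PSR debugfs files on the primary DRM minor: the
 * writable debug knob and the read-only status dump used above.
 */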
3282 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3283 {
3284 	struct drm_minor *minor = i915->drm.primary;
3285 
3286 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3287 			    i915, &i915_edp_psr_debug_fops);
3288 
3289 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3290 			    i915, &i915_edp_psr_status_fops);
3291 }
3292 
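/* Label used in the sink status output below; Panel Replay takes precedence. */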
3293 static const char *psr_mode_str(struct intel_dp *intel_dp)
3294 {
3295 	if (intel_dp->psr.panel_replay_enabled)
3296 		return "PANEL-REPLAY";
3297 	else if (intel_dp->psr.enabled)
3298 		return "PSR";
3299 
3300 	return "unknown";
3301 }
3302 
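/*
 * Per-connector sink-side status: decode the DPCD status and error status
 * returned by psr_get_status_and_error_status() into human-readable strings,
 * using the Panel Replay frame-lock states when Panel Replay is enabled and
 * the PSR sink states otherwise.
 */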
3303 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3304 {
3305 	struct intel_connector *connector = m->private;
3306 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3307 	static const char * const sink_status[] = {
3308 		"inactive",
3309 		"transition to active, capture and display",
3310 		"active, display from RFB",
3311 		"active, capture and display on sink device timings",
3312 		"transition to inactive, capture and display, timing re-sync",
3313 		"reserved",
3314 		"reserved",
3315 		"sink internal error",
3316 	};
3317 	static const char * const panel_replay_status[] = {
3318 		"Sink device frame is locked to the Source device",
3319 		"Sink device is coasting, using the VTotal target",
3320 		"Sink device is governing the frame rate (frame rate unlock is granted)",
3321 		"Sink device in the process of re-locking with the Source device",
3322 	};
3323 	const char *str;
3324 	int ret;
3325 	u8 status, error_status;
3326 	u32 idx;
3327 
3328 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3329 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3330 		return -ENODEV;
3331 	}
3332 
3333 	if (connector->base.status != connector_status_connected)
3334 		return -ENODEV;
3335 
3336 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3337 	if (ret)
3338 		return ret;
3339 
3340 	str = "unknown";
3341 	if (intel_dp->psr.panel_replay_enabled) {
3342 		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3343 		if (idx < ARRAY_SIZE(panel_replay_status))
3344 			str = panel_replay_status[idx];
3345 	} else if (intel_dp->psr.enabled) {
3346 		idx = status & DP_PSR_SINK_STATE_MASK;
3347 		if (idx < ARRAY_SIZE(sink_status))
3348 			str = sink_status[idx];
3349 	}
3350 
3351 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3352 
3353 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3354 
3355 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3356 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3357 			    DP_PSR_LINK_CRC_ERROR))
3358 		seq_puts(m, ":\n");
3359 	else
3360 		seq_puts(m, "\n");
3361 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3362 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3363 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3364 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3365 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3366 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3367 
3368 	return ret;
3369 }
3370 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3371 
3372 static int i915_psr_status_show(struct seq_file *m, void *data)
3373 {
3374 	struct intel_connector *connector = m->private;
3375 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3376 
3377 	return intel_psr_status(m, intel_dp);
3378 }
3379 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3380 
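/*
 * Add the per-connector debugfs files: eDP connectors always get them, while
 * other DisplayPort connectors only qualify on DP 2.0 capable hardware (for
 * Panel Replay). With a typical debugfs mount these appear as e.g.
 * /sys/kernel/debug/dri/0/eDP-1/i915_psr_sink_status (connector name and
 * minor number are system dependent, so the path is only illustrative).
 */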
3381 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3382 {
3383 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3384 	struct dentry *root = connector->base.debugfs_entry;
3385 
3386 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) {
3387 		if (!(HAS_DP20(i915) &&
3388 		      connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort))
3389 			return;
3390 	}
3391 
3392 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3393 			    connector, &i915_psr_sink_status_fops);
3394 
3395 	if (HAS_PSR(i915) || HAS_DP20(i915))
3396 		debugfs_create_file("i915_psr_status", 0444, root,
3397 				    connector, &i915_psr_status_fops);
3398 }
3399