xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
33 #include "intel_de.h"
34 #include "intel_display_types.h"
35 #include "intel_dp.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
43 
44 /**
45  * DOC: Panel Self Refresh (PSR/SRD)
46  *
47  * Since Haswell the Display controller supports Panel Self-Refresh on display
48  * panels which have a remote frame buffer (RFB) implemented according to the
49  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower standby
50  * states when the system is idle but the display is on, as it eliminates display
51  * refresh requests to DDR memory completely as long as the frame buffer for that
52  * display is unchanged.
53  *
54  * Panel Self Refresh must be supported by both Hardware (source) and
55  * Panel (sink).
56  *
57  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58  * to power down the link and memory controller. For DSI panels the same idea
59  * is called "manual mode".
60  *
61  * The implementation uses the hardware-based PSR support which automatically
62  * enters/exits self-refresh mode. The hardware takes care of sending the
63  * required DP aux message and could even retrain the link (that part isn't
64  * enabled yet though). The hardware also keeps track of any frontbuffer
65  * changes to know when to exit self-refresh mode again. Unfortunately that
66  * part doesn't work too well, hence why the i915 PSR support uses the
67  * software frontbuffer tracking to make sure it doesn't miss a screen
68  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69  * get called by the frontbuffer tracking code. Note that because of locking
70  * issues the self-refresh re-enable code is done from a work queue, which
71  * must be correctly synchronized/cancelled when shutting down the pipe.
72  *
73  * DC3CO (DC3 clock off)
74  *
75  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
76  * the clock off automatically during the PSR2 idle state.
77  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
78  * entry/exit allows the HW to enter a low-power state even when page flipping
79  * periodically (for instance a 30fps video playback scenario).
80  *
81  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
82  * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
83  * after 6 frames. If no other flip occurs and that work function executes,
84  * DC3CO is disabled and PSR2 is configured to enter deep sleep again,
85  * resetting in case of another flip.
86  * Front buffer modifications intentionally do not trigger DC3CO activation
87  * as that would bring a lot of complexity and most modern systems will only
88  * use page flips.
89  */
90 
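/*
 * Rough sketch of the software frontbuffer tracking integration described
 * above (illustrative only, see intel_frontbuffer.c for the actual code):
 *
 *   intel_frontbuffer_invalidate()
 *     -> intel_psr_invalidate()    exit PSR before the frontbuffer is written
 *   intel_frontbuffer_flush()
 *     -> intel_psr_flush()         kicks intel_dp->psr.work, which
 *                                  re-activates PSR once things are idle again
 */
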
91 /*
92  * Description of PSR mask bits:
93  *
94  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
95  *
96  *  When unmasked (nearly) all display register writes (e.g. even
97  *  SWF) trigger a PSR exit. Some registers are excluded from this
98  *  and they have a more specific mask (described below). On icl+
99  *  this bit no longer exists and is effectively always set.
100  *
101  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
102  *
103  *  When unmasked (nearly) all pipe/plane register writes
104  *  trigger a PSR exit. Some plane registers are excluded from this
105  *  and they have a more specific mask (described below).
106  *
107  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
110  *
111  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112  *  SPR_SURF/CURBASE are not included in this and instead are
113  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
115  *
116  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
118  *
119  *  When unmasked PSR is blocked as long as the sprite
120  *  plane is enabled. skl+ with their universal planes no
121  *  longer have a mask bit like this, and no plane being
122  *  enabled blocks PSR.
123  *
124  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
126  *
127  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
128  *  this bit doesn't exist but CURPOS is included in the
129  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
130  *
131  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
133  *
134  *  When unmasked PSR is blocked as long as vblank and/or vsync
135  *  interrupt is unmasked in IMR *and* enabled in IER.
136  *
137  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
139  *
140  *  Selects whether PSR exit generates an extra vblank before
141  *  the first frame is transmitted. Also note the opposite polarity
142  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143  *  unmasked==do not generate the extra vblank).
144  *
145  *  With DC states enabled the extra vblank happens after link training,
146  *  with DC states disabled it happens immediately upon PSR exit trigger.
147  *  No idea as of now why there is a difference. HSW/BDW (which don't
148  *  even have DMC) always generate it after link training. Go figure.
149  *
150  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
151  *  and thus won't latch until the first vblank. So with DC states
152  *  enabled the register effectively uses the reset value during DC5
153  *  exit+PSR exit sequence, and thus the bit does nothing until
154  *  latched by the vblank that it was trying to prevent from being
155  *  generated in the first place. So we should probably call this
156  *  one a chicken/egg bit instead on skl+.
157  *
158  *  In standby mode (as opposed to link-off) this makes no difference
159  *  as the timing generator keeps running the whole time generating
160  *  normal periodic vblanks.
161  *
162  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163  *  and doing so makes the behaviour match the skl+ reset value.
164  *
165  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
167  *
168  *  On BDW without this bit set no vblanks whatsoever are
169  *  generated after PSR exit. On HSW this has no apparent effect.
170  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
171  *
172  * The rest of the bits are more self-explanatory and/or
173  * irrelevant for normal operation.
174  *
175  * Description of intel_crtc_state variables. has_psr, has_panel_replay and
176  * has_sel_update:
177  *
178  *  has_psr (alone):					PSR1
179  *  has_psr + has_sel_update:				PSR2
180  *  has_psr + has_panel_replay:				Panel Replay
181  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
182  *
183  * Description of some intel_psr variables. enabled, panel_replay_enabled,
184  * sel_update_enabled
185  *
186  *  enabled (alone):						PSR1
187  *  enabled + sel_update_enabled:				PSR2
188  *  enabled + panel_replay_enabled:				Panel Replay
189  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
190  */
191 
192 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
193 			   (intel_dp)->psr.source_support)
194 
195 bool intel_encoder_can_psr(struct intel_encoder *encoder)
196 {
197 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
198 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
199 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
200 	else
201 		return false;
202 }
203 
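/*
 * How the enable_psr module parameter is interpreted by the two helpers
 * below (derived from the code, not a definitive parameter reference):
 *  -1 - follow the per-panel VBT default
 *   0 - PSR disabled
 *   1 - PSR1 only (psr2_global_enabled() then returns false)
 *   2 - PSR1 and PSR2/selective update allowed
 */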
204 static bool psr_global_enabled(struct intel_dp *intel_dp)
205 {
206 	struct intel_connector *connector = intel_dp->attached_connector;
207 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
208 
209 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
210 	case I915_PSR_DEBUG_DEFAULT:
211 		if (i915->display.params.enable_psr == -1)
212 			return connector->panel.vbt.psr.enable;
213 		return i915->display.params.enable_psr;
214 	case I915_PSR_DEBUG_DISABLE:
215 		return false;
216 	default:
217 		return true;
218 	}
219 }
220 
221 static bool psr2_global_enabled(struct intel_dp *intel_dp)
222 {
223 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
224 
225 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
226 	case I915_PSR_DEBUG_DISABLE:
227 	case I915_PSR_DEBUG_FORCE_PSR1:
228 		return false;
229 	default:
230 		if (i915->display.params.enable_psr == 1)
231 			return false;
232 		return true;
233 	}
234 }
235 
236 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
237 {
238 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
239 
240 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
241 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
242 }
243 
244 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
245 {
246 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
247 
248 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
249 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
250 }
251 
252 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
253 {
254 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
255 
256 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
257 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
258 }
259 
260 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
261 {
262 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
263 
264 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
265 		EDP_PSR_MASK(intel_dp->psr.transcoder);
266 }
267 
268 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
269 			      enum transcoder cpu_transcoder)
270 {
271 	if (DISPLAY_VER(dev_priv) >= 8)
272 		return EDP_PSR_CTL(cpu_transcoder);
273 	else
274 		return HSW_SRD_CTL;
275 }
276 
277 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
278 				enum transcoder cpu_transcoder)
279 {
280 	if (DISPLAY_VER(dev_priv) >= 8)
281 		return EDP_PSR_DEBUG(cpu_transcoder);
282 	else
283 		return HSW_SRD_DEBUG;
284 }
285 
286 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
287 				   enum transcoder cpu_transcoder)
288 {
289 	if (DISPLAY_VER(dev_priv) >= 8)
290 		return EDP_PSR_PERF_CNT(cpu_transcoder);
291 	else
292 		return HSW_SRD_PERF_CNT;
293 }
294 
295 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
296 				 enum transcoder cpu_transcoder)
297 {
298 	if (DISPLAY_VER(dev_priv) >= 8)
299 		return EDP_PSR_STATUS(cpu_transcoder);
300 	else
301 		return HSW_SRD_STATUS;
302 }
303 
304 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
305 			      enum transcoder cpu_transcoder)
306 {
307 	if (DISPLAY_VER(dev_priv) >= 12)
308 		return TRANS_PSR_IMR(cpu_transcoder);
309 	else
310 		return EDP_PSR_IMR;
311 }
312 
313 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
314 			      enum transcoder cpu_transcoder)
315 {
316 	if (DISPLAY_VER(dev_priv) >= 12)
317 		return TRANS_PSR_IIR(cpu_transcoder);
318 	else
319 		return EDP_PSR_IIR;
320 }
321 
322 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
323 				  enum transcoder cpu_transcoder)
324 {
325 	if (DISPLAY_VER(dev_priv) >= 8)
326 		return EDP_PSR_AUX_CTL(cpu_transcoder);
327 	else
328 		return HSW_SRD_AUX_CTL;
329 }
330 
331 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
332 				   enum transcoder cpu_transcoder, int i)
333 {
334 	if (DISPLAY_VER(dev_priv) >= 8)
335 		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
336 	else
337 		return HSW_SRD_AUX_DATA(i);
338 }
339 
340 static void psr_irq_control(struct intel_dp *intel_dp)
341 {
342 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
343 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
344 	u32 mask;
345 
346 	if (intel_dp->psr.panel_replay_enabled)
347 		return;
348 
349 	mask = psr_irq_psr_error_bit_get(intel_dp);
350 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
351 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
352 			psr_irq_pre_entry_bit_get(intel_dp);
353 
354 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
355 		     psr_irq_mask_get(intel_dp), ~mask);
356 }
357 
358 static void psr_event_print(struct drm_i915_private *i915,
359 			    u32 val, bool psr2_enabled)
360 {
361 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
362 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
363 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
364 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
365 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
366 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
367 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
368 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
369 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
370 	if (val & PSR_EVENT_GRAPHICS_RESET)
371 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
372 	if (val & PSR_EVENT_PCH_INTERRUPT)
373 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
374 	if (val & PSR_EVENT_MEMORY_UP)
375 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
376 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
377 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
378 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
379 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
380 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
381 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
382 	if (val & PSR_EVENT_REGISTER_UPDATE)
383 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
384 	if (val & PSR_EVENT_HDCP_ENABLE)
385 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
386 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
387 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
388 	if (val & PSR_EVENT_VBI_ENABLE)
389 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
390 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
391 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
392 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
393 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
394 }
395 
396 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
397 {
398 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
399 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
400 	ktime_t time_ns =  ktime_get();
401 
402 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
403 		intel_dp->psr.last_entry_attempt = time_ns;
404 		drm_dbg_kms(&dev_priv->drm,
405 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
406 			    transcoder_name(cpu_transcoder));
407 	}
408 
409 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
410 		intel_dp->psr.last_exit = time_ns;
411 		drm_dbg_kms(&dev_priv->drm,
412 			    "[transcoder %s] PSR exit completed\n",
413 			    transcoder_name(cpu_transcoder));
414 
415 		if (DISPLAY_VER(dev_priv) >= 9) {
416 			u32 val;
417 
418 			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
419 
420 			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
421 		}
422 	}
423 
424 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
425 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
426 			 transcoder_name(cpu_transcoder));
427 
428 		intel_dp->psr.irq_aux_error = true;
429 
430 		/*
431 		 * If this interrupt is not masked it will keep
432 		 * firing so fast that it prevents the scheduled
433 		 * work from running.
434 		 * Also after a PSR error, we don't want to arm PSR
435 		 * again so we don't care about unmasking the interrupt
436 		 * or unsetting irq_aux_error.
437 		 */
438 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
439 			     0, psr_irq_psr_error_bit_get(intel_dp));
440 
441 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
442 	}
443 }
444 
445 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
446 {
447 	u8 alpm_caps = 0;
448 
449 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
450 			      &alpm_caps) != 1)
451 		return false;
452 	return alpm_caps & DP_ALPM_CAP;
453 }
454 
455 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
456 {
457 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
458 	u8 val = 8; /* assume the worst if we can't read the value */
459 
460 	if (drm_dp_dpcd_readb(&intel_dp->aux,
461 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
462 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
463 	else
464 		drm_dbg_kms(&i915->drm,
465 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
466 	return val;
467 }
468 
469 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
470 {
471 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
472 	ssize_t r;
473 	u16 w;
474 	u8 y;
475 
476 	/* If the sink doesn't have specific granularity requirements set the legacy ones */
477 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
478 		/* As PSR2 HW sends full lines, we do not care about x granularity */
479 		w = 4;
480 		y = 4;
481 		goto exit;
482 	}
483 
484 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
485 	if (r != 2)
486 		drm_dbg_kms(&i915->drm,
487 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
488 	/*
489 	 * Spec says that if the value read is 0 the default granularity should
490 	 * be used instead.
491 	 */
492 	if (r != 2 || w == 0)
493 		w = 4;
494 
495 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
496 	if (r != 1) {
497 		drm_dbg_kms(&i915->drm,
498 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
499 		y = 4;
500 	}
501 	if (y == 0)
502 		y = 1;
503 
504 exit:
505 	intel_dp->psr.su_w_granularity = w;
506 	intel_dp->psr.su_y_granularity = y;
507 }
508 
509 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
510 {
511 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
512 	u8 pr_dpcd = 0;
513 
514 	intel_dp->psr.sink_panel_replay_support = false;
515 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
516 
517 	if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
518 		drm_dbg_kms(&i915->drm,
519 			    "Panel replay is not supported by panel\n");
520 		return;
521 	}
522 
523 	drm_dbg_kms(&i915->drm,
524 		    "Panel replay is supported by panel\n");
525 	intel_dp->psr.sink_panel_replay_support = true;
526 }
527 
528 static void _psr_init_dpcd(struct intel_dp *intel_dp)
529 {
530 	struct drm_i915_private *i915 =
531 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
532 
533 	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
534 		    intel_dp->psr_dpcd[0]);
535 
536 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
537 		drm_dbg_kms(&i915->drm,
538 			    "PSR support not currently available for this panel\n");
539 		return;
540 	}
541 
542 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
543 		drm_dbg_kms(&i915->drm,
544 			    "Panel lacks power state control, PSR cannot be enabled\n");
545 		return;
546 	}
547 
548 	intel_dp->psr.sink_support = true;
549 	intel_dp->psr.sink_sync_latency =
550 		intel_dp_get_sink_sync_latency(intel_dp);
551 
552 	if (DISPLAY_VER(i915) >= 9 &&
553 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
554 		bool y_req = intel_dp->psr_dpcd[1] &
555 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
556 		bool alpm = intel_dp_get_alpm_status(intel_dp);
557 
558 		/*
559 		 * All panels that support PSR version 03h (PSR2 +
560 		 * Y-coordinate) can handle Y-coordinates in VSC but we are
561 		 * only sure that it is going to be used when required by the
562 		 * panel. This way the panel is capable of doing selective
563 		 * updates without an aux frame sync.
564 		 *
565 		 * To support PSR version 02h, and version 03h panels without
566 		 * the Y-coordinate requirement, we would need to enable
567 		 * GTC first.
568 		 */
569 		intel_dp->psr.sink_psr2_support = y_req && alpm;
570 		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
571 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
572 	}
573 }
574 
575 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
576 {
577 	_panel_replay_init_dpcd(intel_dp);
578 
579 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
580 			 sizeof(intel_dp->psr_dpcd));
581 
582 	if (intel_dp->psr_dpcd[0])
583 		_psr_init_dpcd(intel_dp);
584 
585 	if (intel_dp->psr.sink_psr2_support)
586 		intel_dp_get_su_granularity(intel_dp);
587 }
588 
589 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
590 {
591 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
592 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
593 	u32 aux_clock_divider, aux_ctl;
594 	/* write DP_SET_POWER=D0 */
595 	static const u8 aux_msg[] = {
596 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
597 		[1] = (DP_SET_POWER >> 8) & 0xff,
598 		[2] = DP_SET_POWER & 0xff,
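		/* header byte 3: number of data bytes to write minus one */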
599 		[3] = 1 - 1,
600 		[4] = DP_SET_POWER_D0,
601 	};
602 	int i;
603 
604 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
605 	for (i = 0; i < sizeof(aux_msg); i += 4)
606 		intel_de_write(dev_priv,
607 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
608 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
609 
610 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
611 
612 	/* Start with bits set for DDI_AUX_CTL register */
613 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
614 					     aux_clock_divider);
615 
616 	/* Select only valid bits for SRD_AUX_CTL */
617 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
618 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
619 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
620 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
621 
622 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
623 		       aux_ctl);
624 }
625 
626 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
627 {
628 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
629 
630 	if (DISPLAY_VER(i915) >= 20 &&
631 	    intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
632 	    !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
633 		return true;
634 
635 	return false;
636 }
637 
638 static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp)
639 {
640 	return intel_dp->psr.panel_replay_enabled ?
641 		PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG;
642 }
643 
644 /*
645  * Note: Most of the bits are the same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG.
646  * We rely on the PSR definitions for these "common" bits.
647  */
648 void intel_psr_enable_sink(struct intel_dp *intel_dp,
649 			   const struct intel_crtc_state *crtc_state)
650 {
651 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
652 	u8 dpcd_val = DP_PSR_ENABLE;
653 
654 	if (crtc_state->has_psr2) {
655 		/* Enable ALPM at sink for psr2 */
656 		if (!crtc_state->has_panel_replay) {
657 			drm_dp_dpcd_writeb(&intel_dp->aux,
658 					   DP_RECEIVER_ALPM_CONFIG,
659 					   DP_ALPM_ENABLE |
660 					   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
661 
662 			if (psr2_su_region_et_valid(intel_dp))
663 				dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
664 		}
665 
666 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
667 	} else {
668 		if (intel_dp->psr.link_standby)
669 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
670 
671 		if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8)
672 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
673 	}
674 
675 	if (crtc_state->has_panel_replay)
676 		dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
677 			DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN;
678 
679 	if (crtc_state->req_psr2_sdp_prior_scanline)
680 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
681 
682 	if (intel_dp->psr.entry_setup_frames > 0)
683 		dpcd_val |= DP_PSR_FRAME_CAPTURE;
684 
685 	drm_dp_dpcd_writeb(&intel_dp->aux,
686 			   intel_psr_get_enable_sink_offset(intel_dp),
687 			   dpcd_val);
688 
689 	if (intel_dp_is_edp(intel_dp))
690 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
691 }
692 
693 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
694 {
695 	struct intel_connector *connector = intel_dp->attached_connector;
696 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
697 	u32 val = 0;
698 
699 	if (DISPLAY_VER(dev_priv) >= 11)
700 		val |= EDP_PSR_TP4_TIME_0us;
701 
702 	if (dev_priv->display.params.psr_safest_params) {
703 		val |= EDP_PSR_TP1_TIME_2500us;
704 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
705 		goto check_tp3_sel;
706 	}
707 
708 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
709 		val |= EDP_PSR_TP1_TIME_0us;
710 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
711 		val |= EDP_PSR_TP1_TIME_100us;
712 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
713 		val |= EDP_PSR_TP1_TIME_500us;
714 	else
715 		val |= EDP_PSR_TP1_TIME_2500us;
716 
717 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
718 		val |= EDP_PSR_TP2_TP3_TIME_0us;
719 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
720 		val |= EDP_PSR_TP2_TP3_TIME_100us;
721 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
722 		val |= EDP_PSR_TP2_TP3_TIME_500us;
723 	else
724 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
725 
726 	/*
727 	 * WA 0479: hsw,bdw
728 	 * "Do not skip both TP1 and TP2/TP3"
729 	 */
730 	if (DISPLAY_VER(dev_priv) < 9 &&
731 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
732 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
733 		val |= EDP_PSR_TP2_TP3_TIME_100us;
734 
735 check_tp3_sel:
736 	if (intel_dp_source_supports_tps3(dev_priv) &&
737 	    drm_dp_tps3_supported(intel_dp->dpcd))
738 		val |= EDP_PSR_TP_TP1_TP3;
739 	else
740 		val |= EDP_PSR_TP_TP1_TP2;
741 
742 	return val;
743 }
744 
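/*
 * E.g. with the worst-case sink_sync_latency of 8 (and a VBT value of 9 or
 * less) this evaluates to 9 idle frames.
 */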
745 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
746 {
747 	struct intel_connector *connector = intel_dp->attached_connector;
748 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
749 	int idle_frames;
750 
751 	/* Let's use 6 as the minimum to cover all known cases including the
752 	 * off-by-one issue that HW has in some cases.
753 	 */
754 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
755 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
756 
757 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
758 		idle_frames = 0xf;
759 
760 	return idle_frames;
761 }
762 
763 static void hsw_activate_psr1(struct intel_dp *intel_dp)
764 {
765 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
766 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
767 	u32 max_sleep_time = 0x1f;
768 	u32 val = EDP_PSR_ENABLE;
769 
770 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
771 
772 	if (DISPLAY_VER(dev_priv) < 20)
773 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
774 
775 	if (IS_HASWELL(dev_priv))
776 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
777 
778 	if (intel_dp->psr.link_standby)
779 		val |= EDP_PSR_LINK_STANDBY;
780 
781 	val |= intel_psr1_get_tp_time(intel_dp);
782 
783 	if (DISPLAY_VER(dev_priv) >= 8)
784 		val |= EDP_PSR_CRC_ENABLE;
785 
786 	if (DISPLAY_VER(dev_priv) >= 20)
787 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
788 
789 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
790 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
791 }
792 
793 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
794 {
795 	struct intel_connector *connector = intel_dp->attached_connector;
796 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
797 	u32 val = 0;
798 
799 	if (dev_priv->display.params.psr_safest_params)
800 		return EDP_PSR2_TP2_TIME_2500us;
801 
802 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
803 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
804 		val |= EDP_PSR2_TP2_TIME_50us;
805 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
806 		val |= EDP_PSR2_TP2_TIME_100us;
807 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
808 		val |= EDP_PSR2_TP2_TIME_500us;
809 	else
810 		val |= EDP_PSR2_TP2_TIME_2500us;
811 
812 	return val;
813 }
814 
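/*
 * The PSR2 block count is 8 lines (BLOCK_COUNT_NUM 2) when both the IO and
 * fast wake line counts fit within 8 lines, otherwise 12 lines
 * (BLOCK_COUNT_NUM 3); see the use in hsw_activate_psr2().
 */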
815 static int psr2_block_count_lines(struct intel_dp *intel_dp)
816 {
817 	return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
818 		intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
819 }
820 
821 static int psr2_block_count(struct intel_dp *intel_dp)
822 {
823 	return psr2_block_count_lines(intel_dp) / 4;
824 }
825 
826 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
827 {
828 	u8 frames_before_su_entry;
829 
830 	frames_before_su_entry = max_t(u8,
831 				       intel_dp->psr.sink_sync_latency + 1,
832 				       2);
833 
834 	/* Entry setup frames must be at least 1 less than frames before SU entry */
835 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
836 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
837 
838 	return frames_before_su_entry;
839 }
840 
841 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
842 {
843 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
844 
845 	intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
846 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
847 
848 	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
849 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
850 }
851 
852 static void hsw_activate_psr2(struct intel_dp *intel_dp)
853 {
854 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
855 	struct intel_psr *psr = &intel_dp->psr;
856 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
857 	u32 val = EDP_PSR2_ENABLE;
858 	u32 psr_val = 0;
859 
860 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
861 
862 	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
863 		val |= EDP_SU_TRACK_ENABLE;
864 
865 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
866 		val |= EDP_Y_COORDINATE_ENABLE;
867 
868 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
869 
870 	val |= intel_psr2_get_tp_time(intel_dp);
871 
872 	if (DISPLAY_VER(dev_priv) >= 12) {
873 		if (psr2_block_count(intel_dp) > 2)
874 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
875 		else
876 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
877 	}
878 
879 	/* Wa_22012278275:adl-p */
880 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
881 		static const u8 map[] = {
882 			2, /* 5 lines */
883 			1, /* 6 lines */
884 			0, /* 7 lines */
885 			3, /* 8 lines */
886 			6, /* 9 lines */
887 			5, /* 10 lines */
888 			4, /* 11 lines */
889 			7, /* 12 lines */
890 		};
891 		/*
892 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
893 		 * comments below for more information
894 		 */
895 		int tmp;
896 
897 		tmp = map[psr->alpm_parameters.io_wake_lines -
898 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
899 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
900 
901 		tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
902 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
903 	} else if (DISPLAY_VER(dev_priv) >= 12) {
904 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
905 		val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
906 	} else if (DISPLAY_VER(dev_priv) >= 9) {
907 		val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
908 		val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
909 	}
910 
911 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
912 		val |= EDP_PSR2_SU_SDP_SCANLINE;
913 
914 	if (DISPLAY_VER(dev_priv) >= 20)
915 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
916 
917 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
918 		u32 tmp;
919 
920 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
921 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
922 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
923 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
924 	}
925 
926 	if (psr2_su_region_et_valid(intel_dp))
927 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
928 
929 	/*
930 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
931 	 * recommends keeping this bit unset while PSR2 is enabled.
932 	 */
933 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
934 
935 	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
936 }
937 
938 static bool
939 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
940 {
941 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
942 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
943 	else if (DISPLAY_VER(dev_priv) >= 12)
944 		return cpu_transcoder == TRANSCODER_A;
945 	else if (DISPLAY_VER(dev_priv) >= 9)
946 		return cpu_transcoder == TRANSCODER_EDP;
947 	else
948 		return false;
949 }
950 
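/* Frame time in us, e.g. a 60 Hz mode gives DIV_ROUND_UP(1000000, 60) = 16667 us. */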
951 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
952 {
953 	if (!crtc_state->hw.active)
954 		return 0;
955 
956 	return DIV_ROUND_UP(1000 * 1000,
957 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
958 }
959 
960 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
961 				     u32 idle_frames)
962 {
963 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
964 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
965 
966 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
967 		     EDP_PSR2_IDLE_FRAMES_MASK,
968 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
969 }
970 
971 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
972 {
973 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
974 
975 	psr2_program_idle_frames(intel_dp, 0);
976 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
977 }
978 
979 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
980 {
981 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
982 
983 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
984 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
985 }
986 
987 static void tgl_dc3co_disable_work(struct work_struct *work)
988 {
989 	struct intel_dp *intel_dp =
990 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
991 
992 	mutex_lock(&intel_dp->psr.lock);
993 	/* If delayed work is pending, it is not idle */
994 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
995 		goto unlock;
996 
997 	tgl_psr2_disable_dc3co(intel_dp);
998 unlock:
999 	mutex_unlock(&intel_dp->psr.lock);
1000 }
1001 
1002 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1003 {
1004 	if (!intel_dp->psr.dc3co_exitline)
1005 		return;
1006 
1007 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1008 	/* Before PSR2 exit disallow dc3co */
1009 	tgl_psr2_disable_dc3co(intel_dp);
1010 }
1011 
1012 static bool
1013 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1014 			      struct intel_crtc_state *crtc_state)
1015 {
1016 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1017 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1018 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1019 	enum port port = dig_port->base.port;
1020 
1021 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1022 		return pipe <= PIPE_B && port <= PORT_B;
1023 	else
1024 		return pipe == PIPE_A && port == PORT_A;
1025 }
1026 
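/*
 * Example (illustrative): on a 1080p60 mode (line time ~14.8 us) the 200 us
 * DC3CO exit time corresponds to 14 + 1 = 15 scanlines, so dc3co_exitline
 * would be 1080 - 15 = 1065 once the FIXME below is resolved.
 */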
1027 static void
1028 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1029 				  struct intel_crtc_state *crtc_state)
1030 {
1031 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1032 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1033 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1034 	u32 exit_scanlines;
1035 
1036 	/*
1037 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1038 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1039 	 * is applied. B.Specs:49196
1040 	 */
1041 	return;
1042 
1043 	/*
1044 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1045 	 * TODO: when the issue is addressed, this restriction should be removed.
1046 	 */
1047 	if (crtc_state->enable_psr2_sel_fetch)
1048 		return;
1049 
1050 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1051 		return;
1052 
1053 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1054 		return;
1055 
1056 	/* Wa_16011303918:adl-p */
1057 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1058 		return;
1059 
1060 	/*
1061 	 * DC3CO Exit time 200us B.Spec 49196
1062 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1063 	 */
1064 	exit_scanlines =
1065 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1066 
1067 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1068 		return;
1069 
1070 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1071 }
1072 
1073 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1074 					      struct intel_crtc_state *crtc_state)
1075 {
1076 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1077 
1078 	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1079 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1080 		drm_dbg_kms(&dev_priv->drm,
1081 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1082 		return false;
1083 	}
1084 
1085 	if (crtc_state->uapi.async_flip) {
1086 		drm_dbg_kms(&dev_priv->drm,
1087 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1088 		return false;
1089 	}
1090 
1091 	if (psr2_su_region_et_valid(intel_dp))
1092 		crtc_state->enable_psr2_su_region_et = true;
1093 
1094 	return crtc_state->enable_psr2_sel_fetch = true;
1095 }
1096 
1097 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1098 				   struct intel_crtc_state *crtc_state)
1099 {
1100 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1101 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1102 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1103 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1104 	u16 y_granularity = 0;
1105 
1106 	/* PSR2 HW only sends full lines so we only need to validate the width */
1107 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1108 		return false;
1109 
1110 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1111 		return false;
1112 
1113 	/* HW tracking is only aligned to 4 lines */
1114 	if (!crtc_state->enable_psr2_sel_fetch)
1115 		return intel_dp->psr.su_y_granularity == 4;
1116 
1117 	/*
1118 	 * adl_p and mtl platforms have 1 line granularity.
1119 	 * For other platforms with SW tracking we can adjust the y coordinates
1120 	 * to match the sink requirement if it is a multiple of 4.
1121 	 */
1122 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1123 		y_granularity = intel_dp->psr.su_y_granularity;
1124 	else if (intel_dp->psr.su_y_granularity <= 2)
1125 		y_granularity = 4;
1126 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1127 		y_granularity = intel_dp->psr.su_y_granularity;
1128 
1129 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1130 		return false;
1131 
1132 	if (crtc_state->dsc.compression_enable &&
1133 	    vdsc_cfg->slice_height % y_granularity)
1134 		return false;
1135 
1136 	crtc_state->su_y_granularity = y_granularity;
1137 	return true;
1138 }
1139 
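/*
 * Example (illustrative): a 1920x1080@60 mode with a 280 pixel hblank at a
 * 148.5 MHz dotclock gives hblank_ns ~= 1885 ns, while a 4 lane 2.7 GHz link
 * needs req_ns = (60/4 + 11) * 1000 / 270 ~= 96 ns, leaving well over the
 * required 100 ns of margin.
 */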
1140 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1141 							struct intel_crtc_state *crtc_state)
1142 {
1143 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1144 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1145 	u32 hblank_total, hblank_ns, req_ns;
1146 
1147 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1148 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1149 
1150 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1151 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1152 
1153 	if ((hblank_ns - req_ns) > 100)
1154 		return true;
1155 
1156 	/* Not supported <13 / Wa_22012279113:adl-p */
1157 	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1158 		return false;
1159 
1160 	crtc_state->req_psr2_sdp_prior_scanline = true;
1161 	return true;
1162 }
1163 
1164 /*
1165  * See Bspec: 71632 for the table
1166  *
1167  * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
1168  *
1169  * Half cycle duration:
1170  *
1171  * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
1172  * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
1173  *
1174  * Link rates 5.4 - 8.1
1175  * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
1176  * LFPS Period chosen is the mid-point of the min:max values from the table
1177  * FLOOR( LFPS Period in Symbol clocks /
1178  * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
1179  */
1180 static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
1181 							int *silence_period,
1182 							int *lfps_half_cycle)
1183 {
1184 	switch (link_rate) {
1185 	case 162000:
1186 		*silence_period = 20;
1187 		*lfps_half_cycle = 5;
1188 		break;
1189 	case 216000:
1190 		*silence_period = 27;
1191 		*lfps_half_cycle = 7;
1192 		break;
1193 	case 243000:
1194 		*silence_period = 31;
1195 		*lfps_half_cycle = 8;
1196 		break;
1197 	case 270000:
1198 		*silence_period = 34;
1199 		*lfps_half_cycle = 9;
1200 		break;
1201 	case 324000:
1202 		*silence_period = 41;
1203 		*lfps_half_cycle = 11;
1204 		break;
1205 	case 432000:
1206 		*silence_period = 56;
1207 		*lfps_half_cycle = 15;
1208 		break;
1209 	case 540000:
1210 		*silence_period = 69;
1211 		*lfps_half_cycle = 12;
1212 		break;
1213 	case 648000:
1214 		*silence_period = 84;
1215 		*lfps_half_cycle = 15;
1216 		break;
1217 	case 675000:
1218 		*silence_period = 87;
1219 		*lfps_half_cycle = 15;
1220 		break;
1221 	case 810000:
1222 		*silence_period = 104;
1223 		*lfps_half_cycle = 19;
1224 		break;
1225 	default:
1226 		*silence_period = *lfps_half_cycle = -1;
1227 		return false;
1228 	}
1229 	return true;
1230 }
1231 
1232 /*
1233  * AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period,Max +
1234  * tSilence,Max + tPHY Establishment + tCDS) / tline)
1235  * For the "PHY P2 to P0" latency see the PHY Power Control page
1236  * (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965
1237  * : 12 us
1238  * The tLFPS_Period, Max term is 800ns
1239  * The tSilence, Max term is 180ns
1240  * The tPHY Establishment (a.k.a. t1) term is 50us
1241  * The tCDS term is 1 or 2 times t2
1242  * t2 = Number ML_PHY_LOCK * tML_PHY_LOCK
1243  * Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1)
1244  * Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and
1245  * adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start
1246  * within the CDS period complete within the CDS period regardless of
1247  * entry into the period
1248  * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
1249  * TPS4 Length = 252 Symbols
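 *
 * Worked example (illustrative, not from Bspec): at a 5.4 GHz link rate
 * (port_clock = 540000) tML_PHY_LOCK ~= 4666 ns, Number ML_PHY_LOCK = 10,
 * so t2 = tCDS ~= 46.7 us and the total wake time rounds up to ~110 us.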
1250  */
1251 static int _lnl_compute_aux_less_wake_time(int port_clock)
1252 {
1253 	int tphy2_p2_to_p0 = 12 * 1000;
1254 	int tlfps_period_max = 800;
1255 	int tsilence_max = 180;
1256 	int t1 = 50 * 1000;
1257 	int tps4 = 252;
1258 	int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
1259 	int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
1260 	int t2 = num_ml_phy_lock * tml_phy_lock;
1261 	int tcds = 1 * t2;
1262 
1263 	return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
1264 			    t1 + tcds, 1000);
1265 }
1266 
1267 static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
1268 					     struct intel_crtc_state *crtc_state)
1269 {
1270 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1271 	int aux_less_wake_time, aux_less_wake_lines, silence_period,
1272 		lfps_half_cycle;
1273 
1274 	aux_less_wake_time =
1275 		_lnl_compute_aux_less_wake_time(crtc_state->port_clock);
1276 	aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
1277 						       aux_less_wake_time);
1278 
1279 	if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
1280 							 &silence_period,
1281 							 &lfps_half_cycle))
1282 		return false;
1283 
1284 	if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
1285 	    silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
1286 	    lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
1287 		return false;
1288 
1289 	if (i915->display.params.psr_safest_params)
1290 		aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
1291 
1292 	intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines;
1293 	intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period;
1294 	intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
1295 
1296 	return true;
1297 }
1298 
1299 static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
1300 				     struct intel_crtc_state *crtc_state)
1301 {
1302 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1303 	int check_entry_lines;
1304 
1305 	if (DISPLAY_VER(i915) < 20)
1306 		return true;
1307 
1308 	/* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
1309 	check_entry_lines = 2 +
1310 		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
1311 
1312 	if (check_entry_lines > 15)
1313 		return false;
1314 
1315 	if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
1316 		return false;
1317 
1318 	if (i915->display.params.psr_safest_params)
1319 		check_entry_lines = 15;
1320 
1321 	intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
1322 
1323 	return true;
1324 }
1325 
1326 /*
1327  * IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There
1328  * are 50 us io wake time and 32 us fast wake time. Clearly precharge pulses are
1329  * not (improperly) included in the 32 us fast wake time. 50 us - 32 us = 18 us.
1330  */
1331 static int skl_io_buffer_wake_time(void)
1332 {
1333 	return 18;
1334 }
1335 
1336 static int tgl_io_buffer_wake_time(void)
1337 {
1338 	return 10;
1339 }
1340 
1341 static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
1342 {
1343 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1344 
1345 	if (DISPLAY_VER(i915) >= 12)
1346 		return tgl_io_buffer_wake_time();
1347 	else
1348 		return skl_io_buffer_wake_time();
1349 }
1350 
1351 static bool _compute_alpm_params(struct intel_dp *intel_dp,
1352 				 struct intel_crtc_state *crtc_state)
1353 {
1354 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1355 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1356 	int tfw_exit_latency = 20; /* eDP spec */
1357 	int phy_wake = 4;	   /* eDP spec */
1358 	int preamble = 8;	   /* eDP spec */
1359 	int precharge = intel_dp_aux_fw_sync_len() - preamble;
1360 	u8 max_wake_lines;
1361 
1362 	io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
1363 		preamble + phy_wake + tfw_exit_latency;
1364 	fast_wake_time = precharge + preamble + phy_wake +
1365 		tfw_exit_latency;
1366 
1367 	if (DISPLAY_VER(i915) >= 12)
1368 		/* TODO: Check how we can use ALPM_CTL fast wake extended field */
1369 		max_wake_lines = 12;
1370 	else
1371 		max_wake_lines = 8;
1372 
1373 	io_wake_lines = intel_usecs_to_scanlines(
1374 		&crtc_state->hw.adjusted_mode, io_wake_time);
1375 	fast_wake_lines = intel_usecs_to_scanlines(
1376 		&crtc_state->hw.adjusted_mode, fast_wake_time);
1377 
1378 	if (io_wake_lines > max_wake_lines ||
1379 	    fast_wake_lines > max_wake_lines)
1380 		return false;
1381 
1382 	if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
1383 		return false;
1384 
1385 	if (i915->display.params.psr_safest_params)
1386 		io_wake_lines = fast_wake_lines = max_wake_lines;
1387 
1388 	/* According to Bspec the lower limit should be set to 7 lines. */
1389 	intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
1390 	intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
1391 
1392 	return true;
1393 }
1394 
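/*
 * Example (illustrative): a 330 us PSR setup time on a 1920x1080@60 mode
 * (line time ~14.8 us) needs ~23 scanlines, which fits in the 44 usable
 * vblank lines (vtotal - vdisplay - 1), so no entry setup frames are needed.
 */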
1395 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1396 					const struct drm_display_mode *adjusted_mode)
1397 {
1398 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1399 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1400 	int entry_setup_frames = 0;
1401 
1402 	if (psr_setup_time < 0) {
1403 		drm_dbg_kms(&i915->drm,
1404 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1405 			    intel_dp->psr_dpcd[1]);
1406 		return -ETIME;
1407 	}
1408 
1409 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1410 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1411 		if (DISPLAY_VER(i915) >= 20) {
1412 			/* setup entry frames can be up to 3 frames */
1413 			entry_setup_frames = 1;
1414 			drm_dbg_kms(&i915->drm,
1415 				    "PSR setup entry frames %d\n",
1416 				    entry_setup_frames);
1417 		} else {
1418 			drm_dbg_kms(&i915->drm,
1419 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1420 				    psr_setup_time);
1421 			return -ETIME;
1422 		}
1423 	}
1424 
1425 	return entry_setup_frames;
1426 }
1427 
1428 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1429 				    struct intel_crtc_state *crtc_state)
1430 {
1431 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1432 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1433 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1434 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1435 
1436 	if (!intel_dp->psr.sink_psr2_support)
1437 		return false;
1438 
1439 	/* JSL and EHL only support eDP 1.3 */
1440 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1441 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1442 		return false;
1443 	}
1444 
1445 	/* Wa_16011181250 */
1446 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1447 	    IS_DG2(dev_priv)) {
1448 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1449 		return false;
1450 	}
1451 
1452 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1453 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1454 		return false;
1455 	}
1456 
1457 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1458 		drm_dbg_kms(&dev_priv->drm,
1459 			    "PSR2 not supported in transcoder %s\n",
1460 			    transcoder_name(crtc_state->cpu_transcoder));
1461 		return false;
1462 	}
1463 
1464 	if (!psr2_global_enabled(intel_dp)) {
1465 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1466 		return false;
1467 	}
1468 
1469 	/*
1470 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1471 	 * resolution requires DSC to be enabled, priority is given to DSC
1472 	 * over PSR2.
1473 	 */
1474 	if (crtc_state->dsc.compression_enable &&
1475 	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1476 		drm_dbg_kms(&dev_priv->drm,
1477 			    "PSR2 cannot be enabled since DSC is enabled\n");
1478 		return false;
1479 	}
1480 
1481 	if (crtc_state->crc_enabled) {
1482 		drm_dbg_kms(&dev_priv->drm,
1483 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1484 		return false;
1485 	}
1486 
1487 	if (DISPLAY_VER(dev_priv) >= 12) {
1488 		psr_max_h = 5120;
1489 		psr_max_v = 3200;
1490 		max_bpp = 30;
1491 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1492 		psr_max_h = 4096;
1493 		psr_max_v = 2304;
1494 		max_bpp = 24;
1495 	} else if (DISPLAY_VER(dev_priv) == 9) {
1496 		psr_max_h = 3640;
1497 		psr_max_v = 2304;
1498 		max_bpp = 24;
1499 	}
1500 
1501 	if (crtc_state->pipe_bpp > max_bpp) {
1502 		drm_dbg_kms(&dev_priv->drm,
1503 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1504 			    crtc_state->pipe_bpp, max_bpp);
1505 		return false;
1506 	}
1507 
1508 	/* Wa_16011303918:adl-p */
1509 	if (crtc_state->vrr.enable &&
1510 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1511 		drm_dbg_kms(&dev_priv->drm,
1512 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1513 		return false;
1514 	}
1515 
1516 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1517 		drm_dbg_kms(&dev_priv->drm,
1518 			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1519 		return false;
1520 	}
1521 
1522 	if (!_compute_alpm_params(intel_dp, crtc_state)) {
1523 		drm_dbg_kms(&dev_priv->drm,
1524 			    "PSR2 not enabled, Unable to use long enough wake times\n");
1525 		return false;
1526 	}
1527 
1528 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1529 	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1530 	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
1531 	    psr2_block_count_lines(intel_dp)) {
1532 		drm_dbg_kms(&dev_priv->drm,
1533 			    "PSR2 not enabled, too short vblank time\n");
1534 		return false;
1535 	}
1536 
1537 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1538 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1539 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
1540 			drm_dbg_kms(&dev_priv->drm,
1541 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1542 			return false;
1543 		}
1544 	}
1545 
1546 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1547 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1548 		goto unsupported;
1549 	}
1550 
1551 	if (!crtc_state->enable_psr2_sel_fetch &&
1552 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1553 		drm_dbg_kms(&dev_priv->drm,
1554 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1555 			    crtc_hdisplay, crtc_vdisplay,
1556 			    psr_max_h, psr_max_v);
1557 		goto unsupported;
1558 	}
1559 
1560 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1561 	return true;
1562 
1563 unsupported:
1564 	crtc_state->enable_psr2_sel_fetch = false;
1565 	return false;
1566 }
1567 
1568 static bool _psr_compute_config(struct intel_dp *intel_dp,
1569 				struct intel_crtc_state *crtc_state)
1570 {
1571 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1572 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1573 	int entry_setup_frames;
1574 
1575 	/*
1576 	 * Current PSR panels don't work reliably with VRR enabled,
1577 	 * so if VRR is enabled, do not enable PSR.
1578 	 */
1579 	if (crtc_state->vrr.enable)
1580 		return false;
1581 
1582 	if (!CAN_PSR(intel_dp))
1583 		return false;
1584 
1585 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1586 
1587 	if (entry_setup_frames >= 0) {
1588 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1589 	} else {
1590 		drm_dbg_kms(&dev_priv->drm,
1591 			    "PSR condition failed: PSR setup timing not met\n");
1592 		return false;
1593 	}
1594 
1595 	return true;
1596 }
1597 
1598 void intel_psr_compute_config(struct intel_dp *intel_dp,
1599 			      struct intel_crtc_state *crtc_state,
1600 			      struct drm_connector_state *conn_state)
1601 {
1602 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1603 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1604 
1605 	if (!psr_global_enabled(intel_dp)) {
1606 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1607 		return;
1608 	}
1609 
1610 	if (intel_dp->psr.sink_not_reliable) {
1611 		drm_dbg_kms(&dev_priv->drm,
1612 			    "PSR sink implementation is not reliable\n");
1613 		return;
1614 	}
1615 
1616 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1617 		drm_dbg_kms(&dev_priv->drm,
1618 			    "PSR condition failed: Interlaced mode enabled\n");
1619 		return;
1620 	}
1621 
1622 	/*
1623 	 * FIXME figure out what is wrong with PSR+bigjoiner and
1624 	 * fix it. Presumably something related to the fact that
1625 	 * PSR is a transcoder level feature.
1626 	 */
1627 	if (crtc_state->bigjoiner_pipes) {
1628 		drm_dbg_kms(&dev_priv->drm,
1629 			    "PSR disabled due to bigjoiner\n");
1630 		return;
1631 	}
1632 
1633 	if (CAN_PANEL_REPLAY(intel_dp))
1634 		crtc_state->has_panel_replay = true;
1635 
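	/* Prefer Panel Replay when available, otherwise fall back to PSR. */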
1636 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1637 		_psr_compute_config(intel_dp, crtc_state);
1638 
1639 	if (!crtc_state->has_psr)
1640 		return;
1641 
1642 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1643 }
1644 
1645 void intel_psr_get_config(struct intel_encoder *encoder,
1646 			  struct intel_crtc_state *pipe_config)
1647 {
1648 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1649 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1650 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1651 	struct intel_dp *intel_dp;
1652 	u32 val;
1653 
1654 	if (!dig_port)
1655 		return;
1656 
1657 	intel_dp = &dig_port->dp;
1658 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1659 		return;
1660 
1661 	mutex_lock(&intel_dp->psr.lock);
1662 	if (!intel_dp->psr.enabled)
1663 		goto unlock;
1664 
1665 	if (intel_dp->psr.panel_replay_enabled) {
1666 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1667 	} else {
1668 		/*
1669 		 * Not possible to read EDP_PSR/PSR2_CTL registers as they are
1670 		 * enabled/disabled dynamically by frontbuffer tracking and others.
1671 		 */
1672 		pipe_config->has_psr = true;
1673 	}
1674 
1675 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1676 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1677 
1678 	if (!intel_dp->psr.psr2_enabled)
1679 		goto unlock;
1680 
1681 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1682 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1683 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1684 			pipe_config->enable_psr2_sel_fetch = true;
1685 	}
1686 
1687 	if (DISPLAY_VER(dev_priv) >= 12) {
1688 		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1689 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1690 	}
1691 unlock:
1692 	mutex_unlock(&intel_dp->psr.lock);
1693 }
1694 
1695 static void intel_psr_activate(struct intel_dp *intel_dp)
1696 {
1697 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1698 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1699 
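	/* Sanity check: the HW enable bits must still be clear and PSR not yet marked active. */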
1700 	drm_WARN_ON(&dev_priv->drm,
1701 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1702 		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1703 
1704 	drm_WARN_ON(&dev_priv->drm,
1705 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1706 
1707 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1708 
1709 	lockdep_assert_held(&intel_dp->psr.lock);
1710 
1711 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1712 	if (intel_dp->psr.panel_replay_enabled)
1713 		dg2_activate_panel_replay(intel_dp);
1714 	else if (intel_dp->psr.psr2_enabled)
1715 		hsw_activate_psr2(intel_dp);
1716 	else
1717 		hsw_activate_psr1(intel_dp);
1718 
1719 	intel_dp->psr.active = true;
1720 }
1721 
1722 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1723 {
1724 	switch (intel_dp->psr.pipe) {
1725 	case PIPE_A:
1726 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1727 	case PIPE_B:
1728 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1729 	case PIPE_C:
1730 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1731 	case PIPE_D:
1732 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1733 	default:
1734 		MISSING_CASE(intel_dp->psr.pipe);
1735 		return 0;
1736 	}
1737 }
1738 
1739 /*
1740  * Wa_16013835468
1741  * Wa_14015648006
1742  */
1743 static void wm_optimization_wa(struct intel_dp *intel_dp,
1744 			       const struct intel_crtc_state *crtc_state)
1745 {
1746 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1747 	bool set_wa_bit = false;
1748 
1749 	/* Wa_14015648006 */
1750 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1751 		set_wa_bit |= crtc_state->wm_level_disabled;
1752 
1753 	/* Wa_16013835468 */
1754 	if (DISPLAY_VER(dev_priv) == 12)
1755 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1756 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1757 
1758 	if (set_wa_bit)
1759 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1760 			     0, wa_16013835468_bit_get(intel_dp));
1761 	else
1762 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1763 			     wa_16013835468_bit_get(intel_dp), 0);
1764 }
1765 
1766 static void lnl_alpm_configure(struct intel_dp *intel_dp)
1767 {
1768 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1769 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1770 	struct intel_psr *psr = &intel_dp->psr;
1771 	u32 alpm_ctl;
1772 
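	/* ALPM_CTL is only programmed on display 20+ (LNL), and only for PSR2 or eDP. */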
1773 	if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled &&
1774 					   !intel_dp_is_edp(intel_dp)))
1775 		return;
1776 
1777 	/*
1778 	 * Panel Replay on eDP always uses AUX-less ALPM, i.e. there is no need
1779 	 * to check panel support at this point.
1780 	 */
1781 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
1782 		alpm_ctl = ALPM_CTL_ALPM_ENABLE |
1783 			ALPM_CTL_ALPM_AUX_LESS_ENABLE |
1784 			ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS;
1785 
1786 		intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
1787 			       PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
1788 			       PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
1789 			       PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
1790 			       PORT_ALPM_CTL_SILENCE_PERIOD(
1791 				       psr->alpm_parameters.silence_period_sym_clocks));
1792 
1793 		intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder),
1794 			       PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
1795 			       PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
1796 				       psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
1797 			       PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
1798 				       psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
1799 			       PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
1800 				       psr->alpm_parameters.lfps_half_cycle_num_of_syms));
1801 	} else {
1802 		alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
1803 			ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines);
1804 	}
1805 
1806 	alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines);
1807 
1808 	intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl);
1809 }
1810 
1811 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1812 				    const struct intel_crtc_state *crtc_state)
1813 {
1814 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1815 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1816 	u32 mask = 0;
1817 
1818 	/*
1819 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1820 	 * SKL+ use hardcoded values for PSR AUX transactions.
1821 	 */
1822 	if (DISPLAY_VER(dev_priv) < 9)
1823 		hsw_psr_setup_aux(intel_dp);
1824 
1825 	/*
1826 	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1827 	 * mask LPSP to avoid a dependency on other drivers that might block
1828 	 * runtime_pm, besides preventing other HW tracking issues, now that we
1829 	 * can rely on frontbuffer tracking.
1830 	 *
1831 	 * From bspec prior to LunarLake:
1832 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1833 	 * panel replay mode.
1834 	 *
1835 	 * From bspec beyond LunarLake:
1836 	 * Panel Replay on DP: No bits are applicable
1837 	 * Panel Replay on eDP: All bits are applicable
1838 	 */
1839 	if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
1840 		mask = EDP_PSR_DEBUG_MASK_HPD;
1841 
1842 	if (intel_dp_is_edp(intel_dp)) {
1843 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1844 
1845 		/*
1846 		 * For some unknown reason on HSW non-ULT (or at least on
1847 		 * Dell Latitude E6540) external displays start to flicker
1848 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1849 		 * higher than should be possible with an external display.
1850 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1851 		 * when external displays are active.
1852 		 */
1853 		if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1854 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1855 
1856 		if (DISPLAY_VER(dev_priv) < 20)
1857 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1858 
1859 		/*
1860 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1861 		 * registers in order to keep the CURSURFLIVE tricks working :(
1862 		 */
1863 		if (IS_DISPLAY_VER(dev_priv, 9, 10))
1864 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1865 
1866 		/* allow PSR with sprite enabled */
1867 		if (IS_HASWELL(dev_priv))
1868 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1869 	}
1870 
1871 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1872 
1873 	psr_irq_control(intel_dp);
1874 
1875 	/*
1876 	 * TODO: if future platforms supports DC3CO in more than one
1877 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1878 	 */
1879 	if (intel_dp->psr.dc3co_exitline)
1880 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1881 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1882 
1883 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1884 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1885 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1886 			     IGNORE_PSR2_HW_TRACKING : 0);
1887 
1888 	if (intel_dp_is_edp(intel_dp))
1889 		lnl_alpm_configure(intel_dp);
1890 
1891 	/*
1892 	 * Wa_16013835468
1893 	 * Wa_14015648006
1894 	 */
1895 	wm_optimization_wa(intel_dp, crtc_state);
1896 
1897 	if (intel_dp->psr.psr2_enabled) {
1898 		if (DISPLAY_VER(dev_priv) == 9)
1899 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1900 				     PSR2_VSC_ENABLE_PROG_HEADER |
1901 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1902 
1903 		/*
1904 		 * Wa_16014451276:adlp,mtl[a0,b0]
1905 		 * All supported adlp panels have 1-based X granularity; this may
1906 		 * cause issues if unsupported panels are used.
1907 		 */
1908 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1909 		    IS_ALDERLAKE_P(dev_priv))
1910 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1911 				     0, ADLP_1_BASED_X_GRANULARITY);
1912 
1913 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1914 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1915 			intel_de_rmw(dev_priv,
1916 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1917 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1918 		else if (IS_ALDERLAKE_P(dev_priv))
1919 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1920 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1921 	}
1922 }
1923 
1924 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1925 {
1926 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1927 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1928 	u32 val;
1929 
1930 	if (intel_dp->psr.panel_replay_enabled)
1931 		goto no_err;
1932 
1933 	/*
1934 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1935 	 * will still keep the error set even after the reset done in the
1936 	 * irq_preinstall and irq_uninstall hooks.
1937 	 * Enabling PSR in this situation causes the screen to freeze the
1938 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1939 	 * to avoid any rendering problems.
1940 	 */
1941 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1942 	val &= psr_irq_psr_error_bit_get(intel_dp);
1943 	if (val) {
1944 		intel_dp->psr.sink_not_reliable = true;
1945 		drm_dbg_kms(&dev_priv->drm,
1946 			    "PSR interruption error set, not enabling PSR\n");
1947 		return false;
1948 	}
1949 
1950 no_err:
1951 	return true;
1952 }
1953 
1954 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1955 				    const struct intel_crtc_state *crtc_state)
1956 {
1957 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1958 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1959 	u32 val;
1960 
1961 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1962 
1963 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1964 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1965 	intel_dp->psr.busy_frontbuffer_bits = 0;
1966 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1967 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1968 	/* DC5/DC6 requires at least 6 idle frames */
1969 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1970 	intel_dp->psr.dc3co_exit_delay = val;
1971 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1972 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1973 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1974 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1975 		crtc_state->req_psr2_sdp_prior_scanline;
1976 
1977 	if (!psr_interrupt_error_check(intel_dp))
1978 		return;
1979 
1980 	if (intel_dp->psr.panel_replay_enabled) {
1981 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1982 	} else {
1983 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1984 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1985 
1986 		/*
1987 		 * Panel replay has to be enabled before link training: doing it
1988 		 * only for PSR here.
1989 		 */
1990 		intel_psr_enable_sink(intel_dp, crtc_state);
1991 	}
1992 
1993 	if (intel_dp_is_edp(intel_dp))
1994 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
1995 
1996 	intel_psr_enable_source(intel_dp, crtc_state);
1997 	intel_dp->psr.enabled = true;
1998 	intel_dp->psr.paused = false;
1999 
2000 	intel_psr_activate(intel_dp);
2001 }
2002 
2003 static void intel_psr_exit(struct intel_dp *intel_dp)
2004 {
2005 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2006 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2007 	u32 val;
2008 
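	/* If PSR was never activated the HW enable bits should already be clear. */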
2009 	if (!intel_dp->psr.active) {
2010 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
2011 			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
2012 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
2013 		}
2014 
2015 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
2016 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
2017 
2018 		return;
2019 	}
2020 
2021 	if (intel_dp->psr.panel_replay_enabled) {
2022 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2023 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2024 	} else if (intel_dp->psr.psr2_enabled) {
2025 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2026 
2027 		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
2028 				   EDP_PSR2_ENABLE, 0);
2029 
2030 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
2031 	} else {
2032 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
2033 				   EDP_PSR_ENABLE, 0);
2034 
2035 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
2036 	}
2037 	intel_dp->psr.active = false;
2038 }
2039 
2040 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2041 {
2042 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2043 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2044 	i915_reg_t psr_status;
2045 	u32 psr_status_mask;
2046 
2047 	if (intel_dp->psr.psr2_enabled) {
2048 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
2049 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2050 	} else {
2051 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
2052 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2053 	}
2054 
2055 	/* Wait till PSR is idle */
2056 	if (intel_de_wait_for_clear(dev_priv, psr_status,
2057 				    psr_status_mask, 2000))
2058 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
2059 }
2060 
2061 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2062 {
2063 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2064 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2065 
2066 	lockdep_assert_held(&intel_dp->psr.lock);
2067 
2068 	if (!intel_dp->psr.enabled)
2069 		return;
2070 
2071 	if (intel_dp->psr.panel_replay_enabled)
2072 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
2073 	else
2074 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
2075 			    intel_dp->psr.psr2_enabled ? "2" : "1");
2076 
2077 	intel_psr_exit(intel_dp);
2078 	intel_psr_wait_exit_locked(intel_dp);
2079 
2080 	/*
2081 	 * Wa_16013835468
2082 	 * Wa_14015648006
2083 	 */
2084 	if (DISPLAY_VER(dev_priv) >= 11)
2085 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
2086 			     wa_16013835468_bit_get(intel_dp), 0);
2087 
2088 	if (intel_dp->psr.psr2_enabled) {
2089 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2090 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
2091 			intel_de_rmw(dev_priv,
2092 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
2093 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2094 		else if (IS_ALDERLAKE_P(dev_priv))
2095 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
2096 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2097 	}
2098 
2099 	if (intel_dp_is_edp(intel_dp))
2100 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2101 
2102 	/* Panel Replay on eDP always uses AUX-less ALPM. */
2103 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
2104 		intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder),
2105 			     ALPM_CTL_ALPM_ENABLE |
2106 			     ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2107 
2108 		intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
2109 			     PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2110 	}
2111 
2112 	/* Disable PSR on Sink */
2113 	drm_dp_dpcd_writeb(&intel_dp->aux,
2114 			   intel_psr_get_enable_sink_offset(intel_dp), 0);
2115 
2116 	if (!intel_dp->psr.panel_replay_enabled &&
2117 	    intel_dp->psr.psr2_enabled)
2118 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
2119 
2120 	intel_dp->psr.enabled = false;
2121 	intel_dp->psr.panel_replay_enabled = false;
2122 	intel_dp->psr.psr2_enabled = false;
2123 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2124 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2125 }
2126 
2127 /**
2128  * intel_psr_disable - Disable PSR
2129  * @intel_dp: Intel DP
2130  * @old_crtc_state: old CRTC state
2131  *
2132  * This function needs to be called before disabling the pipe.
2133  */
2134 void intel_psr_disable(struct intel_dp *intel_dp,
2135 		       const struct intel_crtc_state *old_crtc_state)
2136 {
2137 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2138 
2139 	if (!old_crtc_state->has_psr)
2140 		return;
2141 
2142 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
2143 		return;
2144 
2145 	mutex_lock(&intel_dp->psr.lock);
2146 
2147 	intel_psr_disable_locked(intel_dp);
2148 
2149 	mutex_unlock(&intel_dp->psr.lock);
2150 	cancel_work_sync(&intel_dp->psr.work);
2151 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2152 }
2153 
2154 /**
2155  * intel_psr_pause - Pause PSR
2156  * @intel_dp: Intel DP
2157  *
2158  * This function needs to be called after enabling PSR.
2159  */
2160 void intel_psr_pause(struct intel_dp *intel_dp)
2161 {
2162 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2163 	struct intel_psr *psr = &intel_dp->psr;
2164 
2165 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2166 		return;
2167 
2168 	mutex_lock(&psr->lock);
2169 
2170 	if (!psr->enabled) {
2171 		mutex_unlock(&psr->lock);
2172 		return;
2173 	}
2174 
2175 	/* If we ever hit this, we will need to add refcount to pause/resume */
2176 	drm_WARN_ON(&dev_priv->drm, psr->paused);
2177 
2178 	intel_psr_exit(intel_dp);
2179 	intel_psr_wait_exit_locked(intel_dp);
2180 	psr->paused = true;
2181 
2182 	mutex_unlock(&psr->lock);
2183 
2184 	cancel_work_sync(&psr->work);
2185 	cancel_delayed_work_sync(&psr->dc3co_work);
2186 }
2187 
2188 /**
2189  * intel_psr_resume - Resume PSR
2190  * @intel_dp: Intel DP
2191  *
2192  * This function needs to be called after pausing PSR.
2193  */
2194 void intel_psr_resume(struct intel_dp *intel_dp)
2195 {
2196 	struct intel_psr *psr = &intel_dp->psr;
2197 
2198 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2199 		return;
2200 
2201 	mutex_lock(&psr->lock);
2202 
2203 	if (!psr->paused)
2204 		goto unlock;
2205 
2206 	psr->paused = false;
2207 	intel_psr_activate(intel_dp);
2208 
2209 unlock:
2210 	mutex_unlock(&psr->lock);
2211 }
2212 
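/*
 * PSR2_MAN_TRK_CTL has a different bit layout on ADL-P and display 14+;
 * these helpers return the bit definition matching the running platform
 * (there is no separate enable bit on those platforms, hence the 0 below).
 */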
2213 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
2214 {
2215 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
2216 		PSR2_MAN_TRK_CTL_ENABLE;
2217 }
2218 
2219 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
2220 {
2221 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2222 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2223 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2224 }
2225 
2226 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
2227 {
2228 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2229 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2230 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2231 }
2232 
2233 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
2234 {
2235 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2236 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2237 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2238 }
2239 
2240 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2241 {
2242 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2243 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2244 
2245 	if (intel_dp->psr.psr2_sel_fetch_enabled)
2246 		intel_de_write(dev_priv,
2247 			       PSR2_MAN_TRK_CTL(cpu_transcoder),
2248 			       man_trk_ctl_enable_bit_get(dev_priv) |
2249 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2250 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2251 			       man_trk_ctl_continuos_full_frame(dev_priv));
2252 
2253 	/*
2254 	 * Display WA #0884: skl+
2255 	 * This documented WA for bxt can be safely applied
2256 	 * broadly so we can force HW tracking to exit PSR
2257 	 * instead of disabling and re-enabling.
2258 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2259 	 * but it makes more sense to write to the currently active
2260 	 * pipe.
2261 	 *
2262 	 * This workaround does not exist for platforms with display 10 or newer,
2263 	 * but testing proved that it works up to display 13; for anything newer
2264 	 * than that, further testing will be needed.
2265 	 */
2266 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2267 }
2268 
2269 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2270 {
2271 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2272 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2273 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2274 	struct intel_encoder *encoder;
2275 
2276 	if (!crtc_state->enable_psr2_sel_fetch)
2277 		return;
2278 
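	/* Don't reprogram MAN_TRK_CTL while continuous full frame fetch is in use. */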
2279 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2280 					     crtc_state->uapi.encoder_mask) {
2281 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2282 
2283 		lockdep_assert_held(&intel_dp->psr.lock);
2284 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2285 			return;
2286 		break;
2287 	}
2288 
2289 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2290 		       crtc_state->psr2_man_track_ctl);
2291 
2292 	if (!crtc_state->enable_psr2_su_region_et)
2293 		return;
2294 
2295 	intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2296 		       crtc_state->pipe_srcsz_early_tpt);
2297 }
2298 
2299 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2300 				  bool full_update)
2301 {
2302 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2303 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2304 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2305 
2306 	/* SF partial frame enable has to be set even on full update */
2307 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2308 
2309 	if (full_update) {
2310 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2311 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
2312 		goto exit;
2313 	}
2314 
2315 	if (crtc_state->psr2_su_area.y1 == -1)
2316 		goto exit;
2317 
2318 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2319 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2320 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2321 	} else {
2322 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2323 			    crtc_state->psr2_su_area.y1 % 4 ||
2324 			    crtc_state->psr2_su_area.y2 % 4);
2325 
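		/*
		 * Pre-ADLP hardware takes the SU region in 4-line units with
		 * 1-based addresses, hence the division by 4 and the + 1 below.
		 */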
2326 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2327 			crtc_state->psr2_su_area.y1 / 4 + 1);
2328 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2329 			crtc_state->psr2_su_area.y2 / 4 + 1);
2330 	}
2331 exit:
2332 	crtc_state->psr2_man_track_ctl = val;
2333 }
2334 
2335 static u32
2336 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2337 			       bool full_update, bool cursor_in_su_area)
2338 {
2339 	int width, height;
2340 
2341 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2342 		return 0;
2343 
2344 	if (!cursor_in_su_area)
2345 		return PIPESRC_WIDTH(0) |
2346 			PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src));
2347 
2348 	width = drm_rect_width(&crtc_state->psr2_su_area);
2349 	height = drm_rect_height(&crtc_state->psr2_su_area);
2350 
2351 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2352 }
2353 
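/*
 * Merge @damage_area (clipped against @pipe_src) into the running vertical
 * extent tracked in @overlap_damage_area; y1 == -1 means the extent is still
 * empty.
 */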
2354 static void clip_area_update(struct drm_rect *overlap_damage_area,
2355 			     struct drm_rect *damage_area,
2356 			     struct drm_rect *pipe_src)
2357 {
2358 	if (!drm_rect_intersect(damage_area, pipe_src))
2359 		return;
2360 
2361 	if (overlap_damage_area->y1 == -1) {
2362 		overlap_damage_area->y1 = damage_area->y1;
2363 		overlap_damage_area->y2 = damage_area->y2;
2364 		return;
2365 	}
2366 
2367 	if (damage_area->y1 < overlap_damage_area->y1)
2368 		overlap_damage_area->y1 = damage_area->y1;
2369 
2370 	if (damage_area->y2 > overlap_damage_area->y2)
2371 		overlap_damage_area->y2 = damage_area->y2;
2372 }
2373 
2374 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2375 {
2376 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2377 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2378 	u16 y_alignment;
2379 
2380 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2381 	if (crtc_state->dsc.compression_enable &&
2382 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2383 		y_alignment = vdsc_cfg->slice_height;
2384 	else
2385 		y_alignment = crtc_state->su_y_granularity;
2386 
2387 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2388 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2389 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2390 						y_alignment) + 1) * y_alignment;
2391 }
2392 
2393 /*
2394  * When early transport is in use we need to extend the SU area to cover
2395  * the cursor fully when the cursor is in the SU area.
2396  */
2397 static void
2398 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2399 				  struct intel_crtc *crtc,
2400 				  bool *cursor_in_su_area)
2401 {
2402 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2403 	struct intel_plane_state *new_plane_state;
2404 	struct intel_plane *plane;
2405 	int i;
2406 
2407 	if (!crtc_state->enable_psr2_su_region_et)
2408 		return;
2409 
2410 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2411 		struct drm_rect inter;
2412 
2413 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2414 			continue;
2415 
2416 		if (plane->id != PLANE_CURSOR)
2417 			continue;
2418 
2419 		if (!new_plane_state->uapi.visible)
2420 			continue;
2421 
2422 		inter = crtc_state->psr2_su_area;
2423 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2424 			continue;
2425 
2426 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2427 				 &crtc_state->pipe_src);
2428 		*cursor_in_su_area = true;
2429 	}
2430 }
2431 
2432 /*
2433  * TODO: Not clear how to handle planes with a negative position;
2434  * also, planes are not updated if they have a negative X
2435  * position, so for now do a full update in these cases.
2436  *
2437  * Plane scaling and rotation are not supported by selective fetch and both
2438  * properties can change without a modeset, so they need to be checked at
2439  * every atomic commit.
2440  */
2441 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2442 {
2443 	if (plane_state->uapi.dst.y1 < 0 ||
2444 	    plane_state->uapi.dst.x1 < 0 ||
2445 	    plane_state->scaler_id >= 0 ||
2446 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2447 		return false;
2448 
2449 	return true;
2450 }
2451 
2452 /*
2453  * Check for pipe properties that are not supported by selective fetch.
2454  *
2455  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2456  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2457  * enabled and going to the full update path.
2458  */
2459 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2460 {
2461 	if (crtc_state->scaler_state.scaler_id >= 0)
2462 		return false;
2463 
2464 	return true;
2465 }
2466 
2467 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2468 				struct intel_crtc *crtc)
2469 {
2470 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2471 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2472 	struct intel_plane_state *new_plane_state, *old_plane_state;
2473 	struct intel_plane *plane;
2474 	bool full_update = false, cursor_in_su_area = false;
2475 	int i, ret;
2476 
2477 	if (!crtc_state->enable_psr2_sel_fetch)
2478 		return 0;
2479 
2480 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2481 		full_update = true;
2482 		goto skip_sel_fetch_set_loop;
2483 	}
2484 
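	/* Start with an empty SU area; y1/y2 == -1 means nothing is damaged yet. */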
2485 	crtc_state->psr2_su_area.x1 = 0;
2486 	crtc_state->psr2_su_area.y1 = -1;
2487 	crtc_state->psr2_su_area.x2 = INT_MAX;
2488 	crtc_state->psr2_su_area.y2 = -1;
2489 
2490 	/*
2491 	 * Calculate the minimal selective fetch area of each plane and
2492 	 * calculate the pipe damaged area.
2493 	 * In the next loop the plane selective fetch area will actually be set
2494 	 * using the whole pipe damaged area.
2495 	 */
2496 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2497 					     new_plane_state, i) {
2498 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2499 						      .x2 = INT_MAX };
2500 
2501 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2502 			continue;
2503 
2504 		if (!new_plane_state->uapi.visible &&
2505 		    !old_plane_state->uapi.visible)
2506 			continue;
2507 
2508 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2509 			full_update = true;
2510 			break;
2511 		}
2512 
2513 		/*
2514 		 * If the visibility changed or the plane moved, mark the whole plane
2515 		 * area as damaged as it needs a complete redraw in both the new and
2516 		 * the old position.
2517 		 */
2518 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2519 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2520 				     &old_plane_state->uapi.dst)) {
2521 			if (old_plane_state->uapi.visible) {
2522 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2523 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2524 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2525 						 &crtc_state->pipe_src);
2526 			}
2527 
2528 			if (new_plane_state->uapi.visible) {
2529 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2530 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2531 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2532 						 &crtc_state->pipe_src);
2533 			}
2534 			continue;
2535 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2536 			/* If alpha changed mark the whole plane area as damaged */
2537 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2538 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2539 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2540 					 &crtc_state->pipe_src);
2541 			continue;
2542 		}
2543 
2544 		src = drm_plane_state_src(&new_plane_state->uapi);
2545 		drm_rect_fp_to_int(&src, &src);
2546 
2547 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2548 						     &new_plane_state->uapi, &damaged_area))
2549 			continue;
2550 
2551 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2552 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2553 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2554 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2555 
2556 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2557 	}
2558 
2559 	/*
2560 	 * TODO: For now we are just using full update in case
2561 	 * selective fetch area calculation fails. To optimize this we
2562 	 * should identify cases where this happens and fix the area
2563 	 * calculation for those.
2564 	 */
2565 	if (crtc_state->psr2_su_area.y1 == -1) {
2566 		drm_info_once(&dev_priv->drm,
2567 			      "Selective fetch area calculation failed in pipe %c\n",
2568 			      pipe_name(crtc->pipe));
2569 		full_update = true;
2570 	}
2571 
2572 	if (full_update)
2573 		goto skip_sel_fetch_set_loop;
2574 
2575 	/* Wa_14014971492 */
2576 	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2577 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2578 	    crtc_state->splitter.enable)
2579 		crtc_state->psr2_su_area.y1 = 0;
2580 
2581 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2582 	if (ret)
2583 		return ret;
2584 
2585 	/*
2586 	 * Adjust the SU area to cover the cursor fully as necessary (early
2587 	 * transport). This needs to be done after
2588 	 * drm_atomic_add_affected_planes to ensure a visible cursor is added to
2589 	 * the affected planes even when the cursor is not updated by itself.
2590 	 */
2591 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2592 
2593 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2594 
2595 	/*
2596 	 * Now that we have the pipe damaged area, check if it intersects with
2597 	 * every plane; if it does, set the plane selective fetch area.
2598 	 */
2599 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2600 					     new_plane_state, i) {
2601 		struct drm_rect *sel_fetch_area, inter;
2602 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2603 
2604 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2605 		    !new_plane_state->uapi.visible)
2606 			continue;
2607 
2608 		inter = crtc_state->psr2_su_area;
2609 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2610 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2611 			sel_fetch_area->y1 = -1;
2612 			sel_fetch_area->y2 = -1;
2613 			/*
2614 			 * if plane sel fetch was previously enabled ->
2615 			 * disable it
2616 			 */
2617 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2618 				crtc_state->update_planes |= BIT(plane->id);
2619 
2620 			continue;
2621 		}
2622 
2623 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2624 			full_update = true;
2625 			break;
2626 		}
2627 
2628 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2629 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2630 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2631 		crtc_state->update_planes |= BIT(plane->id);
2632 
2633 		/*
2634 		 * Sel_fetch_area is calculated for UV plane. Use
2635 		 * same area for Y plane as well.
2636 		 */
2637 		if (linked) {
2638 			struct intel_plane_state *linked_new_plane_state;
2639 			struct drm_rect *linked_sel_fetch_area;
2640 
2641 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2642 			if (IS_ERR(linked_new_plane_state))
2643 				return PTR_ERR(linked_new_plane_state);
2644 
2645 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2646 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2647 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2648 			crtc_state->update_planes |= BIT(linked->id);
2649 		}
2650 	}
2651 
2652 skip_sel_fetch_set_loop:
2653 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2654 	crtc_state->pipe_srcsz_early_tpt =
2655 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update,
2656 					       cursor_in_su_area);
2657 	return 0;
2658 }
2659 
2660 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2661 				struct intel_crtc *crtc)
2662 {
2663 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2664 	const struct intel_crtc_state *old_crtc_state =
2665 		intel_atomic_get_old_crtc_state(state, crtc);
2666 	const struct intel_crtc_state *new_crtc_state =
2667 		intel_atomic_get_new_crtc_state(state, crtc);
2668 	struct intel_encoder *encoder;
2669 
2670 	if (!HAS_PSR(i915))
2671 		return;
2672 
2673 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2674 					     old_crtc_state->uapi.encoder_mask) {
2675 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2676 		struct intel_psr *psr = &intel_dp->psr;
2677 		bool needs_to_disable = false;
2678 
2679 		mutex_lock(&psr->lock);
2680 
2681 		/*
2682 		 * Reasons to disable:
2683 		 * - PSR disabled in new state
2684 		 * - All planes will go inactive
2685 		 * - Changing between PSR versions
2686 		 * - Display WA #1136: skl, bxt
2687 		 */
2688 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2689 		needs_to_disable |= !new_crtc_state->has_psr;
2690 		needs_to_disable |= !new_crtc_state->active_planes;
2691 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2692 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2693 			new_crtc_state->wm_level_disabled;
2694 
2695 		if (psr->enabled && needs_to_disable)
2696 			intel_psr_disable_locked(intel_dp);
2697 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2698 			/* Wa_14015648006 */
2699 			wm_optimization_wa(intel_dp, new_crtc_state);
2700 
2701 		mutex_unlock(&psr->lock);
2702 	}
2703 }
2704 
2705 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2706 				 struct intel_crtc *crtc)
2707 {
2708 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2709 	const struct intel_crtc_state *crtc_state =
2710 		intel_atomic_get_new_crtc_state(state, crtc);
2711 	struct intel_encoder *encoder;
2712 
2713 	if (!crtc_state->has_psr)
2714 		return;
2715 
2716 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2717 					     crtc_state->uapi.encoder_mask) {
2718 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2719 		struct intel_psr *psr = &intel_dp->psr;
2720 		bool keep_disabled = false;
2721 
2722 		mutex_lock(&psr->lock);
2723 
2724 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2725 
2726 		keep_disabled |= psr->sink_not_reliable;
2727 		keep_disabled |= !crtc_state->active_planes;
2728 
2729 		/* Display WA #1136: skl, bxt */
2730 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2731 			crtc_state->wm_level_disabled;
2732 
2733 		if (!psr->enabled && !keep_disabled)
2734 			intel_psr_enable_locked(intel_dp, crtc_state);
2735 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2736 			/* Wa_14015648006 */
2737 			wm_optimization_wa(intel_dp, crtc_state);
2738 
2739 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2740 		if (crtc_state->crc_enabled && psr->enabled)
2741 			psr_force_hw_tracking_exit(intel_dp);
2742 
2743 		/*
2744 		 * Clear possible busy bits in case we have an
2745 		 * invalidate -> flip -> flush sequence.
2746 		 */
2747 		intel_dp->psr.busy_frontbuffer_bits = 0;
2748 
2749 		mutex_unlock(&psr->lock);
2750 	}
2751 }
2752 
2753 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2754 {
2755 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2756 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2757 
2758 	/*
2759 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2760 	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2761 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2762 	 */
2763 	return intel_de_wait_for_clear(dev_priv,
2764 				       EDP_PSR2_STATUS(cpu_transcoder),
2765 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2766 }
2767 
2768 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2769 {
2770 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2771 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2772 
2773 	/*
2774 	 * From bspec: Panel Self Refresh (BDW+)
2775 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2776 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2777 	 * defensive enough to cover everything.
2778 	 */
2779 	return intel_de_wait_for_clear(dev_priv,
2780 				       psr_status_reg(dev_priv, cpu_transcoder),
2781 				       EDP_PSR_STATUS_STATE_MASK, 50);
2782 }
2783 
2784 /**
2785  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2786  * @new_crtc_state: new CRTC state
2787  *
2788  * This function is expected to be called from pipe_update_start() where it is
2789  * not expected to race with PSR enable or disable.
2790  */
2791 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2792 {
2793 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2794 	struct intel_encoder *encoder;
2795 
2796 	if (!new_crtc_state->has_psr)
2797 		return;
2798 
2799 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2800 					     new_crtc_state->uapi.encoder_mask) {
2801 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2802 		int ret;
2803 
2804 		lockdep_assert_held(&intel_dp->psr.lock);
2805 
2806 		if (!intel_dp->psr.enabled)
2807 			continue;
2808 
2809 		if (intel_dp->psr.psr2_enabled)
2810 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2811 		else
2812 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2813 
2814 		if (ret)
2815 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2816 	}
2817 }
2818 
2819 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2820 {
2821 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2822 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2823 	i915_reg_t reg;
2824 	u32 mask;
2825 	int err;
2826 
2827 	if (!intel_dp->psr.enabled)
2828 		return false;
2829 
2830 	if (intel_dp->psr.psr2_enabled) {
2831 		reg = EDP_PSR2_STATUS(cpu_transcoder);
2832 		mask = EDP_PSR2_STATUS_STATE_MASK;
2833 	} else {
2834 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2835 		mask = EDP_PSR_STATUS_STATE_MASK;
2836 	}
2837 
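	/*
	 * Drop the PSR lock for the potentially long wait (up to 50 ms) so
	 * that other PSR paths are not blocked meanwhile.
	 */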
2838 	mutex_unlock(&intel_dp->psr.lock);
2839 
2840 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2841 	if (err)
2842 		drm_err(&dev_priv->drm,
2843 			"Timed out waiting for PSR Idle for re-enable\n");
2844 
2845 	/* After the unlocked wait, verify that PSR is still wanted! */
2846 	mutex_lock(&intel_dp->psr.lock);
2847 	return err == 0 && intel_dp->psr.enabled;
2848 }
2849 
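/*
 * Force a modeset on all eDP connectors so that a changed PSR debug mode is
 * taken into account on the next atomic commit.
 */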
2850 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2851 {
2852 	struct drm_connector_list_iter conn_iter;
2853 	struct drm_modeset_acquire_ctx ctx;
2854 	struct drm_atomic_state *state;
2855 	struct drm_connector *conn;
2856 	int err = 0;
2857 
2858 	state = drm_atomic_state_alloc(&dev_priv->drm);
2859 	if (!state)
2860 		return -ENOMEM;
2861 
2862 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2863 
2864 	state->acquire_ctx = &ctx;
2865 	to_intel_atomic_state(state)->internal = true;
2866 
2867 retry:
2868 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2869 	drm_for_each_connector_iter(conn, &conn_iter) {
2870 		struct drm_connector_state *conn_state;
2871 		struct drm_crtc_state *crtc_state;
2872 
2873 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2874 			continue;
2875 
2876 		conn_state = drm_atomic_get_connector_state(state, conn);
2877 		if (IS_ERR(conn_state)) {
2878 			err = PTR_ERR(conn_state);
2879 			break;
2880 		}
2881 
2882 		if (!conn_state->crtc)
2883 			continue;
2884 
2885 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2886 		if (IS_ERR(crtc_state)) {
2887 			err = PTR_ERR(crtc_state);
2888 			break;
2889 		}
2890 
2891 		/* Mark mode as changed to trigger a pipe->update() */
2892 		crtc_state->mode_changed = true;
2893 	}
2894 	drm_connector_list_iter_end(&conn_iter);
2895 
2896 	if (err == 0)
2897 		err = drm_atomic_commit(state);
2898 
2899 	if (err == -EDEADLK) {
2900 		drm_atomic_state_clear(state);
2901 		err = drm_modeset_backoff(&ctx);
2902 		if (!err)
2903 			goto retry;
2904 	}
2905 
2906 	drm_modeset_drop_locks(&ctx);
2907 	drm_modeset_acquire_fini(&ctx);
2908 	drm_atomic_state_put(state);
2909 
2910 	return err;
2911 }
2912 
2913 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2914 {
2915 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2916 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2917 	u32 old_mode;
2918 	int ret;
2919 
2920 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2921 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2922 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2923 		return -EINVAL;
2924 	}
2925 
2926 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2927 	if (ret)
2928 		return ret;
2929 
2930 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2931 	intel_dp->psr.debug = val;
2932 
2933 	/*
2934 	 * Do it right away if it's already enabled, otherwise it will be done
2935 	 * when enabling the source.
2936 	 */
2937 	if (intel_dp->psr.enabled)
2938 		psr_irq_control(intel_dp);
2939 
2940 	mutex_unlock(&intel_dp->psr.lock);
2941 
2942 	if (old_mode != mode)
2943 		ret = intel_psr_fastset_force(dev_priv);
2944 
2945 	return ret;
2946 }
2947 
2948 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2949 {
2950 	struct intel_psr *psr = &intel_dp->psr;
2951 
2952 	intel_psr_disable_locked(intel_dp);
2953 	psr->sink_not_reliable = true;
2954 	/* let's make sure the sink is awake */
2955 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2956 }
2957 
2958 static void intel_psr_work(struct work_struct *work)
2959 {
2960 	struct intel_dp *intel_dp =
2961 		container_of(work, typeof(*intel_dp), psr.work);
2962 
2963 	mutex_lock(&intel_dp->psr.lock);
2964 
2965 	if (!intel_dp->psr.enabled)
2966 		goto unlock;
2967 
2968 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2969 		intel_psr_handle_irq(intel_dp);
2970 
2971 	/*
2972 	 * We have to make sure PSR is ready for re-enable,
2973 	 * otherwise it stays disabled until the next full enable/disable cycle.
2974 	 * PSR might take some time to get fully disabled
2975 	 * and be ready for re-enable.
2976 	 */
2977 	if (!__psr_wait_for_idle_locked(intel_dp))
2978 		goto unlock;
2979 
2980 	/*
2981 	 * The delayed work can race with an invalidate hence we need to
2982 	 * recheck. Since psr_flush first clears this and then reschedules we
2983 	 * won't ever miss a flush when bailing out here.
2984 	 */
2985 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2986 		goto unlock;
2987 
2988 	intel_psr_activate(intel_dp);
2989 unlock:
2990 	mutex_unlock(&intel_dp->psr.lock);
2991 }
2992 
2993 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2994 {
2995 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2996 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2997 
2998 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2999 		u32 val;
3000 
3001 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3002 			/* Send one update otherwise lag is observed on screen */
3003 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
3004 			return;
3005 		}
3006 
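		/*
		 * Switch to continuous full frame fetches while the frontbuffer
		 * is busy; selective updates are restored again on flush.
		 */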
3007 		val = man_trk_ctl_enable_bit_get(dev_priv) |
3008 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
3009 		      man_trk_ctl_continuos_full_frame(dev_priv);
3010 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
3011 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
3012 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3013 	} else {
3014 		intel_psr_exit(intel_dp);
3015 	}
3016 }
3017 
3018 /**
3019  * intel_psr_invalidate - Invalidate PSR
3020  * @dev_priv: i915 device
3021  * @frontbuffer_bits: frontbuffer plane tracking bits
3022  * @origin: which operation caused the invalidate
3023  *
3024  * Since the hardware frontbuffer tracking has gaps we need to integrate
3025  * with the software frontbuffer tracking. This function gets called every
3026  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3027  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3028  *
3029  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3030  */
3031 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
3032 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3033 {
3034 	struct intel_encoder *encoder;
3035 
3036 	if (origin == ORIGIN_FLIP)
3037 		return;
3038 
3039 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3040 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3041 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3042 
3043 		mutex_lock(&intel_dp->psr.lock);
3044 		if (!intel_dp->psr.enabled) {
3045 			mutex_unlock(&intel_dp->psr.lock);
3046 			continue;
3047 		}
3048 
3049 		pipe_frontbuffer_bits &=
3050 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3051 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3052 
3053 		if (pipe_frontbuffer_bits)
3054 			_psr_invalidate_handle(intel_dp);
3055 
3056 		mutex_unlock(&intel_dp->psr.lock);
3057 	}
3058 }
3059 /*
3060  * When we completely rely on PSR2 S/W tracking in the future,
3061  * intel_psr_flush() will invalidate and flush PSR for the ORIGIN_FLIP
3062  * event as well, therefore tgl_dc3co_flush_locked() will need to be changed
3063  * accordingly in the future.
3064  */
3065 static void
3066 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3067 		       enum fb_op_origin origin)
3068 {
3069 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3070 
3071 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
3072 	    !intel_dp->psr.active)
3073 		return;
3074 
3075 	/*
3076 	 * At every frontbuffer flush/flip event the delay of the delayed work is
3077 	 * modified; when the delayed work finally runs it means the display has been idle.
3078 	 */
3079 	if (!(frontbuffer_bits &
3080 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3081 		return;
3082 
3083 	tgl_psr2_enable_dc3co(intel_dp);
3084 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3085 			 intel_dp->psr.dc3co_exit_delay);
3086 }
3087 
3088 static void _psr_flush_handle(struct intel_dp *intel_dp)
3089 {
3090 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3091 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3092 
3093 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3094 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3095 			/* can we turn CFF off? */
3096 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
3097 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
3098 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
3099 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
3100 					man_trk_ctl_continuos_full_frame(dev_priv);
3101 
3102 				/*
3103 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
3104 				 * updates. Still keep the CFF bit enabled as we don't have a proper
3105 				 * SU configuration in case an update is sent for any reason after
3106 				 * the SFF bit gets cleared by the HW on the next vblank.
3107 				 */
3108 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
3109 					       val);
3110 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
3111 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3112 			}
3113 		} else {
3114 			/*
3115 			 * continuous full frame is disabled, only a single full
3116 			 * frame is required
3117 			 */
3118 			psr_force_hw_tracking_exit(intel_dp);
3119 		}
3120 	} else {
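		/*
		 * With HW tracking, force a single PSR exit and let the work
		 * item re-activate PSR once the HW is idle again.
		 */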
3121 		psr_force_hw_tracking_exit(intel_dp);
3122 
3123 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3124 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3125 	}
3126 }
3127 
3128 /**
3129  * intel_psr_flush - Flush PSR
3130  * @dev_priv: i915 device
3131  * @frontbuffer_bits: frontbuffer plane tracking bits
3132  * @origin: which operation caused the flush
3133  *
3134  * Since the hardware frontbuffer tracking has gaps we need to integrate
3135  * with the software frontbuffer tracking. This function gets called every
3136  * time frontbuffer rendering has completed and flushed out to memory. PSR
3137  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3138  *
3139  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3140  */
3141 void intel_psr_flush(struct drm_i915_private *dev_priv,
3142 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3143 {
3144 	struct intel_encoder *encoder;
3145 
3146 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3147 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3148 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3149 
3150 		mutex_lock(&intel_dp->psr.lock);
3151 		if (!intel_dp->psr.enabled) {
3152 			mutex_unlock(&intel_dp->psr.lock);
3153 			continue;
3154 		}
3155 
3156 		pipe_frontbuffer_bits &=
3157 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3158 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3159 
3160 		/*
3161 		 * If PSR is paused by an explicit intel_psr_pause() call,
3162 		 * we have to ensure that the PSR is not activated until
3163 		 * intel_psr_resume() is called.
3164 		 */
3165 		if (intel_dp->psr.paused)
3166 			goto unlock;
3167 
3168 		if (origin == ORIGIN_FLIP ||
3169 		    (origin == ORIGIN_CURSOR_UPDATE &&
3170 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3171 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3172 			goto unlock;
3173 		}
3174 
3175 		if (pipe_frontbuffer_bits == 0)
3176 			goto unlock;
3177 
3178 		/* By definition flush = invalidate + flush */
3179 		_psr_flush_handle(intel_dp);
3180 unlock:
3181 		mutex_unlock(&intel_dp->psr.lock);
3182 	}
3183 }
3184 
3185 /**
3186  * intel_psr_init - Init basic PSR work and mutex.
3187  * @intel_dp: Intel DP
3188  *
3189  * This function is called after initializing the connector
3190  * (connector initialization handles the connector capabilities)
3191  * and it initializes basic PSR stuff for each DP encoder.
3192  */
3193 void intel_psr_init(struct intel_dp *intel_dp)
3194 {
3195 	struct intel_connector *connector = intel_dp->attached_connector;
3196 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3197 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3198 
3199 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
3200 		return;
3201 
3202 	/*
3203 	 * HSW spec explicitly says PSR is tied to port A.
3204 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
3205 	 * BDW, GEN9 and GEN11 are not validated by the HW team on any
3206 	 * transcoder other than the eDP one.
3207 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3208 	 * so let's keep it hardcoded to PORT_A for those platforms.
3209 	 * GEN12 and newer support an instance of PSR registers per transcoder.
3210 	 */
3211 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
3212 		drm_dbg_kms(&dev_priv->drm,
3213 			    "PSR condition failed: Port not supported\n");
3214 		return;
3215 	}
3216 
3217 	if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
3218 		intel_dp->psr.source_panel_replay_support = true;
3219 	else
3220 		intel_dp->psr.source_support = true;
3221 
3222 	/* Disable early transport for now */
3223 	intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
3224 
3225 	/* Set link_standby / link_off defaults */
3226 	if (DISPLAY_VER(dev_priv) < 12)
3227 		/* For platforms before TGL, respect the VBT setting again */
3228 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3229 
3230 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3231 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3232 	mutex_init(&intel_dp->psr.lock);
3233 }
3234 
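/*
 * Read the sink status and error status DPCD registers, using the Panel
 * Replay register offsets when Panel Replay is enabled and the PSR offsets
 * otherwise. Returns 0 on success with *status masked down to the sink state
 * bits, or the failing drm_dp_dpcd_readb() return value otherwise.
 */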
3235 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3236 					   u8 *status, u8 *error_status)
3237 {
3238 	struct drm_dp_aux *aux = &intel_dp->aux;
3239 	int ret;
3240 	unsigned int offset;
3241 
3242 	offset = intel_dp->psr.panel_replay_enabled ?
3243 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3244 
3245 	ret = drm_dp_dpcd_readb(aux, offset, status);
3246 	if (ret != 1)
3247 		return ret;
3248 
3249 	offset = intel_dp->psr.panel_replay_enabled ?
3250 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3251 
3252 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3253 	if (ret != 1)
3254 		return ret;
3255 
3256 	*status = *status & DP_PSR_SINK_STATE_MASK;
3257 
3258 	return 0;
3259 }
3260 
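/*
 * ALPM is required for PSR2, so check the sink's ALPM status on a short
 * pulse: on a lock timeout error, disable PSR, flag the sink as not
 * reliable and write the bit back to clear the error.
 */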
3261 static void psr_alpm_check(struct intel_dp *intel_dp)
3262 {
3263 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3264 	struct drm_dp_aux *aux = &intel_dp->aux;
3265 	struct intel_psr *psr = &intel_dp->psr;
3266 	u8 val;
3267 	int r;
3268 
3269 	if (!psr->psr2_enabled)
3270 		return;
3271 
3272 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3273 	if (r != 1) {
3274 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
3275 		return;
3276 	}
3277 
3278 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3279 		intel_psr_disable_locked(intel_dp);
3280 		psr->sink_not_reliable = true;
3281 		drm_dbg_kms(&dev_priv->drm,
3282 			    "ALPM lock timeout error, disabling PSR\n");
3283 
3284 		/* Clearing error */
3285 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3286 	}
3287 }
3288 
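/*
 * Check the DP_PSR_ESI register for a sink capability change; if one is
 * reported, disable PSR, flag the sink as not reliable and clear the bit.
 */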
3289 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3290 {
3291 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3292 	struct intel_psr *psr = &intel_dp->psr;
3293 	u8 val;
3294 	int r;
3295 
3296 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3297 	if (r != 1) {
3298 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3299 		return;
3300 	}
3301 
3302 	if (val & DP_PSR_CAPS_CHANGE) {
3303 		intel_psr_disable_locked(intel_dp);
3304 		psr->sink_not_reliable = true;
3305 		drm_dbg_kms(&dev_priv->drm,
3306 			    "Sink PSR capability changed, disabling PSR\n");
3307 
3308 		/* Clearing it */
3309 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3310 	}
3311 }
3312 
3313 /*
3314  * The following error bits are common to PSR and Panel Replay:
3315  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3316  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3317  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3318  * so this function relies on the PSR definitions.
3319  */
3320 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3321 {
3322 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3323 	struct intel_psr *psr = &intel_dp->psr;
3324 	u8 status, error_status;
3325 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3326 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3327 			  DP_PSR_LINK_CRC_ERROR;
3328 
3329 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3330 		return;
3331 
3332 	mutex_lock(&psr->lock);
3333 
3334 	if (!psr->enabled)
3335 		goto exit;
3336 
3337 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3338 		drm_err(&dev_priv->drm,
3339 			"Error reading PSR status or error status\n");
3340 		goto exit;
3341 	}
3342 
3343 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3344 	    (error_status & errors)) {
3345 		intel_psr_disable_locked(intel_dp);
3346 		psr->sink_not_reliable = true;
3347 	}
3348 
3349 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3350 	    !error_status)
3351 		drm_dbg_kms(&dev_priv->drm,
3352 			    "PSR sink internal error, disabling PSR\n");
3353 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3354 		drm_dbg_kms(&dev_priv->drm,
3355 			    "PSR RFB storage error, disabling PSR\n");
3356 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3357 		drm_dbg_kms(&dev_priv->drm,
3358 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3359 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3360 		drm_dbg_kms(&dev_priv->drm,
3361 			    "PSR Link CRC error, disabling PSR\n");
3362 
3363 	if (error_status & ~errors)
3364 		drm_err(&dev_priv->drm,
3365 			"PSR_ERROR_STATUS unhandled errors %x\n",
3366 			error_status & ~errors);
3367 	/* Clear the error status register */
3368 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3369 
3370 	if (!psr->panel_replay_enabled) {
3371 		psr_alpm_check(intel_dp);
3372 		psr_capability_changed_check(intel_dp);
3373 	}
3374 
3375 exit:
3376 	mutex_unlock(&psr->lock);
3377 }
3378 
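/*
 * Report whether PSR is currently enabled for this DP encoder, taking the
 * PSR lock so the answer is consistent with concurrent enable/disable.
 */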
3379 bool intel_psr_enabled(struct intel_dp *intel_dp)
3380 {
3381 	bool ret;
3382 
3383 	if (!CAN_PSR(intel_dp))
3384 		return false;
3385 
3386 	mutex_lock(&intel_dp->psr.lock);
3387 	ret = intel_dp->psr.enabled;
3388 	mutex_unlock(&intel_dp->psr.lock);
3389 
3390 	return ret;
3391 }
3392 
3393 /**
3394  * intel_psr_lock - grab PSR lock
3395  * @crtc_state: the crtc state
3396  *
3397  * This is initially meant to be used around the CRTC update, when
3398  * vblank-sensitive registers are updated and we need to grab the lock
3399  * before that point to avoid vblank evasion.
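 *
 * A minimal usage sketch around a pipe update (the intel_pipe_update_*()
 * calls are shown only for illustration; their exact signatures live in
 * intel_crtc.c):
 *
 *	intel_psr_lock(new_crtc_state);
 *	intel_pipe_update_start(...);
 *	... write vblank sensitive registers ...
 *	intel_pipe_update_end(...);
 *	intel_psr_unlock(new_crtc_state);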
3400  */
3401 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3402 {
3403 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3404 	struct intel_encoder *encoder;
3405 
3406 	if (!crtc_state->has_psr)
3407 		return;
3408 
3409 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3410 					     crtc_state->uapi.encoder_mask) {
3411 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3412 
3413 		mutex_lock(&intel_dp->psr.lock);
3414 		break;
3415 	}
3416 }
3417 
3418 /**
3419  * intel_psr_unlock - release PSR lock
3420  * @crtc_state: the crtc state
3421  *
3422  * Release the PSR lock that was held during pipe update.
3423  */
3424 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3425 {
3426 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3427 	struct intel_encoder *encoder;
3428 
3429 	if (!crtc_state->has_psr)
3430 		return;
3431 
3432 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3433 					     crtc_state->uapi.encoder_mask) {
3434 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3435 
3436 		mutex_unlock(&intel_dp->psr.lock);
3437 		break;
3438 	}
3439 }
3440 
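/*
 * Translate the live state field of the source PSR1/PSR2 status register
 * into a human readable string for the debugfs output.
 */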
3441 static void
3442 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3443 {
3444 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3445 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3446 	const char *status = "unknown";
3447 	u32 val, status_val;
3448 
3449 	if (intel_dp->psr.psr2_enabled) {
3450 		static const char * const live_status[] = {
3451 			"IDLE",
3452 			"CAPTURE",
3453 			"CAPTURE_FS",
3454 			"SLEEP",
3455 			"BUFON_FW",
3456 			"ML_UP",
3457 			"SU_STANDBY",
3458 			"FAST_SLEEP",
3459 			"DEEP_SLEEP",
3460 			"BUF_ON",
3461 			"TG_ON"
3462 		};
3463 		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3464 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3465 		if (status_val < ARRAY_SIZE(live_status))
3466 			status = live_status[status_val];
3467 	} else {
3468 		static const char * const live_status[] = {
3469 			"IDLE",
3470 			"SRDONACK",
3471 			"SRDENT",
3472 			"BUFOFF",
3473 			"BUFON",
3474 			"AUXACK",
3475 			"SRDOFFACK",
3476 			"SRDENT_ON",
3477 		};
3478 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3479 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3480 		if (status_val < ARRAY_SIZE(live_status))
3481 			status = live_status[status_val];
3482 	}
3483 
3484 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3485 }
3486 
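/*
 * Dump the source and sink PSR/Panel Replay state for debugfs; shared by the
 * device level i915_edp_psr_status and the per-connector i915_psr_status.
 */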
3487 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3488 {
3489 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3490 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3491 	struct intel_psr *psr = &intel_dp->psr;
3492 	intel_wakeref_t wakeref;
3493 	const char *status;
3494 	bool enabled;
3495 	u32 val;
3496 
3497 	seq_printf(m, "Sink support: PSR = %s",
3498 		   str_yes_no(psr->sink_support));
3499 
3500 	if (psr->sink_support)
3501 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3502 	seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3503 
3504 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3505 		return 0;
3506 
3507 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3508 	mutex_lock(&psr->lock);
3509 
3510 	if (psr->panel_replay_enabled)
3511 		status = "Panel Replay Enabled";
3512 	else if (psr->enabled)
3513 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3514 	else
3515 		status = "disabled";
3516 	seq_printf(m, "PSR mode: %s\n", status);
3517 
3518 	if (!psr->enabled) {
3519 		seq_printf(m, "PSR sink not reliable: %s\n",
3520 			   str_yes_no(psr->sink_not_reliable));
3521 
3522 		goto unlock;
3523 	}
3524 
3525 	if (psr->panel_replay_enabled) {
3526 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3527 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3528 	} else if (psr->psr2_enabled) {
3529 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3530 		enabled = val & EDP_PSR2_ENABLE;
3531 	} else {
3532 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3533 		enabled = val & EDP_PSR_ENABLE;
3534 	}
3535 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3536 		   str_enabled_disabled(enabled), val);
3537 	psr_source_status(intel_dp, m);
3538 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3539 		   psr->busy_frontbuffer_bits);
3540 
3541 	/*
3542 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3543 	 */
3544 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3545 	seq_printf(m, "Performance counter: %u\n",
3546 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3547 
3548 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3549 		seq_printf(m, "Last attempted entry at: %lld\n",
3550 			   psr->last_entry_attempt);
3551 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3552 	}
3553 
3554 	if (psr->psr2_enabled) {
3555 		u32 su_frames_val[3];
3556 		int frame;
3557 
3558 		/*
3559 		 * Reading all 3 registers beforehand to minimize crossing a
3560 		 * frame boundary between register reads
3561 		 */
3562 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3563 			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3564 			su_frames_val[frame / 3] = val;
3565 		}
3566 
3567 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3568 
3569 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3570 			u32 su_blocks;
3571 
3572 			su_blocks = su_frames_val[frame / 3] &
3573 				    PSR2_SU_STATUS_MASK(frame);
3574 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3575 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3576 		}
3577 
3578 		seq_printf(m, "PSR2 selective fetch: %s\n",
3579 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3580 	}
3581 
3582 unlock:
3583 	mutex_unlock(&psr->lock);
3584 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3585 
3586 	return 0;
3587 }
3588 
3589 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3590 {
3591 	struct drm_i915_private *dev_priv = m->private;
3592 	struct intel_dp *intel_dp = NULL;
3593 	struct intel_encoder *encoder;
3594 
3595 	if (!HAS_PSR(dev_priv))
3596 		return -ENODEV;
3597 
3598 	/* Find the first eDP which supports PSR */
3599 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3600 		intel_dp = enc_to_intel_dp(encoder);
3601 		break;
3602 	}
3603 
3604 	if (!intel_dp)
3605 		return -ENODEV;
3606 
3607 	return intel_psr_status(m, intel_dp);
3608 }
3609 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3610 
3611 static int
3612 i915_edp_psr_debug_set(void *data, u64 val)
3613 {
3614 	struct drm_i915_private *dev_priv = data;
3615 	struct intel_encoder *encoder;
3616 	intel_wakeref_t wakeref;
3617 	int ret = -ENODEV;
3618 
3619 	if (!HAS_PSR(dev_priv))
3620 		return ret;
3621 
3622 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3623 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3624 
3625 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3626 
3627 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3628 
3629 		// TODO: split to each transcoder's PSR debug state
3630 		ret = intel_psr_debug_set(intel_dp, val);
3631 
3632 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3633 	}
3634 
3635 	return ret;
3636 }
3637 
3638 static int
3639 i915_edp_psr_debug_get(void *data, u64 *val)
3640 {
3641 	struct drm_i915_private *dev_priv = data;
3642 	struct intel_encoder *encoder;
3643 
3644 	if (!HAS_PSR(dev_priv))
3645 		return -ENODEV;
3646 
3647 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3648 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3649 
3650 		// TODO: split to each transcoder's PSR debug state
3651 		*val = READ_ONCE(intel_dp->psr.debug);
3652 		return 0;
3653 	}
3654 
3655 	return -ENODEV;
3656 }
3657 
3658 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3659 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3660 			"%llu\n");
3661 
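/*
 * Register the device level PSR debugfs files. A minimal usage sketch from
 * userspace, assuming the standard debugfs mount point and DRM minor 0:
 *
 *	# cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */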
3662 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3663 {
3664 	struct drm_minor *minor = i915->drm.primary;
3665 
3666 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3667 			    i915, &i915_edp_psr_debug_fops);
3668 
3669 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3670 			    i915, &i915_edp_psr_status_fops);
3671 }
3672 
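/* Human readable mode name (Panel Replay vs. PSR) for the sink debugfs output. */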
3673 static const char *psr_mode_str(struct intel_dp *intel_dp)
3674 {
3675 	if (intel_dp->psr.panel_replay_enabled)
3676 		return "PANEL-REPLAY";
3677 	else if (intel_dp->psr.enabled)
3678 		return "PSR";
3679 
3680 	return "unknown";
3681 }
3682 
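/*
 * Decode the sink status and error status read over DPCD into human readable
 * strings, using the Panel Replay frame lock states when Panel Replay is
 * enabled and the PSR sink states otherwise.
 */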
3683 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3684 {
3685 	struct intel_connector *connector = m->private;
3686 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3687 	static const char * const sink_status[] = {
3688 		"inactive",
3689 		"transition to active, capture and display",
3690 		"active, display from RFB",
3691 		"active, capture and display on sink device timings",
3692 		"transition to inactive, capture and display, timing re-sync",
3693 		"reserved",
3694 		"reserved",
3695 		"sink internal error",
3696 	};
3697 	static const char * const panel_replay_status[] = {
3698 		"Sink device frame is locked to the Source device",
3699 		"Sink device is coasting, using the VTotal target",
3700 		"Sink device is governing the frame rate (frame rate unlock is granted)",
3701 		"Sink device in the process of re-locking with the Source device",
3702 	};
3703 	const char *str;
3704 	int ret;
3705 	u8 status, error_status;
3706 	u32 idx;
3707 
3708 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3709 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3710 		return -ENODEV;
3711 	}
3712 
3713 	if (connector->base.status != connector_status_connected)
3714 		return -ENODEV;
3715 
3716 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3717 	if (ret)
3718 		return ret;
3719 
3720 	str = "unknown";
3721 	if (intel_dp->psr.panel_replay_enabled) {
3722 		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3723 		if (idx < ARRAY_SIZE(panel_replay_status))
3724 			str = panel_replay_status[idx];
3725 	} else if (intel_dp->psr.enabled) {
3726 		idx = status & DP_PSR_SINK_STATE_MASK;
3727 		if (idx < ARRAY_SIZE(sink_status))
3728 			str = sink_status[idx];
3729 	}
3730 
3731 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3732 
3733 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3734 
3735 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3736 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3737 			    DP_PSR_LINK_CRC_ERROR))
3738 		seq_puts(m, ":\n");
3739 	else
3740 		seq_puts(m, "\n");
3741 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3742 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3743 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3744 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3745 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3746 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3747 
3748 	return ret;
3749 }
3750 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3751 
3752 static int i915_psr_status_show(struct seq_file *m, void *data)
3753 {
3754 	struct intel_connector *connector = m->private;
3755 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3756 
3757 	return intel_psr_status(m, intel_dp);
3758 }
3759 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3760 
3761 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3762 {
3763 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3764 	struct dentry *root = connector->base.debugfs_entry;
3765 
3766 	/* TODO: Add support for MST connectors as well. */
3767 	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3768 	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3769 	    connector->mst_port)
3770 		return;
3771 
3772 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3773 			    connector, &i915_psr_sink_status_fops);
3774 
3775 	if (HAS_PSR(i915) || HAS_DP20(i915))
3776 		debugfs_create_file("i915_psr_status", 0444, root,
3777 				    connector, &i915_psr_status_fops);
3778 }
3779