xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision c156ef573efe4230ef3dc1ff2ec0038fe0eb217f)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 
30 #include "i915_drv.h"
31 #include "i915_reg.h"
32 #include "intel_alpm.h"
33 #include "intel_atomic.h"
34 #include "intel_crtc.h"
35 #include "intel_cursor_regs.h"
36 #include "intel_ddi.h"
37 #include "intel_de.h"
38 #include "intel_display_irq.h"
39 #include "intel_display_types.h"
40 #include "intel_dp.h"
41 #include "intel_dp_aux.h"
42 #include "intel_frontbuffer.h"
43 #include "intel_hdmi.h"
44 #include "intel_psr.h"
45 #include "intel_psr_regs.h"
46 #include "intel_snps_phy.h"
47 #include "skl_universal_plane.h"
48 
49 /**
50  * DOC: Panel Self Refresh (PSR/SRD)
51  *
52  * Since Haswell Display controller supports Panel Self-Refresh on display
53  * panels witch have a remote frame buffer (RFB) implemented according to PSR
54  * spec in eDP1.3. PSR feature allows the display to go to lower standby states
55  * when system is idle but display is on as it eliminates display refresh
56  * request to DDR memory completely as long as the frame buffer for that
57  * display is unchanged.
58  *
59  * Panel Self Refresh must be supported by both Hardware (source) and
60  * Panel (sink).
61  *
62  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
63  * to power down the link and memory controller. For DSI panels the same idea
64  * is called "manual mode".
65  *
66  * The implementation uses the hardware-based PSR support which automatically
67  * enters/exits self-refresh mode. The hardware takes care of sending the
68  * required DP aux message and could even retrain the link (that part isn't
69  * enabled yet though). The hardware also keeps track of any frontbuffer
70  * changes to know when to exit self-refresh mode again. Unfortunately that
71  * part doesn't work too well, hence why the i915 PSR support uses the
72  * software frontbuffer tracking to make sure it doesn't miss a screen
73  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
74  * get called by the frontbuffer tracking code. Note that because of locking
75  * issues the self-refresh re-enable code is done from a work queue, which
76  * must be correctly synchronized/cancelled when shutting down the pipe."
77  *
78  * DC3CO (DC3 clock off)
79  *
80  * On top of PSR2, GEN12 adds a intermediate power savings state that turns
81  * clock off automatically during PSR2 idle state.
82  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
83  * entry/exit allows the HW to enter a low-power state even when page flipping
84  * periodically (for instance a 30fps video playback scenario).
85  *
86  * Every time a flips occurs PSR2 will get out of deep sleep state(if it was),
87  * so DC3CO is enabled and tgl_dc3co_disable_work is schedule to run after 6
88  * frames, if no other flip occurs and the function above is executed, DC3CO is
89  * disabled and PSR2 is configured to enter deep sleep, resetting again in case
90  * of another flip.
91  * Front buffer modifications do not trigger DC3CO activation on purpose as it
92  * would bring a lot of complexity and most of the moderns systems will only
93  * use page flips.
94  */
95 
96 /*
97  * Description of PSR mask bits:
98  *
99  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
100  *
101  *  When unmasked (nearly) all display register writes (eg. even
102  *  SWF) trigger a PSR exit. Some registers are excluded from this
103  *  and they have a more specific mask (described below). On icl+
104  *  this bit no longer exists and is effectively always set.
105  *
106  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
107  *
108  *  When unmasked (nearly) all pipe/plane register writes
109  *  trigger a PSR exit. Some plane registers are excluded from this
110  *  and they have a more specific mask (described below).
111  *
112  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
113  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
114  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
115  *
116  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
117  *  SPR_SURF/CURBASE are not included in this and instead are
118  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
119  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
120  *
121  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
122  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
123  *
124  *  When unmasked PSR is blocked as long as the sprite
125  *  plane is enabled. skl+ with their universal planes no
126  *  longer have a mask bit like this, and no plane being
127  *  enabledb blocks PSR.
128  *
129  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
130  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
131  *
132  *  When umasked CURPOS writes trigger a PSR exit. On skl+
133  *  this doesn't exit but CURPOS is included in the
134  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
135  *
136  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
137  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
138  *
139  *  When unmasked PSR is blocked as long as vblank and/or vsync
140  *  interrupt is unmasked in IMR *and* enabled in IER.
141  *
142  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
143  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
144  *
145  *  Selectcs whether PSR exit generates an extra vblank before
146  *  the first frame is transmitted. Also note the opposite polarity
147  *  if the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
148  *  unmasked==do not generate the extra vblank).
149  *
150  *  With DC states enabled the extra vblank happens after link training,
151  *  with DC states disabled it happens immediately upuon PSR exit trigger.
152  *  No idea as of now why there is a difference. HSW/BDW (which don't
153  *  even have DMC) always generate it after link training. Go figure.
154  *
155  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
156  *  and thus won't latch until the first vblank. So with DC states
157  *  enabled the register effctively uses the reset value during DC5
158  *  exit+PSR exit sequence, and thus the bit does nothing until
159  *  latched by the vblank that it was trying to prevent from being
160  *  generated in the first place. So we should probably call this
161  *  one a chicken/egg bit instead on skl+.
162  *
163  *  In standby mode (as opposed to link-off) this makes no difference
164  *  as the timing generator keeps running the whole time generating
165  *  normal periodic vblanks.
166  *
167  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
168  *  and doing so makes the behaviour match the skl+ reset value.
169  *
170  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
171  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
172  *
173  *  On BDW without this bit is no vblanks whatsoever are
174  *  generated after PSR exit. On HSW this has no apparant effect.
175  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
176  *
177  * The rest of the bits are more self-explanatory and/or
178  * irrelevant for normal operation.
179  *
180  * Description of intel_crtc_state variables. has_psr, has_panel_replay and
181  * has_sel_update:
182  *
183  *  has_psr (alone):					PSR1
184  *  has_psr + has_sel_update:				PSR2
185  *  has_psr + has_panel_replay:				Panel Replay
186  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
187  *
188  * Description of some intel_psr varibles. enabled, panel_replay_enabled,
189  * sel_update_enabled
190  *
191  *  enabled (alone):						PSR1
192  *  enabled + sel_update_enabled:				PSR2
193  *  enabled + panel_replay_enabled:				Panel Replay
194  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
195  */
196 
197 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
198 			   (intel_dp)->psr.source_support)
199 
200 bool intel_encoder_can_psr(struct intel_encoder *encoder)
201 {
202 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
203 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
204 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
205 	else
206 		return false;
207 }
208 
209 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
210 				  const struct intel_crtc_state *crtc_state)
211 {
212 	/*
213 	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
214 	 * the output is enabled. For non-eDP outputs the main link is always
215 	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
216 	 * for eDP.
217 	 *
218 	 * TODO:
219 	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
220 	 *   the ALPM with main-link off mode is not enabled.
221 	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
222 	 *   main-link off mode is added for it and this mode gets enabled.
223 	 */
224 	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
225 	       intel_encoder_can_psr(encoder);
226 }
227 
228 static bool psr_global_enabled(struct intel_dp *intel_dp)
229 {
230 	struct intel_display *display = to_intel_display(intel_dp);
231 	struct intel_connector *connector = intel_dp->attached_connector;
232 
233 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
234 	case I915_PSR_DEBUG_DEFAULT:
235 		if (display->params.enable_psr == -1)
236 			return intel_dp_is_edp(intel_dp) ?
237 				connector->panel.vbt.psr.enable :
238 				true;
239 		return display->params.enable_psr;
240 	case I915_PSR_DEBUG_DISABLE:
241 		return false;
242 	default:
243 		return true;
244 	}
245 }
246 
247 static bool psr2_global_enabled(struct intel_dp *intel_dp)
248 {
249 	struct intel_display *display = to_intel_display(intel_dp);
250 
251 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
252 	case I915_PSR_DEBUG_DISABLE:
253 	case I915_PSR_DEBUG_FORCE_PSR1:
254 		return false;
255 	default:
256 		if (display->params.enable_psr == 1)
257 			return false;
258 		return true;
259 	}
260 }
261 
262 static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
263 {
264 	struct intel_display *display = to_intel_display(intel_dp);
265 
266 	if (display->params.enable_psr != -1)
267 		return false;
268 
269 	return true;
270 }
271 
272 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
273 {
274 	struct intel_display *display = to_intel_display(intel_dp);
275 
276 	if ((display->params.enable_psr != -1) ||
277 	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
278 		return false;
279 	return true;
280 }
281 
282 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
283 {
284 	struct intel_display *display = to_intel_display(intel_dp);
285 
286 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
287 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
288 }
289 
290 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
291 {
292 	struct intel_display *display = to_intel_display(intel_dp);
293 
294 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
295 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
296 }
297 
298 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
299 {
300 	struct intel_display *display = to_intel_display(intel_dp);
301 
302 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
303 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
304 }
305 
306 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
307 {
308 	struct intel_display *display = to_intel_display(intel_dp);
309 
310 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
311 		EDP_PSR_MASK(intel_dp->psr.transcoder);
312 }
313 
314 static i915_reg_t psr_ctl_reg(struct intel_display *display,
315 			      enum transcoder cpu_transcoder)
316 {
317 	if (DISPLAY_VER(display) >= 8)
318 		return EDP_PSR_CTL(display, cpu_transcoder);
319 	else
320 		return HSW_SRD_CTL;
321 }
322 
323 static i915_reg_t psr_debug_reg(struct intel_display *display,
324 				enum transcoder cpu_transcoder)
325 {
326 	if (DISPLAY_VER(display) >= 8)
327 		return EDP_PSR_DEBUG(display, cpu_transcoder);
328 	else
329 		return HSW_SRD_DEBUG;
330 }
331 
332 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
333 				   enum transcoder cpu_transcoder)
334 {
335 	if (DISPLAY_VER(display) >= 8)
336 		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
337 	else
338 		return HSW_SRD_PERF_CNT;
339 }
340 
341 static i915_reg_t psr_status_reg(struct intel_display *display,
342 				 enum transcoder cpu_transcoder)
343 {
344 	if (DISPLAY_VER(display) >= 8)
345 		return EDP_PSR_STATUS(display, cpu_transcoder);
346 	else
347 		return HSW_SRD_STATUS;
348 }
349 
350 static i915_reg_t psr_imr_reg(struct intel_display *display,
351 			      enum transcoder cpu_transcoder)
352 {
353 	if (DISPLAY_VER(display) >= 12)
354 		return TRANS_PSR_IMR(display, cpu_transcoder);
355 	else
356 		return EDP_PSR_IMR;
357 }
358 
359 static i915_reg_t psr_iir_reg(struct intel_display *display,
360 			      enum transcoder cpu_transcoder)
361 {
362 	if (DISPLAY_VER(display) >= 12)
363 		return TRANS_PSR_IIR(display, cpu_transcoder);
364 	else
365 		return EDP_PSR_IIR;
366 }
367 
368 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
369 				  enum transcoder cpu_transcoder)
370 {
371 	if (DISPLAY_VER(display) >= 8)
372 		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
373 	else
374 		return HSW_SRD_AUX_CTL;
375 }
376 
377 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
378 				   enum transcoder cpu_transcoder, int i)
379 {
380 	if (DISPLAY_VER(display) >= 8)
381 		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
382 	else
383 		return HSW_SRD_AUX_DATA(i);
384 }
385 
386 static void psr_irq_control(struct intel_dp *intel_dp)
387 {
388 	struct intel_display *display = to_intel_display(intel_dp);
389 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
390 	u32 mask;
391 
392 	if (intel_dp->psr.panel_replay_enabled)
393 		return;
394 
395 	mask = psr_irq_psr_error_bit_get(intel_dp);
396 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
397 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
398 			psr_irq_pre_entry_bit_get(intel_dp);
399 
400 	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
401 		     psr_irq_mask_get(intel_dp), ~mask);
402 }
403 
404 static void psr_event_print(struct intel_display *display,
405 			    u32 val, bool sel_update_enabled)
406 {
407 	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
408 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
409 		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
410 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
411 		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
412 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
413 		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
414 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
415 		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
416 	if (val & PSR_EVENT_GRAPHICS_RESET)
417 		drm_dbg_kms(display->drm, "\tGraphics reset\n");
418 	if (val & PSR_EVENT_PCH_INTERRUPT)
419 		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
420 	if (val & PSR_EVENT_MEMORY_UP)
421 		drm_dbg_kms(display->drm, "\tMemory up\n");
422 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
423 		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
424 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
425 		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
426 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
427 		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
428 	if (val & PSR_EVENT_REGISTER_UPDATE)
429 		drm_dbg_kms(display->drm, "\tRegister updated\n");
430 	if (val & PSR_EVENT_HDCP_ENABLE)
431 		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
432 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
433 		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
434 	if (val & PSR_EVENT_VBI_ENABLE)
435 		drm_dbg_kms(display->drm, "\tVBI enabled\n");
436 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
437 		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
438 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
439 		drm_dbg_kms(display->drm, "\tPSR disabled\n");
440 }
441 
442 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
443 {
444 	struct intel_display *display = to_intel_display(intel_dp);
445 	struct drm_i915_private *dev_priv = to_i915(display->drm);
446 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
447 	ktime_t time_ns =  ktime_get();
448 
449 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
450 		intel_dp->psr.last_entry_attempt = time_ns;
451 		drm_dbg_kms(display->drm,
452 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
453 			    transcoder_name(cpu_transcoder));
454 	}
455 
456 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
457 		intel_dp->psr.last_exit = time_ns;
458 		drm_dbg_kms(display->drm,
459 			    "[transcoder %s] PSR exit completed\n",
460 			    transcoder_name(cpu_transcoder));
461 
462 		if (DISPLAY_VER(display) >= 9) {
463 			u32 val;
464 
465 			val = intel_de_rmw(dev_priv,
466 					   PSR_EVENT(dev_priv, cpu_transcoder),
467 					   0, 0);
468 
469 			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
470 		}
471 	}
472 
473 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
474 		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
475 			 transcoder_name(cpu_transcoder));
476 
477 		intel_dp->psr.irq_aux_error = true;
478 
479 		/*
480 		 * If this interruption is not masked it will keep
481 		 * interrupting so fast that it prevents the scheduled
482 		 * work to run.
483 		 * Also after a PSR error, we don't want to arm PSR
484 		 * again so we don't care about unmask the interruption
485 		 * or unset irq_aux_error.
486 		 */
487 		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
488 			     0, psr_irq_psr_error_bit_get(intel_dp));
489 
490 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
491 	}
492 }
493 
494 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
495 {
496 	struct intel_display *display = to_intel_display(intel_dp);
497 	u8 val = 8; /* assume the worst if we can't read the value */
498 
499 	if (drm_dp_dpcd_readb(&intel_dp->aux,
500 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
501 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
502 	else
503 		drm_dbg_kms(display->drm,
504 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
505 	return val;
506 }
507 
508 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
509 {
510 	u8 su_capability = 0;
511 
512 	if (intel_dp->psr.sink_panel_replay_su_support)
513 		drm_dp_dpcd_readb(&intel_dp->aux,
514 				  DP_PANEL_PANEL_REPLAY_CAPABILITY,
515 				  &su_capability);
516 	else
517 		su_capability = intel_dp->psr_dpcd[1];
518 
519 	return su_capability;
520 }
521 
522 static unsigned int
523 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
524 {
525 	return intel_dp->psr.sink_panel_replay_su_support ?
526 		DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
527 		DP_PSR2_SU_X_GRANULARITY;
528 }
529 
530 static unsigned int
531 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
532 {
533 	return intel_dp->psr.sink_panel_replay_su_support ?
534 		DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
535 		DP_PSR2_SU_Y_GRANULARITY;
536 }
537 
538 /*
539  * Note: Bits related to granularity are same in panel replay and psr
540  * registers. Rely on PSR definitions on these "common" bits.
541  */
542 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
543 {
544 	struct intel_display *display = to_intel_display(intel_dp);
545 	ssize_t r;
546 	u16 w;
547 	u8 y;
548 
549 	/*
550 	 * TODO: Do we need to take into account panel supporting both PSR and
551 	 * Panel replay?
552 	 */
553 
554 	/*
555 	 * If sink don't have specific granularity requirements set legacy
556 	 * ones.
557 	 */
558 	if (!(intel_dp_get_su_capability(intel_dp) &
559 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
560 		/* As PSR2 HW sends full lines, we do not care about x granularity */
561 		w = 4;
562 		y = 4;
563 		goto exit;
564 	}
565 
566 	r = drm_dp_dpcd_read(&intel_dp->aux,
567 			     intel_dp_get_su_x_granularity_offset(intel_dp),
568 			     &w, 2);
569 	if (r != 2)
570 		drm_dbg_kms(display->drm,
571 			    "Unable to read selective update x granularity\n");
572 	/*
573 	 * Spec says that if the value read is 0 the default granularity should
574 	 * be used instead.
575 	 */
576 	if (r != 2 || w == 0)
577 		w = 4;
578 
579 	r = drm_dp_dpcd_read(&intel_dp->aux,
580 			     intel_dp_get_su_y_granularity_offset(intel_dp),
581 			     &y, 1);
582 	if (r != 1) {
583 		drm_dbg_kms(display->drm,
584 			    "Unable to read selective update y granularity\n");
585 		y = 4;
586 	}
587 	if (y == 0)
588 		y = 1;
589 
590 exit:
591 	intel_dp->psr.su_w_granularity = w;
592 	intel_dp->psr.su_y_granularity = y;
593 }
594 
595 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
596 {
597 	struct intel_display *display = to_intel_display(intel_dp);
598 
599 	if (intel_dp_is_edp(intel_dp)) {
600 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
601 			drm_dbg_kms(display->drm,
602 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
603 			return;
604 		}
605 
606 		if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
607 			drm_dbg_kms(display->drm,
608 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
609 			return;
610 		}
611 	}
612 
613 	intel_dp->psr.sink_panel_replay_support = true;
614 
615 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
616 		intel_dp->psr.sink_panel_replay_su_support = true;
617 
618 	drm_dbg_kms(display->drm,
619 		    "Panel replay %sis supported by panel\n",
620 		    intel_dp->psr.sink_panel_replay_su_support ?
621 		    "selective_update " : "");
622 }
623 
624 static void _psr_init_dpcd(struct intel_dp *intel_dp)
625 {
626 	struct intel_display *display = to_intel_display(intel_dp);
627 
628 	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
629 		    intel_dp->psr_dpcd[0]);
630 
631 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
632 		drm_dbg_kms(display->drm,
633 			    "PSR support not currently available for this panel\n");
634 		return;
635 	}
636 
637 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
638 		drm_dbg_kms(display->drm,
639 			    "Panel lacks power state control, PSR cannot be enabled\n");
640 		return;
641 	}
642 
643 	intel_dp->psr.sink_support = true;
644 	intel_dp->psr.sink_sync_latency =
645 		intel_dp_get_sink_sync_latency(intel_dp);
646 
647 	if (DISPLAY_VER(display) >= 9 &&
648 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
649 		bool y_req = intel_dp->psr_dpcd[1] &
650 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
651 
652 		/*
653 		 * All panels that supports PSR version 03h (PSR2 +
654 		 * Y-coordinate) can handle Y-coordinates in VSC but we are
655 		 * only sure that it is going to be used when required by the
656 		 * panel. This way panel is capable to do selective update
657 		 * without a aux frame sync.
658 		 *
659 		 * To support PSR version 02h and PSR version 03h without
660 		 * Y-coordinate requirement panels we would need to enable
661 		 * GTC first.
662 		 */
663 		intel_dp->psr.sink_psr2_support = y_req &&
664 			intel_alpm_aux_wake_supported(intel_dp);
665 		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
666 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
667 	}
668 }
669 
670 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
671 {
672 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
673 			 sizeof(intel_dp->psr_dpcd));
674 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
675 			  &intel_dp->pr_dpcd);
676 
677 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
678 		_panel_replay_init_dpcd(intel_dp);
679 
680 	if (intel_dp->psr_dpcd[0])
681 		_psr_init_dpcd(intel_dp);
682 
683 	if (intel_dp->psr.sink_psr2_support ||
684 	    intel_dp->psr.sink_panel_replay_su_support)
685 		intel_dp_get_su_granularity(intel_dp);
686 }
687 
688 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
689 {
690 	struct intel_display *display = to_intel_display(intel_dp);
691 	struct drm_i915_private *dev_priv = to_i915(display->drm);
692 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
693 	u32 aux_clock_divider, aux_ctl;
694 	/* write DP_SET_POWER=D0 */
695 	static const u8 aux_msg[] = {
696 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
697 		[1] = (DP_SET_POWER >> 8) & 0xff,
698 		[2] = DP_SET_POWER & 0xff,
699 		[3] = 1 - 1,
700 		[4] = DP_SET_POWER_D0,
701 	};
702 	int i;
703 
704 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
705 	for (i = 0; i < sizeof(aux_msg); i += 4)
706 		intel_de_write(dev_priv,
707 			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
708 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
709 
710 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
711 
712 	/* Start with bits set for DDI_AUX_CTL register */
713 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
714 					     aux_clock_divider);
715 
716 	/* Select only valid bits for SRD_AUX_CTL */
717 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
718 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
719 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
720 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
721 
722 	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
723 		       aux_ctl);
724 }
725 
726 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
727 {
728 	struct intel_display *display = to_intel_display(intel_dp);
729 
730 	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
731 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
732 		return false;
733 
734 	return panel_replay ?
735 		intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
736 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
737 		psr2_su_region_et_global_enabled(intel_dp);
738 }
739 
740 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
741 				      const struct intel_crtc_state *crtc_state)
742 {
743 	u8 val = DP_PANEL_REPLAY_ENABLE |
744 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
745 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
746 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
747 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
748 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
749 
750 	if (crtc_state->has_sel_update)
751 		val |= DP_PANEL_REPLAY_SU_ENABLE;
752 
753 	if (crtc_state->enable_psr2_su_region_et)
754 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
755 
756 	if (crtc_state->req_psr2_sdp_prior_scanline)
757 		panel_replay_config2 |=
758 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
759 
760 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
761 
762 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
763 			   panel_replay_config2);
764 }
765 
766 static void _psr_enable_sink(struct intel_dp *intel_dp,
767 			     const struct intel_crtc_state *crtc_state)
768 {
769 	struct intel_display *display = to_intel_display(intel_dp);
770 	u8 val = 0;
771 
772 	if (crtc_state->has_sel_update) {
773 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
774 	} else {
775 		if (intel_dp->psr.link_standby)
776 			val |= DP_PSR_MAIN_LINK_ACTIVE;
777 
778 		if (DISPLAY_VER(display) >= 8)
779 			val |= DP_PSR_CRC_VERIFICATION;
780 	}
781 
782 	if (crtc_state->req_psr2_sdp_prior_scanline)
783 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
784 
785 	if (crtc_state->enable_psr2_su_region_et)
786 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
787 
788 	if (intel_dp->psr.entry_setup_frames > 0)
789 		val |= DP_PSR_FRAME_CAPTURE;
790 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
791 
792 	val |= DP_PSR_ENABLE;
793 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
794 }
795 
796 static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
797 				       const struct intel_crtc_state *crtc_state)
798 {
799 	u8 val;
800 
801 	/*
802 	 * eDP Panel Replay uses always ALPM
803 	 * PSR2 uses ALPM but PSR1 doesn't
804 	 */
805 	if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
806 					   !crtc_state->has_sel_update))
807 		return;
808 
809 	val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
810 
811 	if (crtc_state->has_panel_replay)
812 		val |= DP_ALPM_MODE_AUX_LESS;
813 
814 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
815 }
816 
817 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
818 				  const struct intel_crtc_state *crtc_state)
819 {
820 	intel_psr_enable_sink_alpm(intel_dp, crtc_state);
821 
822 	crtc_state->has_panel_replay ?
823 		_panel_replay_enable_sink(intel_dp, crtc_state) :
824 		_psr_enable_sink(intel_dp, crtc_state);
825 
826 	if (intel_dp_is_edp(intel_dp))
827 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
828 }
829 
830 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
831 {
832 	if (CAN_PANEL_REPLAY(intel_dp))
833 		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
834 				   DP_PANEL_REPLAY_ENABLE);
835 }
836 
837 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
838 {
839 	struct intel_display *display = to_intel_display(intel_dp);
840 	struct intel_connector *connector = intel_dp->attached_connector;
841 	struct drm_i915_private *dev_priv = to_i915(display->drm);
842 	u32 val = 0;
843 
844 	if (DISPLAY_VER(display) >= 11)
845 		val |= EDP_PSR_TP4_TIME_0us;
846 
847 	if (display->params.psr_safest_params) {
848 		val |= EDP_PSR_TP1_TIME_2500us;
849 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
850 		goto check_tp3_sel;
851 	}
852 
853 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
854 		val |= EDP_PSR_TP1_TIME_0us;
855 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
856 		val |= EDP_PSR_TP1_TIME_100us;
857 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
858 		val |= EDP_PSR_TP1_TIME_500us;
859 	else
860 		val |= EDP_PSR_TP1_TIME_2500us;
861 
862 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
863 		val |= EDP_PSR_TP2_TP3_TIME_0us;
864 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
865 		val |= EDP_PSR_TP2_TP3_TIME_100us;
866 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
867 		val |= EDP_PSR_TP2_TP3_TIME_500us;
868 	else
869 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
870 
871 	/*
872 	 * WA 0479: hsw,bdw
873 	 * "Do not skip both TP1 and TP2/TP3"
874 	 */
875 	if (DISPLAY_VER(dev_priv) < 9 &&
876 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
877 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
878 		val |= EDP_PSR_TP2_TP3_TIME_100us;
879 
880 check_tp3_sel:
881 	if (intel_dp_source_supports_tps3(display) &&
882 	    drm_dp_tps3_supported(intel_dp->dpcd))
883 		val |= EDP_PSR_TP_TP1_TP3;
884 	else
885 		val |= EDP_PSR_TP_TP1_TP2;
886 
887 	return val;
888 }
889 
890 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
891 {
892 	struct intel_display *display = to_intel_display(intel_dp);
893 	struct intel_connector *connector = intel_dp->attached_connector;
894 	int idle_frames;
895 
896 	/* Let's use 6 as the minimum to cover all known cases including the
897 	 * off-by-one issue that HW has in some cases.
898 	 */
899 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
900 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
901 
902 	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
903 		idle_frames = 0xf;
904 
905 	return idle_frames;
906 }
907 
908 static void hsw_activate_psr1(struct intel_dp *intel_dp)
909 {
910 	struct intel_display *display = to_intel_display(intel_dp);
911 	struct drm_i915_private *dev_priv = to_i915(display->drm);
912 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
913 	u32 max_sleep_time = 0x1f;
914 	u32 val = EDP_PSR_ENABLE;
915 
916 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
917 
918 	if (DISPLAY_VER(display) < 20)
919 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
920 
921 	if (IS_HASWELL(dev_priv))
922 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
923 
924 	if (intel_dp->psr.link_standby)
925 		val |= EDP_PSR_LINK_STANDBY;
926 
927 	val |= intel_psr1_get_tp_time(intel_dp);
928 
929 	if (DISPLAY_VER(display) >= 8)
930 		val |= EDP_PSR_CRC_ENABLE;
931 
932 	if (DISPLAY_VER(display) >= 20)
933 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
934 
935 	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
936 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
937 }
938 
939 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
940 {
941 	struct intel_display *display = to_intel_display(intel_dp);
942 	struct intel_connector *connector = intel_dp->attached_connector;
943 	u32 val = 0;
944 
945 	if (display->params.psr_safest_params)
946 		return EDP_PSR2_TP2_TIME_2500us;
947 
948 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
949 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
950 		val |= EDP_PSR2_TP2_TIME_50us;
951 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
952 		val |= EDP_PSR2_TP2_TIME_100us;
953 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
954 		val |= EDP_PSR2_TP2_TIME_500us;
955 	else
956 		val |= EDP_PSR2_TP2_TIME_2500us;
957 
958 	return val;
959 }
960 
961 static int psr2_block_count_lines(struct intel_dp *intel_dp)
962 {
963 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
964 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
965 }
966 
967 static int psr2_block_count(struct intel_dp *intel_dp)
968 {
969 	return psr2_block_count_lines(intel_dp) / 4;
970 }
971 
972 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
973 {
974 	u8 frames_before_su_entry;
975 
976 	frames_before_su_entry = max_t(u8,
977 				       intel_dp->psr.sink_sync_latency + 1,
978 				       2);
979 
980 	/* Entry setup frames must be at least 1 less than frames before SU entry */
981 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
982 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
983 
984 	return frames_before_su_entry;
985 }
986 
987 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
988 {
989 	struct intel_display *display = to_intel_display(intel_dp);
990 	struct intel_psr *psr = &intel_dp->psr;
991 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
992 
993 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
994 		u32 val = psr->su_region_et_enabled ?
995 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
996 
997 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
998 			val |= EDP_PSR2_SU_SDP_SCANLINE;
999 
1000 		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
1001 			       val);
1002 	}
1003 
1004 	intel_de_rmw(display,
1005 		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1006 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1007 
1008 	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1009 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1010 }
1011 
1012 static void hsw_activate_psr2(struct intel_dp *intel_dp)
1013 {
1014 	struct intel_display *display = to_intel_display(intel_dp);
1015 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1016 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1017 	u32 val = EDP_PSR2_ENABLE;
1018 	u32 psr_val = 0;
1019 
1020 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
1021 
1022 	if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
1023 		val |= EDP_SU_TRACK_ENABLE;
1024 
1025 	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1026 		val |= EDP_Y_COORDINATE_ENABLE;
1027 
1028 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1029 
1030 	val |= intel_psr2_get_tp_time(intel_dp);
1031 
1032 	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1033 		if (psr2_block_count(intel_dp) > 2)
1034 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1035 		else
1036 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1037 	}
1038 
1039 	/* Wa_22012278275:adl-p */
1040 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1041 		static const u8 map[] = {
1042 			2, /* 5 lines */
1043 			1, /* 6 lines */
1044 			0, /* 7 lines */
1045 			3, /* 8 lines */
1046 			6, /* 9 lines */
1047 			5, /* 10 lines */
1048 			4, /* 11 lines */
1049 			7, /* 12 lines */
1050 		};
1051 		/*
1052 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1053 		 * comments bellow for more information
1054 		 */
1055 		int tmp;
1056 
1057 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1058 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1059 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1060 
1061 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1062 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1063 	} else if (DISPLAY_VER(display) >= 20) {
1064 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1065 	} else if (DISPLAY_VER(display) >= 12) {
1066 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1067 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1068 	} else if (DISPLAY_VER(display) >= 9) {
1069 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1070 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1071 	}
1072 
1073 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1074 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1075 
1076 	if (DISPLAY_VER(display) >= 20)
1077 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1078 
1079 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1080 		u32 tmp;
1081 
1082 		tmp = intel_de_read(display,
1083 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1084 		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1085 	} else if (HAS_PSR2_SEL_FETCH(display)) {
1086 		intel_de_write(display,
1087 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1088 	}
1089 
1090 	if (intel_dp->psr.su_region_et_enabled)
1091 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1092 
1093 	/*
1094 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
1095 	 * recommending keep this bit unset while PSR2 is enabled.
1096 	 */
1097 	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1098 
1099 	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1100 }
1101 
1102 static bool
1103 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1104 {
1105 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1106 
1107 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1108 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1109 	else if (DISPLAY_VER(display) >= 12)
1110 		return cpu_transcoder == TRANSCODER_A;
1111 	else if (DISPLAY_VER(display) >= 9)
1112 		return cpu_transcoder == TRANSCODER_EDP;
1113 	else
1114 		return false;
1115 }
1116 
1117 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1118 {
1119 	if (!crtc_state->hw.active)
1120 		return 0;
1121 
1122 	return DIV_ROUND_UP(1000 * 1000,
1123 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1124 }
1125 
1126 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1127 				     u32 idle_frames)
1128 {
1129 	struct intel_display *display = to_intel_display(intel_dp);
1130 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1131 
1132 	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1133 		     EDP_PSR2_IDLE_FRAMES_MASK,
1134 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1135 }
1136 
1137 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1138 {
1139 	struct intel_display *display = to_intel_display(intel_dp);
1140 
1141 	psr2_program_idle_frames(intel_dp, 0);
1142 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1143 }
1144 
1145 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1146 {
1147 	struct intel_display *display = to_intel_display(intel_dp);
1148 
1149 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1150 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1151 }
1152 
1153 static void tgl_dc3co_disable_work(struct work_struct *work)
1154 {
1155 	struct intel_dp *intel_dp =
1156 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1157 
1158 	mutex_lock(&intel_dp->psr.lock);
1159 	/* If delayed work is pending, it is not idle */
1160 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1161 		goto unlock;
1162 
1163 	tgl_psr2_disable_dc3co(intel_dp);
1164 unlock:
1165 	mutex_unlock(&intel_dp->psr.lock);
1166 }
1167 
1168 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1169 {
1170 	if (!intel_dp->psr.dc3co_exitline)
1171 		return;
1172 
1173 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1174 	/* Before PSR2 exit disallow dc3co*/
1175 	tgl_psr2_disable_dc3co(intel_dp);
1176 }
1177 
1178 static bool
1179 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1180 			      struct intel_crtc_state *crtc_state)
1181 {
1182 	struct intel_display *display = to_intel_display(intel_dp);
1183 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1184 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1185 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1186 	enum port port = dig_port->base.port;
1187 
1188 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1189 		return pipe <= PIPE_B && port <= PORT_B;
1190 	else
1191 		return pipe == PIPE_A && port == PORT_A;
1192 }
1193 
1194 static void
1195 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1196 				  struct intel_crtc_state *crtc_state)
1197 {
1198 	struct intel_display *display = to_intel_display(intel_dp);
1199 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1200 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1201 	struct i915_power_domains *power_domains = &display->power.domains;
1202 	u32 exit_scanlines;
1203 
1204 	/*
1205 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1206 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1207 	 * is applied. B.Specs:49196
1208 	 */
1209 	return;
1210 
1211 	/*
1212 	 * DMC's DC3CO exit mechanism has an issue with Selective Fecth
1213 	 * TODO: when the issue is addressed, this restriction should be removed.
1214 	 */
1215 	if (crtc_state->enable_psr2_sel_fetch)
1216 		return;
1217 
1218 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1219 		return;
1220 
1221 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1222 		return;
1223 
1224 	/* Wa_16011303918:adl-p */
1225 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1226 		return;
1227 
1228 	/*
1229 	 * DC3CO Exit time 200us B.Spec 49196
1230 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1231 	 */
1232 	exit_scanlines =
1233 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1234 
1235 	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1236 		return;
1237 
1238 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1239 }
1240 
1241 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1242 					      struct intel_crtc_state *crtc_state)
1243 {
1244 	struct intel_display *display = to_intel_display(intel_dp);
1245 
1246 	if (!display->params.enable_psr2_sel_fetch &&
1247 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1248 		drm_dbg_kms(display->drm,
1249 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1250 		return false;
1251 	}
1252 
1253 	if (crtc_state->uapi.async_flip) {
1254 		drm_dbg_kms(display->drm,
1255 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1256 		return false;
1257 	}
1258 
1259 	return crtc_state->enable_psr2_sel_fetch = true;
1260 }
1261 
1262 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1263 				   struct intel_crtc_state *crtc_state)
1264 {
1265 	struct intel_display *display = to_intel_display(intel_dp);
1266 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1267 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1268 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1269 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1270 	u16 y_granularity = 0;
1271 
1272 	/* PSR2 HW only send full lines so we only need to validate the width */
1273 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1274 		return false;
1275 
1276 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1277 		return false;
1278 
1279 	/* HW tracking is only aligned to 4 lines */
1280 	if (!crtc_state->enable_psr2_sel_fetch)
1281 		return intel_dp->psr.su_y_granularity == 4;
1282 
1283 	/*
1284 	 * adl_p and mtl platforms have 1 line granularity.
1285 	 * For other platforms with SW tracking we can adjust the y coordinates
1286 	 * to match sink requirement if multiple of 4.
1287 	 */
1288 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1289 		y_granularity = intel_dp->psr.su_y_granularity;
1290 	else if (intel_dp->psr.su_y_granularity <= 2)
1291 		y_granularity = 4;
1292 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1293 		y_granularity = intel_dp->psr.su_y_granularity;
1294 
1295 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1296 		return false;
1297 
1298 	if (crtc_state->dsc.compression_enable &&
1299 	    vdsc_cfg->slice_height % y_granularity)
1300 		return false;
1301 
1302 	crtc_state->su_y_granularity = y_granularity;
1303 	return true;
1304 }
1305 
1306 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1307 							struct intel_crtc_state *crtc_state)
1308 {
1309 	struct intel_display *display = to_intel_display(intel_dp);
1310 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1311 	u32 hblank_total, hblank_ns, req_ns;
1312 
1313 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1314 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1315 
1316 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1317 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1318 
1319 	if ((hblank_ns - req_ns) > 100)
1320 		return true;
1321 
1322 	/* Not supported <13 / Wa_22012279113:adl-p */
1323 	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1324 		return false;
1325 
1326 	crtc_state->req_psr2_sdp_prior_scanline = true;
1327 	return true;
1328 }
1329 
1330 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1331 					const struct drm_display_mode *adjusted_mode)
1332 {
1333 	struct intel_display *display = to_intel_display(intel_dp);
1334 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1335 	int entry_setup_frames = 0;
1336 
1337 	if (psr_setup_time < 0) {
1338 		drm_dbg_kms(display->drm,
1339 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1340 			    intel_dp->psr_dpcd[1]);
1341 		return -ETIME;
1342 	}
1343 
1344 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1345 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1346 		if (DISPLAY_VER(display) >= 20) {
1347 			/* setup entry frames can be up to 3 frames */
1348 			entry_setup_frames = 1;
1349 			drm_dbg_kms(display->drm,
1350 				    "PSR setup entry frames %d\n",
1351 				    entry_setup_frames);
1352 		} else {
1353 			drm_dbg_kms(display->drm,
1354 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1355 				    psr_setup_time);
1356 			return -ETIME;
1357 		}
1358 	}
1359 
1360 	return entry_setup_frames;
1361 }
1362 
1363 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1364 				       const struct intel_crtc_state *crtc_state,
1365 				       bool aux_less)
1366 {
1367 	struct intel_display *display = to_intel_display(intel_dp);
1368 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1369 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1370 	int wake_lines;
1371 
1372 	if (aux_less)
1373 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1374 	else
1375 		wake_lines = DISPLAY_VER(display) < 20 ?
1376 			psr2_block_count_lines(intel_dp) :
1377 			intel_dp->alpm_parameters.io_wake_lines;
1378 
1379 	if (crtc_state->req_psr2_sdp_prior_scanline)
1380 		vblank -= 1;
1381 
1382 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1383 	if (vblank < wake_lines)
1384 		return false;
1385 
1386 	return true;
1387 }
1388 
1389 static bool alpm_config_valid(struct intel_dp *intel_dp,
1390 			      const struct intel_crtc_state *crtc_state,
1391 			      bool aux_less)
1392 {
1393 	struct intel_display *display = to_intel_display(intel_dp);
1394 
1395 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1396 		drm_dbg_kms(display->drm,
1397 			    "PSR2/Panel Replay  not enabled, Unable to use long enough wake times\n");
1398 		return false;
1399 	}
1400 
1401 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1402 		drm_dbg_kms(display->drm,
1403 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1404 		return false;
1405 	}
1406 
1407 	return true;
1408 }
1409 
1410 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1411 				    struct intel_crtc_state *crtc_state)
1412 {
1413 	struct intel_display *display = to_intel_display(intel_dp);
1414 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1415 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1416 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1417 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1418 
1419 	if (!intel_dp->psr.sink_psr2_support)
1420 		return false;
1421 
1422 	/* JSL and EHL only supports eDP 1.3 */
1423 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1424 		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1425 		return false;
1426 	}
1427 
1428 	/* Wa_16011181250 */
1429 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1430 	    IS_DG2(dev_priv)) {
1431 		drm_dbg_kms(display->drm,
1432 			    "PSR2 is defeatured for this platform\n");
1433 		return false;
1434 	}
1435 
1436 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1437 		drm_dbg_kms(display->drm,
1438 			    "PSR2 not completely functional in this stepping\n");
1439 		return false;
1440 	}
1441 
1442 	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1443 		drm_dbg_kms(display->drm,
1444 			    "PSR2 not supported in transcoder %s\n",
1445 			    transcoder_name(crtc_state->cpu_transcoder));
1446 		return false;
1447 	}
1448 
1449 	/*
1450 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1451 	 * resolution requires DSC to be enabled, priority is given to DSC
1452 	 * over PSR2.
1453 	 */
1454 	if (crtc_state->dsc.compression_enable &&
1455 	    (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1456 		drm_dbg_kms(display->drm,
1457 			    "PSR2 cannot be enabled since DSC is enabled\n");
1458 		return false;
1459 	}
1460 
1461 	if (DISPLAY_VER(display) >= 20) {
1462 		psr_max_h = crtc_hdisplay;
1463 		psr_max_v = crtc_vdisplay;
1464 		max_bpp = crtc_state->pipe_bpp;
1465 	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1466 		psr_max_h = 5120;
1467 		psr_max_v = 3200;
1468 		max_bpp = 30;
1469 	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1470 		psr_max_h = 4096;
1471 		psr_max_v = 2304;
1472 		max_bpp = 24;
1473 	} else if (DISPLAY_VER(display) == 9) {
1474 		psr_max_h = 3640;
1475 		psr_max_v = 2304;
1476 		max_bpp = 24;
1477 	}
1478 
1479 	if (crtc_state->pipe_bpp > max_bpp) {
1480 		drm_dbg_kms(display->drm,
1481 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1482 			    crtc_state->pipe_bpp, max_bpp);
1483 		return false;
1484 	}
1485 
1486 	/* Wa_16011303918:adl-p */
1487 	if (crtc_state->vrr.enable &&
1488 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1489 		drm_dbg_kms(display->drm,
1490 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1491 		return false;
1492 	}
1493 
1494 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1495 		return false;
1496 
1497 	if (!crtc_state->enable_psr2_sel_fetch &&
1498 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1499 		drm_dbg_kms(display->drm,
1500 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1501 			    crtc_hdisplay, crtc_vdisplay,
1502 			    psr_max_h, psr_max_v);
1503 		return false;
1504 	}
1505 
1506 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1507 
1508 	return true;
1509 }
1510 
1511 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1512 					  struct intel_crtc_state *crtc_state)
1513 {
1514 	struct intel_display *display = to_intel_display(intel_dp);
1515 
1516 	if (HAS_PSR2_SEL_FETCH(display) &&
1517 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1518 	    !HAS_PSR_HW_TRACKING(display)) {
1519 		drm_dbg_kms(display->drm,
1520 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1521 		goto unsupported;
1522 	}
1523 
1524 	if (!psr2_global_enabled(intel_dp)) {
1525 		drm_dbg_kms(display->drm,
1526 			    "Selective update disabled by flag\n");
1527 		goto unsupported;
1528 	}
1529 
1530 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1531 		goto unsupported;
1532 
1533 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1534 		drm_dbg_kms(display->drm,
1535 			    "Selective update not enabled, SDP indication do not fit in hblank\n");
1536 		goto unsupported;
1537 	}
1538 
1539 	if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1540 					     !intel_dp->psr.sink_panel_replay_su_support))
1541 		goto unsupported;
1542 
1543 	if (crtc_state->crc_enabled) {
1544 		drm_dbg_kms(display->drm,
1545 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1546 		goto unsupported;
1547 	}
1548 
1549 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1550 		drm_dbg_kms(display->drm,
1551 			    "Selective update not enabled, SU granularity not compatible\n");
1552 		goto unsupported;
1553 	}
1554 
1555 	crtc_state->enable_psr2_su_region_et =
1556 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1557 
1558 	return true;
1559 
1560 unsupported:
1561 	crtc_state->enable_psr2_sel_fetch = false;
1562 	return false;
1563 }
1564 
1565 static bool _psr_compute_config(struct intel_dp *intel_dp,
1566 				struct intel_crtc_state *crtc_state)
1567 {
1568 	struct intel_display *display = to_intel_display(intel_dp);
1569 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1570 	int entry_setup_frames;
1571 
1572 	if (!CAN_PSR(intel_dp))
1573 		return false;
1574 
1575 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1576 
1577 	if (entry_setup_frames >= 0) {
1578 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1579 	} else {
1580 		drm_dbg_kms(display->drm,
1581 			    "PSR condition failed: PSR setup timing not met\n");
1582 		return false;
1583 	}
1584 
1585 	return true;
1586 }
1587 
1588 static bool
1589 _panel_replay_compute_config(struct intel_dp *intel_dp,
1590 			     const struct intel_crtc_state *crtc_state,
1591 			     const struct drm_connector_state *conn_state)
1592 {
1593 	struct intel_display *display = to_intel_display(intel_dp);
1594 	struct intel_connector *connector =
1595 		to_intel_connector(conn_state->connector);
1596 	struct intel_hdcp *hdcp = &connector->hdcp;
1597 
1598 	if (!CAN_PANEL_REPLAY(intel_dp))
1599 		return false;
1600 
1601 	if (!panel_replay_global_enabled(intel_dp)) {
1602 		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1603 		return false;
1604 	}
1605 
1606 	if (!intel_dp_is_edp(intel_dp))
1607 		return true;
1608 
1609 	/* Remaining checks are for eDP only */
1610 
1611 	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1612 	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1613 		return false;
1614 
1615 	/* 128b/132b Panel Replay is not supported on eDP */
1616 	if (intel_dp_is_uhbr(crtc_state)) {
1617 		drm_dbg_kms(display->drm,
1618 			    "Panel Replay is not supported with 128b/132b\n");
1619 		return false;
1620 	}
1621 
1622 	/* HW will not allow Panel Replay on eDP when HDCP enabled */
1623 	if (conn_state->content_protection ==
1624 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1625 	    (conn_state->content_protection ==
1626 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1627 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1628 		drm_dbg_kms(display->drm,
1629 			    "Panel Replay is not supported with HDCP\n");
1630 		return false;
1631 	}
1632 
1633 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1634 		return false;
1635 
1636 	if (crtc_state->crc_enabled) {
1637 		drm_dbg_kms(display->drm,
1638 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1639 		return false;
1640 	}
1641 
1642 	return true;
1643 }
1644 
1645 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1646 					   struct intel_crtc_state *crtc_state)
1647 {
1648 	struct intel_display *display = to_intel_display(intel_dp);
1649 
1650 	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1651 		!crtc_state->has_sel_update);
1652 }
1653 
1654 void intel_psr_compute_config(struct intel_dp *intel_dp,
1655 			      struct intel_crtc_state *crtc_state,
1656 			      struct drm_connector_state *conn_state)
1657 {
1658 	struct intel_display *display = to_intel_display(intel_dp);
1659 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1660 
1661 	if (!psr_global_enabled(intel_dp)) {
1662 		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1663 		return;
1664 	}
1665 
1666 	if (intel_dp->psr.sink_not_reliable) {
1667 		drm_dbg_kms(display->drm,
1668 			    "PSR sink implementation is not reliable\n");
1669 		return;
1670 	}
1671 
1672 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1673 		drm_dbg_kms(display->drm,
1674 			    "PSR condition failed: Interlaced mode enabled\n");
1675 		return;
1676 	}
1677 
1678 	/*
1679 	 * FIXME figure out what is wrong with PSR+joiner and
1680 	 * fix it. Presumably something related to the fact that
1681 	 * PSR is a transcoder level feature.
1682 	 */
1683 	if (crtc_state->joiner_pipes) {
1684 		drm_dbg_kms(display->drm,
1685 			    "PSR disabled due to joiner\n");
1686 		return;
1687 	}
1688 
1689 	/*
1690 	 * Currently PSR/PR doesn't work reliably with VRR enabled.
1691 	 */
1692 	if (crtc_state->vrr.enable)
1693 		return;
1694 
1695 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1696 								    crtc_state,
1697 								    conn_state);
1698 
1699 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1700 		_psr_compute_config(intel_dp, crtc_state);
1701 
1702 	if (!crtc_state->has_psr)
1703 		return;
1704 
1705 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1706 
1707 	/* Wa_18037818876 */
1708 	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
1709 		crtc_state->has_psr = false;
1710 		drm_dbg_kms(display->drm,
1711 			    "PSR disabled to workaround PSR FSM hang issue\n");
1712 	}
1713 }
1714 
1715 void intel_psr_get_config(struct intel_encoder *encoder,
1716 			  struct intel_crtc_state *pipe_config)
1717 {
1718 	struct intel_display *display = to_intel_display(encoder);
1719 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1720 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1721 	struct intel_dp *intel_dp;
1722 	u32 val;
1723 
1724 	if (!dig_port)
1725 		return;
1726 
1727 	intel_dp = &dig_port->dp;
1728 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1729 		return;
1730 
1731 	mutex_lock(&intel_dp->psr.lock);
1732 	if (!intel_dp->psr.enabled)
1733 		goto unlock;
1734 
1735 	if (intel_dp->psr.panel_replay_enabled) {
1736 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1737 	} else {
1738 		/*
1739 		 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1740 		 * enabled/disabled because of frontbuffer tracking and others.
1741 		 */
1742 		pipe_config->has_psr = true;
1743 	}
1744 
1745 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1746 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1747 
1748 	if (!intel_dp->psr.sel_update_enabled)
1749 		goto unlock;
1750 
1751 	if (HAS_PSR2_SEL_FETCH(display)) {
1752 		val = intel_de_read(display,
1753 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1754 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1755 			pipe_config->enable_psr2_sel_fetch = true;
1756 	}
1757 
1758 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1759 
1760 	if (DISPLAY_VER(display) >= 12) {
1761 		val = intel_de_read(display,
1762 				    TRANS_EXITLINE(display, cpu_transcoder));
1763 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1764 	}
1765 unlock:
1766 	mutex_unlock(&intel_dp->psr.lock);
1767 }
1768 
1769 static void intel_psr_activate(struct intel_dp *intel_dp)
1770 {
1771 	struct intel_display *display = to_intel_display(intel_dp);
1772 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1773 
1774 	drm_WARN_ON(display->drm,
1775 		    transcoder_has_psr2(display, cpu_transcoder) &&
1776 		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1777 
1778 	drm_WARN_ON(display->drm,
1779 		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1780 
1781 	drm_WARN_ON(display->drm, intel_dp->psr.active);
1782 
1783 	lockdep_assert_held(&intel_dp->psr.lock);
1784 
1785 	/* psr1, psr2 and panel-replay are mutually exclusive.*/
1786 	if (intel_dp->psr.panel_replay_enabled)
1787 		dg2_activate_panel_replay(intel_dp);
1788 	else if (intel_dp->psr.sel_update_enabled)
1789 		hsw_activate_psr2(intel_dp);
1790 	else
1791 		hsw_activate_psr1(intel_dp);
1792 
1793 	intel_dp->psr.active = true;
1794 }
1795 
1796 /*
1797  * Wa_16013835468
1798  * Wa_14015648006
1799  */
1800 static void wm_optimization_wa(struct intel_dp *intel_dp,
1801 			       const struct intel_crtc_state *crtc_state)
1802 {
1803 	struct intel_display *display = to_intel_display(intel_dp);
1804 	enum pipe pipe = intel_dp->psr.pipe;
1805 	bool activate = false;
1806 
1807 	/* Wa_14015648006 */
1808 	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1809 		activate = true;
1810 
1811 	/* Wa_16013835468 */
1812 	if (DISPLAY_VER(display) == 12 &&
1813 	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1814 	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1815 		activate = true;
1816 
1817 	if (activate)
1818 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1819 			     0, LATENCY_REPORTING_REMOVED(pipe));
1820 	else
1821 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1822 			     LATENCY_REPORTING_REMOVED(pipe), 0);
1823 }
1824 
1825 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1826 				    const struct intel_crtc_state *crtc_state)
1827 {
1828 	struct intel_display *display = to_intel_display(intel_dp);
1829 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1830 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1831 	u32 mask = 0;
1832 
1833 	/*
1834 	 * Only HSW and BDW have PSR AUX registers that need to be setup.
1835 	 * SKL+ use hardcoded values PSR AUX transactions
1836 	 */
1837 	if (DISPLAY_VER(display) < 9)
1838 		hsw_psr_setup_aux(intel_dp);
1839 
1840 	/*
1841 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
1842 	 * mask LPSP to avoid dependency on other drivers that might block
1843 	 * runtime_pm besides preventing  other hw tracking issues now we
1844 	 * can rely on frontbuffer tracking.
1845 	 *
1846 	 * From bspec prior LunarLake:
1847 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1848 	 * panel replay mode.
1849 	 *
1850 	 * From bspec beyod LunarLake:
1851 	 * Panel Replay on DP: No bits are applicable
1852 	 * Panel Replay on eDP: All bits are applicable
1853 	 */
1854 	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1855 		mask = EDP_PSR_DEBUG_MASK_HPD;
1856 
1857 	if (intel_dp_is_edp(intel_dp)) {
1858 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1859 
1860 		/*
1861 		 * For some unknown reason on HSW non-ULT (or at least on
1862 		 * Dell Latitude E6540) external displays start to flicker
1863 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1864 		 * higher than should be possible with an external display.
1865 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1866 		 * when external displays are active.
1867 		 */
1868 		if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
1869 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1870 
1871 		if (DISPLAY_VER(display) < 20)
1872 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1873 
1874 		/*
1875 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1876 		 * registers in order to keep the CURSURFLIVE tricks working :(
1877 		 */
1878 		if (IS_DISPLAY_VER(display, 9, 10))
1879 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1880 
1881 		/* allow PSR with sprite enabled */
1882 		if (IS_HASWELL(dev_priv))
1883 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1884 	}
1885 
1886 	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1887 
1888 	psr_irq_control(intel_dp);
1889 
1890 	/*
1891 	 * TODO: if future platforms supports DC3CO in more than one
1892 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1893 	 */
1894 	if (intel_dp->psr.dc3co_exitline)
1895 		intel_de_rmw(display,
1896 			     TRANS_EXITLINE(display, cpu_transcoder),
1897 			     EXITLINE_MASK,
1898 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1899 
1900 	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1901 		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1902 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1903 			     IGNORE_PSR2_HW_TRACKING : 0);
1904 
1905 	if (intel_dp_is_edp(intel_dp))
1906 		intel_alpm_configure(intel_dp, crtc_state);
1907 
1908 	/*
1909 	 * Wa_16013835468
1910 	 * Wa_14015648006
1911 	 */
1912 	wm_optimization_wa(intel_dp, crtc_state);
1913 
1914 	if (intel_dp->psr.sel_update_enabled) {
1915 		if (DISPLAY_VER(display) == 9)
1916 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
1917 				     PSR2_VSC_ENABLE_PROG_HEADER |
1918 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1919 
1920 		/*
1921 		 * Wa_16014451276:adlp,mtl[a0,b0]
1922 		 * All supported adlp panels have 1-based X granularity, this may
1923 		 * cause issues if non-supported panels are used.
1924 		 */
1925 		if (!intel_dp->psr.panel_replay_enabled &&
1926 		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1927 		     IS_ALDERLAKE_P(dev_priv)))
1928 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
1929 				     0, ADLP_1_BASED_X_GRANULARITY);
1930 
1931 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1932 		if (!intel_dp->psr.panel_replay_enabled &&
1933 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1934 			intel_de_rmw(display,
1935 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1936 				     0,
1937 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1938 		else if (IS_ALDERLAKE_P(dev_priv))
1939 			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1940 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1941 	}
1942 }
1943 
1944 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1945 {
1946 	struct intel_display *display = to_intel_display(intel_dp);
1947 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1948 	u32 val;
1949 
1950 	if (intel_dp->psr.panel_replay_enabled)
1951 		goto no_err;
1952 
1953 	/*
1954 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1955 	 * will still keep the error set even after the reset done in the
1956 	 * irq_preinstall and irq_uninstall hooks.
1957 	 * And enabling in this situation cause the screen to freeze in the
1958 	 * first time that PSR HW tries to activate so lets keep PSR disabled
1959 	 * to avoid any rendering problems.
1960 	 */
1961 	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1962 	val &= psr_irq_psr_error_bit_get(intel_dp);
1963 	if (val) {
1964 		intel_dp->psr.sink_not_reliable = true;
1965 		drm_dbg_kms(display->drm,
1966 			    "PSR interruption error set, not enabling PSR\n");
1967 		return false;
1968 	}
1969 
1970 no_err:
1971 	return true;
1972 }
1973 
1974 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1975 				    const struct intel_crtc_state *crtc_state)
1976 {
1977 	struct intel_display *display = to_intel_display(intel_dp);
1978 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1979 	u32 val;
1980 
1981 	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
1982 
1983 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
1984 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1985 	intel_dp->psr.busy_frontbuffer_bits = 0;
1986 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1987 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1988 	/* DC5/DC6 requires at least 6 idle frames */
1989 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1990 	intel_dp->psr.dc3co_exit_delay = val;
1991 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1992 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1993 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
1994 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1995 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1996 		crtc_state->req_psr2_sdp_prior_scanline;
1997 
1998 	if (!psr_interrupt_error_check(intel_dp))
1999 		return;
2000 
2001 	if (intel_dp->psr.panel_replay_enabled)
2002 		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2003 	else
2004 		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2005 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2006 
2007 	/*
2008 	 * Enabling here only for PSR. Panel Replay enable bit is already
2009 	 * written at this point. See
2010 	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2011 	 *  - Selective Update
2012 	 *  - Region Early Transport
2013 	 *  - Selective Update Region Scanline Capture
2014 	 *  - VSC_SDP_CRC
2015 	 *  - HPD on different Errors
2016 	 *  - CRC verification
2017 	 * are written for PSR and Panel Replay here.
2018 	 */
2019 	intel_psr_enable_sink(intel_dp, crtc_state);
2020 
2021 	if (intel_dp_is_edp(intel_dp))
2022 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2023 
2024 	intel_psr_enable_source(intel_dp, crtc_state);
2025 	intel_dp->psr.enabled = true;
2026 	intel_dp->psr.paused = false;
2027 
2028 	/*
2029 	 * Link_ok is sticky and set here on PSR enable. We can assume link
2030 	 * training is complete as we never continue to PSR enable with
2031 	 * untrained link. Link_ok is kept as set until first short pulse
2032 	 * interrupt. This is targeted to workaround panels stating bad link
2033 	 * after PSR is enabled.
2034 	 */
2035 	intel_dp->psr.link_ok = true;
2036 
2037 	intel_psr_activate(intel_dp);
2038 }
2039 
2040 static void intel_psr_exit(struct intel_dp *intel_dp)
2041 {
2042 	struct intel_display *display = to_intel_display(intel_dp);
2043 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2044 	u32 val;
2045 
2046 	if (!intel_dp->psr.active) {
2047 		if (transcoder_has_psr2(display, cpu_transcoder)) {
2048 			val = intel_de_read(display,
2049 					    EDP_PSR2_CTL(display, cpu_transcoder));
2050 			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2051 		}
2052 
2053 		val = intel_de_read(display,
2054 				    psr_ctl_reg(display, cpu_transcoder));
2055 		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2056 
2057 		return;
2058 	}
2059 
2060 	if (intel_dp->psr.panel_replay_enabled) {
2061 		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2062 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2063 	} else if (intel_dp->psr.sel_update_enabled) {
2064 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2065 
2066 		val = intel_de_rmw(display,
2067 				   EDP_PSR2_CTL(display, cpu_transcoder),
2068 				   EDP_PSR2_ENABLE, 0);
2069 
2070 		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2071 	} else {
2072 		val = intel_de_rmw(display,
2073 				   psr_ctl_reg(display, cpu_transcoder),
2074 				   EDP_PSR_ENABLE, 0);
2075 
2076 		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2077 	}
2078 	intel_dp->psr.active = false;
2079 }
2080 
2081 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2082 {
2083 	struct intel_display *display = to_intel_display(intel_dp);
2084 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2085 	i915_reg_t psr_status;
2086 	u32 psr_status_mask;
2087 
2088 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2089 					  intel_dp->psr.panel_replay_enabled)) {
2090 		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2091 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2092 	} else {
2093 		psr_status = psr_status_reg(display, cpu_transcoder);
2094 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2095 	}
2096 
2097 	/* Wait till PSR is idle */
2098 	if (intel_de_wait_for_clear(display, psr_status,
2099 				    psr_status_mask, 2000))
2100 		drm_err(display->drm, "Timed out waiting PSR idle state\n");
2101 }
2102 
2103 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2104 {
2105 	struct intel_display *display = to_intel_display(intel_dp);
2106 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2107 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2108 
2109 	lockdep_assert_held(&intel_dp->psr.lock);
2110 
2111 	if (!intel_dp->psr.enabled)
2112 		return;
2113 
2114 	if (intel_dp->psr.panel_replay_enabled)
2115 		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2116 	else
2117 		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2118 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2119 
2120 	intel_psr_exit(intel_dp);
2121 	intel_psr_wait_exit_locked(intel_dp);
2122 
2123 	/*
2124 	 * Wa_16013835468
2125 	 * Wa_14015648006
2126 	 */
2127 	if (DISPLAY_VER(display) >= 11)
2128 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2129 			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2130 
2131 	if (intel_dp->psr.sel_update_enabled) {
2132 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2133 		if (!intel_dp->psr.panel_replay_enabled &&
2134 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2135 			intel_de_rmw(display,
2136 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2137 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2138 		else if (IS_ALDERLAKE_P(dev_priv))
2139 			intel_de_rmw(display, CLKGATE_DIS_MISC,
2140 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2141 	}
2142 
2143 	if (intel_dp_is_edp(intel_dp))
2144 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2145 
2146 	/* Panel Replay on eDP is always using ALPM aux less. */
2147 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
2148 		intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
2149 			     ALPM_CTL_ALPM_ENABLE |
2150 			     ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2151 
2152 		intel_de_rmw(display,
2153 			     PORT_ALPM_CTL(cpu_transcoder),
2154 			     PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2155 	}
2156 
2157 	/* Disable PSR on Sink */
2158 	if (!intel_dp->psr.panel_replay_enabled) {
2159 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2160 
2161 		if (intel_dp->psr.sel_update_enabled)
2162 			drm_dp_dpcd_writeb(&intel_dp->aux,
2163 					   DP_RECEIVER_ALPM_CONFIG, 0);
2164 	}
2165 
2166 	intel_dp->psr.enabled = false;
2167 	intel_dp->psr.panel_replay_enabled = false;
2168 	intel_dp->psr.sel_update_enabled = false;
2169 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2170 	intel_dp->psr.su_region_et_enabled = false;
2171 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2172 }
2173 
2174 /**
2175  * intel_psr_disable - Disable PSR
2176  * @intel_dp: Intel DP
2177  * @old_crtc_state: old CRTC state
2178  *
2179  * This function needs to be called before disabling pipe.
2180  */
2181 void intel_psr_disable(struct intel_dp *intel_dp,
2182 		       const struct intel_crtc_state *old_crtc_state)
2183 {
2184 	struct intel_display *display = to_intel_display(intel_dp);
2185 
2186 	if (!old_crtc_state->has_psr)
2187 		return;
2188 
2189 	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
2190 		return;
2191 
2192 	mutex_lock(&intel_dp->psr.lock);
2193 
2194 	intel_psr_disable_locked(intel_dp);
2195 
2196 	intel_dp->psr.link_ok = false;
2197 
2198 	mutex_unlock(&intel_dp->psr.lock);
2199 	cancel_work_sync(&intel_dp->psr.work);
2200 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2201 }
2202 
2203 /**
2204  * intel_psr_pause - Pause PSR
2205  * @intel_dp: Intel DP
2206  *
2207  * This function need to be called after enabling psr.
2208  */
2209 void intel_psr_pause(struct intel_dp *intel_dp)
2210 {
2211 	struct intel_display *display = to_intel_display(intel_dp);
2212 	struct intel_psr *psr = &intel_dp->psr;
2213 
2214 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2215 		return;
2216 
2217 	mutex_lock(&psr->lock);
2218 
2219 	if (!psr->enabled) {
2220 		mutex_unlock(&psr->lock);
2221 		return;
2222 	}
2223 
2224 	/* If we ever hit this, we will need to add refcount to pause/resume */
2225 	drm_WARN_ON(display->drm, psr->paused);
2226 
2227 	intel_psr_exit(intel_dp);
2228 	intel_psr_wait_exit_locked(intel_dp);
2229 	psr->paused = true;
2230 
2231 	mutex_unlock(&psr->lock);
2232 
2233 	cancel_work_sync(&psr->work);
2234 	cancel_delayed_work_sync(&psr->dc3co_work);
2235 }
2236 
2237 /**
2238  * intel_psr_resume - Resume PSR
2239  * @intel_dp: Intel DP
2240  *
2241  * This function need to be called after pausing psr.
2242  */
2243 void intel_psr_resume(struct intel_dp *intel_dp)
2244 {
2245 	struct intel_psr *psr = &intel_dp->psr;
2246 
2247 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2248 		return;
2249 
2250 	mutex_lock(&psr->lock);
2251 
2252 	if (!psr->paused)
2253 		goto unlock;
2254 
2255 	psr->paused = false;
2256 	intel_psr_activate(intel_dp);
2257 
2258 unlock:
2259 	mutex_unlock(&psr->lock);
2260 }
2261 
2262 /**
2263  * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed
2264  * @crtc_state: CRTC status
2265  *
2266  * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
2267  * prevent it in case of Panel Replay. Panel Replay switches main link off on
2268  * DC entry. This means vblank interrupts are not fired and is a problem if
2269  * user-space is polling for vblank events.
2270  */
2271 bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
2272 {
2273 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2274 	struct intel_encoder *encoder;
2275 
2276 	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2277 		struct intel_dp *intel_dp;
2278 
2279 		if (!intel_encoder_is_dp(encoder))
2280 			continue;
2281 
2282 		intel_dp = enc_to_intel_dp(encoder);
2283 
2284 		if (intel_dp_is_edp(intel_dp) &&
2285 		    CAN_PANEL_REPLAY(intel_dp))
2286 			return true;
2287 	}
2288 
2289 	return false;
2290 }
2291 
2292 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2293 {
2294 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2295 
2296 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
2297 		PSR2_MAN_TRK_CTL_ENABLE;
2298 }
2299 
2300 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2301 {
2302 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2303 
2304 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2305 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2306 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2307 }
2308 
2309 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2310 {
2311 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2312 
2313 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2314 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2315 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2316 }
2317 
2318 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2319 {
2320 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2321 
2322 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2323 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2324 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2325 }
2326 
2327 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2328 {
2329 	struct intel_display *display = to_intel_display(intel_dp);
2330 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2331 
2332 	if (intel_dp->psr.psr2_sel_fetch_enabled)
2333 		intel_de_write(display,
2334 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2335 			       man_trk_ctl_enable_bit_get(display) |
2336 			       man_trk_ctl_partial_frame_bit_get(display) |
2337 			       man_trk_ctl_single_full_frame_bit_get(display) |
2338 			       man_trk_ctl_continuos_full_frame(display));
2339 
2340 	/*
2341 	 * Display WA #0884: skl+
2342 	 * This documented WA for bxt can be safely applied
2343 	 * broadly so we can force HW tracking to exit PSR
2344 	 * instead of disabling and re-enabling.
2345 	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
2346 	 * but it makes more sense write to the current active
2347 	 * pipe.
2348 	 *
2349 	 * This workaround do not exist for platforms with display 10 or newer
2350 	 * but testing proved that it works for up display 13, for newer
2351 	 * than that testing will be needed.
2352 	 */
2353 	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2354 }
2355 
2356 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2357 {
2358 	struct intel_display *display = to_intel_display(crtc_state);
2359 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2360 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2361 	struct intel_encoder *encoder;
2362 
2363 	if (!crtc_state->enable_psr2_sel_fetch)
2364 		return;
2365 
2366 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2367 					     crtc_state->uapi.encoder_mask) {
2368 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2369 
2370 		lockdep_assert_held(&intel_dp->psr.lock);
2371 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2372 			return;
2373 		break;
2374 	}
2375 
2376 	intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2377 		       crtc_state->psr2_man_track_ctl);
2378 
2379 	if (!crtc_state->enable_psr2_su_region_et)
2380 		return;
2381 
2382 	intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2383 		       crtc_state->pipe_srcsz_early_tpt);
2384 }
2385 
2386 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2387 				  bool full_update)
2388 {
2389 	struct intel_display *display = to_intel_display(crtc_state);
2390 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2391 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2392 	u32 val = man_trk_ctl_enable_bit_get(display);
2393 
2394 	/* SF partial frame enable has to be set even on full update */
2395 	val |= man_trk_ctl_partial_frame_bit_get(display);
2396 
2397 	if (full_update) {
2398 		val |= man_trk_ctl_single_full_frame_bit_get(display);
2399 		val |= man_trk_ctl_continuos_full_frame(display);
2400 		goto exit;
2401 	}
2402 
2403 	if (crtc_state->psr2_su_area.y1 == -1)
2404 		goto exit;
2405 
2406 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
2407 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2408 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2409 	} else {
2410 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2411 			    crtc_state->psr2_su_area.y1 % 4 ||
2412 			    crtc_state->psr2_su_area.y2 % 4);
2413 
2414 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2415 			crtc_state->psr2_su_area.y1 / 4 + 1);
2416 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2417 			crtc_state->psr2_su_area.y2 / 4 + 1);
2418 	}
2419 exit:
2420 	crtc_state->psr2_man_track_ctl = val;
2421 }
2422 
2423 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2424 					  bool full_update)
2425 {
2426 	int width, height;
2427 
2428 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2429 		return 0;
2430 
2431 	width = drm_rect_width(&crtc_state->psr2_su_area);
2432 	height = drm_rect_height(&crtc_state->psr2_su_area);
2433 
2434 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2435 }
2436 
2437 static void clip_area_update(struct drm_rect *overlap_damage_area,
2438 			     struct drm_rect *damage_area,
2439 			     struct drm_rect *pipe_src)
2440 {
2441 	if (!drm_rect_intersect(damage_area, pipe_src))
2442 		return;
2443 
2444 	if (overlap_damage_area->y1 == -1) {
2445 		overlap_damage_area->y1 = damage_area->y1;
2446 		overlap_damage_area->y2 = damage_area->y2;
2447 		return;
2448 	}
2449 
2450 	if (damage_area->y1 < overlap_damage_area->y1)
2451 		overlap_damage_area->y1 = damage_area->y1;
2452 
2453 	if (damage_area->y2 > overlap_damage_area->y2)
2454 		overlap_damage_area->y2 = damage_area->y2;
2455 }
2456 
2457 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2458 {
2459 	struct intel_display *display = to_intel_display(crtc_state);
2460 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2461 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2462 	u16 y_alignment;
2463 
2464 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2465 	if (crtc_state->dsc.compression_enable &&
2466 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
2467 		y_alignment = vdsc_cfg->slice_height;
2468 	else
2469 		y_alignment = crtc_state->su_y_granularity;
2470 
2471 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2472 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2473 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2474 						y_alignment) + 1) * y_alignment;
2475 }
2476 
2477 /*
2478  * When early transport is in use we need to extend SU area to cover
2479  * cursor fully when cursor is in SU area.
2480  */
2481 static void
2482 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2483 				  struct intel_crtc *crtc,
2484 				  bool *cursor_in_su_area)
2485 {
2486 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2487 	struct intel_plane_state *new_plane_state;
2488 	struct intel_plane *plane;
2489 	int i;
2490 
2491 	if (!crtc_state->enable_psr2_su_region_et)
2492 		return;
2493 
2494 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2495 		struct drm_rect inter;
2496 
2497 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2498 			continue;
2499 
2500 		if (plane->id != PLANE_CURSOR)
2501 			continue;
2502 
2503 		if (!new_plane_state->uapi.visible)
2504 			continue;
2505 
2506 		inter = crtc_state->psr2_su_area;
2507 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2508 			continue;
2509 
2510 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2511 				 &crtc_state->pipe_src);
2512 		*cursor_in_su_area = true;
2513 	}
2514 }
2515 
2516 /*
2517  * TODO: Not clear how to handle planes with negative position,
2518  * also planes are not updated if they have a negative X
2519  * position so for now doing a full update in this cases
2520  *
2521  * Plane scaling and rotation is not supported by selective fetch and both
2522  * properties can change without a modeset, so need to be check at every
2523  * atomic commit.
2524  */
2525 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2526 {
2527 	if (plane_state->uapi.dst.y1 < 0 ||
2528 	    plane_state->uapi.dst.x1 < 0 ||
2529 	    plane_state->scaler_id >= 0 ||
2530 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2531 		return false;
2532 
2533 	return true;
2534 }
2535 
2536 /*
2537  * Check for pipe properties that is not supported by selective fetch.
2538  *
2539  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2540  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2541  * enabled and going to the full update path.
2542  */
2543 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2544 {
2545 	if (crtc_state->scaler_state.scaler_id >= 0)
2546 		return false;
2547 
2548 	return true;
2549 }
2550 
2551 /* Wa 14019834836 */
2552 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2553 {
2554 	struct intel_display *display = to_intel_display(crtc_state);
2555 	struct intel_encoder *encoder;
2556 	int hactive_limit;
2557 
2558 	if (crtc_state->psr2_su_area.y1 != 0 ||
2559 	    crtc_state->psr2_su_area.y2 != 0)
2560 		return;
2561 
2562 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2563 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2564 	else
2565 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2566 
2567 	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2568 		return;
2569 
2570 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2571 					     crtc_state->uapi.encoder_mask) {
2572 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2573 
2574 		if (!intel_dp_is_edp(intel_dp) &&
2575 		    intel_dp->psr.panel_replay_enabled &&
2576 		    intel_dp->psr.sel_update_enabled) {
2577 			crtc_state->psr2_su_area.y2++;
2578 			return;
2579 		}
2580 	}
2581 }
2582 
2583 static void
2584 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2585 {
2586 	struct intel_display *display = to_intel_display(crtc_state);
2587 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2588 
2589 	/* Wa_14014971492 */
2590 	if (!crtc_state->has_panel_replay &&
2591 	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2592 	      IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
2593 	    crtc_state->splitter.enable)
2594 		crtc_state->psr2_su_area.y1 = 0;
2595 
2596 	/* Wa 14019834836 */
2597 	if (DISPLAY_VER(display) == 30)
2598 		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2599 }
2600 
2601 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2602 				struct intel_crtc *crtc)
2603 {
2604 	struct intel_display *display = to_intel_display(state);
2605 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2606 	struct intel_plane_state *new_plane_state, *old_plane_state;
2607 	struct intel_plane *plane;
2608 	bool full_update = false, cursor_in_su_area = false;
2609 	int i, ret;
2610 
2611 	if (!crtc_state->enable_psr2_sel_fetch)
2612 		return 0;
2613 
2614 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2615 		full_update = true;
2616 		goto skip_sel_fetch_set_loop;
2617 	}
2618 
2619 	crtc_state->psr2_su_area.x1 = 0;
2620 	crtc_state->psr2_su_area.y1 = -1;
2621 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2622 	crtc_state->psr2_su_area.y2 = -1;
2623 
2624 	/*
2625 	 * Calculate minimal selective fetch area of each plane and calculate
2626 	 * the pipe damaged area.
2627 	 * In the next loop the plane selective fetch area will actually be set
2628 	 * using whole pipe damaged area.
2629 	 */
2630 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2631 					     new_plane_state, i) {
2632 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2633 						      .x2 = INT_MAX };
2634 
2635 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2636 			continue;
2637 
2638 		if (!new_plane_state->uapi.visible &&
2639 		    !old_plane_state->uapi.visible)
2640 			continue;
2641 
2642 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2643 			full_update = true;
2644 			break;
2645 		}
2646 
2647 		/*
2648 		 * If visibility or plane moved, mark the whole plane area as
2649 		 * damaged as it needs to be complete redraw in the new and old
2650 		 * position.
2651 		 */
2652 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2653 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2654 				     &old_plane_state->uapi.dst)) {
2655 			if (old_plane_state->uapi.visible) {
2656 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2657 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2658 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2659 						 &crtc_state->pipe_src);
2660 			}
2661 
2662 			if (new_plane_state->uapi.visible) {
2663 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2664 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2665 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2666 						 &crtc_state->pipe_src);
2667 			}
2668 			continue;
2669 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2670 			/* If alpha changed mark the whole plane area as damaged */
2671 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2672 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2673 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2674 					 &crtc_state->pipe_src);
2675 			continue;
2676 		}
2677 
2678 		src = drm_plane_state_src(&new_plane_state->uapi);
2679 		drm_rect_fp_to_int(&src, &src);
2680 
2681 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2682 						     &new_plane_state->uapi, &damaged_area))
2683 			continue;
2684 
2685 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2686 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2687 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2688 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2689 
2690 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2691 	}
2692 
2693 	/*
2694 	 * TODO: For now we are just using full update in case
2695 	 * selective fetch area calculation fails. To optimize this we
2696 	 * should identify cases where this happens and fix the area
2697 	 * calculation for those.
2698 	 */
2699 	if (crtc_state->psr2_su_area.y1 == -1) {
2700 		drm_info_once(display->drm,
2701 			      "Selective fetch area calculation failed in pipe %c\n",
2702 			      pipe_name(crtc->pipe));
2703 		full_update = true;
2704 	}
2705 
2706 	if (full_update)
2707 		goto skip_sel_fetch_set_loop;
2708 
2709 	intel_psr_apply_su_area_workarounds(crtc_state);
2710 
2711 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2712 	if (ret)
2713 		return ret;
2714 
2715 	/*
2716 	 * Adjust su area to cover cursor fully as necessary (early
2717 	 * transport). This needs to be done after
2718 	 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2719 	 * affected planes even when cursor is not updated by itself.
2720 	 */
2721 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2722 
2723 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2724 
2725 	/*
2726 	 * Now that we have the pipe damaged area check if it intersect with
2727 	 * every plane, if it does set the plane selective fetch area.
2728 	 */
2729 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2730 					     new_plane_state, i) {
2731 		struct drm_rect *sel_fetch_area, inter;
2732 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2733 
2734 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2735 		    !new_plane_state->uapi.visible)
2736 			continue;
2737 
2738 		inter = crtc_state->psr2_su_area;
2739 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2740 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2741 			sel_fetch_area->y1 = -1;
2742 			sel_fetch_area->y2 = -1;
2743 			/*
2744 			 * if plane sel fetch was previously enabled ->
2745 			 * disable it
2746 			 */
2747 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2748 				crtc_state->update_planes |= BIT(plane->id);
2749 
2750 			continue;
2751 		}
2752 
2753 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2754 			full_update = true;
2755 			break;
2756 		}
2757 
2758 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2759 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2760 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2761 		crtc_state->update_planes |= BIT(plane->id);
2762 
2763 		/*
2764 		 * Sel_fetch_area is calculated for UV plane. Use
2765 		 * same area for Y plane as well.
2766 		 */
2767 		if (linked) {
2768 			struct intel_plane_state *linked_new_plane_state;
2769 			struct drm_rect *linked_sel_fetch_area;
2770 
2771 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2772 			if (IS_ERR(linked_new_plane_state))
2773 				return PTR_ERR(linked_new_plane_state);
2774 
2775 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2776 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2777 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2778 			crtc_state->update_planes |= BIT(linked->id);
2779 		}
2780 	}
2781 
2782 skip_sel_fetch_set_loop:
2783 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2784 	crtc_state->pipe_srcsz_early_tpt =
2785 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2786 	return 0;
2787 }
2788 
2789 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2790 				struct intel_crtc *crtc)
2791 {
2792 	struct intel_display *display = to_intel_display(state);
2793 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2794 	const struct intel_crtc_state *old_crtc_state =
2795 		intel_atomic_get_old_crtc_state(state, crtc);
2796 	const struct intel_crtc_state *new_crtc_state =
2797 		intel_atomic_get_new_crtc_state(state, crtc);
2798 	struct intel_encoder *encoder;
2799 
2800 	if (!HAS_PSR(display))
2801 		return;
2802 
2803 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2804 					     old_crtc_state->uapi.encoder_mask) {
2805 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2806 		struct intel_psr *psr = &intel_dp->psr;
2807 		bool needs_to_disable = false;
2808 
2809 		mutex_lock(&psr->lock);
2810 
2811 		/*
2812 		 * Reasons to disable:
2813 		 * - PSR disabled in new state
2814 		 * - All planes will go inactive
2815 		 * - Changing between PSR versions
2816 		 * - Region Early Transport changing
2817 		 * - Display WA #1136: skl, bxt
2818 		 */
2819 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2820 		needs_to_disable |= !new_crtc_state->has_psr;
2821 		needs_to_disable |= !new_crtc_state->active_planes;
2822 		needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
2823 		needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
2824 			psr->su_region_et_enabled;
2825 		needs_to_disable |= new_crtc_state->has_panel_replay !=
2826 			psr->panel_replay_enabled;
2827 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2828 			new_crtc_state->wm_level_disabled;
2829 
2830 		if (psr->enabled && needs_to_disable)
2831 			intel_psr_disable_locked(intel_dp);
2832 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2833 			/* Wa_14015648006 */
2834 			wm_optimization_wa(intel_dp, new_crtc_state);
2835 
2836 		mutex_unlock(&psr->lock);
2837 	}
2838 }
2839 
2840 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2841 				 struct intel_crtc *crtc)
2842 {
2843 	struct intel_display *display = to_intel_display(state);
2844 	const struct intel_crtc_state *crtc_state =
2845 		intel_atomic_get_new_crtc_state(state, crtc);
2846 	struct intel_encoder *encoder;
2847 
2848 	if (!crtc_state->has_psr)
2849 		return;
2850 
2851 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2852 					     crtc_state->uapi.encoder_mask) {
2853 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2854 		struct intel_psr *psr = &intel_dp->psr;
2855 		bool keep_disabled = false;
2856 
2857 		mutex_lock(&psr->lock);
2858 
2859 		drm_WARN_ON(display->drm,
2860 			    psr->enabled && !crtc_state->active_planes);
2861 
2862 		keep_disabled |= psr->sink_not_reliable;
2863 		keep_disabled |= !crtc_state->active_planes;
2864 
2865 		/* Display WA #1136: skl, bxt */
2866 		keep_disabled |= DISPLAY_VER(display) < 11 &&
2867 			crtc_state->wm_level_disabled;
2868 
2869 		if (!psr->enabled && !keep_disabled)
2870 			intel_psr_enable_locked(intel_dp, crtc_state);
2871 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2872 			/* Wa_14015648006 */
2873 			wm_optimization_wa(intel_dp, crtc_state);
2874 
2875 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2876 		if (crtc_state->crc_enabled && psr->enabled)
2877 			psr_force_hw_tracking_exit(intel_dp);
2878 
2879 		/*
2880 		 * Clear possible busy bits in case we have
2881 		 * invalidate -> flip -> flush sequence.
2882 		 */
2883 		intel_dp->psr.busy_frontbuffer_bits = 0;
2884 
2885 		mutex_unlock(&psr->lock);
2886 	}
2887 }
2888 
2889 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2890 {
2891 	struct intel_display *display = to_intel_display(intel_dp);
2892 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2893 
2894 	/*
2895 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2896 	 * As all higher states has bit 4 of PSR2 state set we can just wait for
2897 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2898 	 */
2899 	return intel_de_wait_for_clear(display,
2900 				       EDP_PSR2_STATUS(display, cpu_transcoder),
2901 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2902 }
2903 
2904 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2905 {
2906 	struct intel_display *display = to_intel_display(intel_dp);
2907 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2908 
2909 	/*
2910 	 * From bspec: Panel Self Refresh (BDW+)
2911 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2912 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2913 	 * defensive enough to cover everything.
2914 	 */
2915 	return intel_de_wait_for_clear(display,
2916 				       psr_status_reg(display, cpu_transcoder),
2917 				       EDP_PSR_STATUS_STATE_MASK, 50);
2918 }
2919 
2920 /**
2921  * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update
2922  * @new_crtc_state: new CRTC state
2923  *
2924  * This function is expected to be called from pipe_update_start() where it is
2925  * not expected to race with PSR enable or disable.
2926  */
2927 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2928 {
2929 	struct intel_display *display = to_intel_display(new_crtc_state);
2930 	struct intel_encoder *encoder;
2931 
2932 	if (!new_crtc_state->has_psr)
2933 		return;
2934 
2935 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2936 					     new_crtc_state->uapi.encoder_mask) {
2937 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2938 		int ret;
2939 
2940 		lockdep_assert_held(&intel_dp->psr.lock);
2941 
2942 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
2943 			continue;
2944 
2945 		if (intel_dp->psr.sel_update_enabled)
2946 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2947 		else
2948 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2949 
2950 		if (ret)
2951 			drm_err(display->drm,
2952 				"PSR wait timed out, atomic update may fail\n");
2953 	}
2954 }
2955 
2956 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2957 {
2958 	struct intel_display *display = to_intel_display(intel_dp);
2959 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2960 	i915_reg_t reg;
2961 	u32 mask;
2962 	int err;
2963 
2964 	if (!intel_dp->psr.enabled)
2965 		return false;
2966 
2967 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2968 					  intel_dp->psr.panel_replay_enabled)) {
2969 		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
2970 		mask = EDP_PSR2_STATUS_STATE_MASK;
2971 	} else {
2972 		reg = psr_status_reg(display, cpu_transcoder);
2973 		mask = EDP_PSR_STATUS_STATE_MASK;
2974 	}
2975 
2976 	mutex_unlock(&intel_dp->psr.lock);
2977 
2978 	err = intel_de_wait_for_clear(display, reg, mask, 50);
2979 	if (err)
2980 		drm_err(display->drm,
2981 			"Timed out waiting for PSR Idle for re-enable\n");
2982 
2983 	/* After the unlocked wait, verify that PSR is still wanted! */
2984 	mutex_lock(&intel_dp->psr.lock);
2985 	return err == 0 && intel_dp->psr.enabled;
2986 }
2987 
2988 static int intel_psr_fastset_force(struct intel_display *display)
2989 {
2990 	struct drm_connector_list_iter conn_iter;
2991 	struct drm_modeset_acquire_ctx ctx;
2992 	struct drm_atomic_state *state;
2993 	struct drm_connector *conn;
2994 	int err = 0;
2995 
2996 	state = drm_atomic_state_alloc(display->drm);
2997 	if (!state)
2998 		return -ENOMEM;
2999 
3000 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3001 
3002 	state->acquire_ctx = &ctx;
3003 	to_intel_atomic_state(state)->internal = true;
3004 
3005 retry:
3006 	drm_connector_list_iter_begin(display->drm, &conn_iter);
3007 	drm_for_each_connector_iter(conn, &conn_iter) {
3008 		struct drm_connector_state *conn_state;
3009 		struct drm_crtc_state *crtc_state;
3010 
3011 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3012 			continue;
3013 
3014 		conn_state = drm_atomic_get_connector_state(state, conn);
3015 		if (IS_ERR(conn_state)) {
3016 			err = PTR_ERR(conn_state);
3017 			break;
3018 		}
3019 
3020 		if (!conn_state->crtc)
3021 			continue;
3022 
3023 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3024 		if (IS_ERR(crtc_state)) {
3025 			err = PTR_ERR(crtc_state);
3026 			break;
3027 		}
3028 
3029 		/* Mark mode as changed to trigger a pipe->update() */
3030 		crtc_state->mode_changed = true;
3031 	}
3032 	drm_connector_list_iter_end(&conn_iter);
3033 
3034 	if (err == 0)
3035 		err = drm_atomic_commit(state);
3036 
3037 	if (err == -EDEADLK) {
3038 		drm_atomic_state_clear(state);
3039 		err = drm_modeset_backoff(&ctx);
3040 		if (!err)
3041 			goto retry;
3042 	}
3043 
3044 	drm_modeset_drop_locks(&ctx);
3045 	drm_modeset_acquire_fini(&ctx);
3046 	drm_atomic_state_put(state);
3047 
3048 	return err;
3049 }
3050 
3051 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3052 {
3053 	struct intel_display *display = to_intel_display(intel_dp);
3054 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3055 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3056 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3057 	u32 old_mode, old_disable_bits;
3058 	int ret;
3059 
3060 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3061 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3062 		    I915_PSR_DEBUG_MODE_MASK) ||
3063 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3064 		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3065 		return -EINVAL;
3066 	}
3067 
3068 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3069 	if (ret)
3070 		return ret;
3071 
3072 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3073 	old_disable_bits = intel_dp->psr.debug &
3074 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3075 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3076 
3077 	intel_dp->psr.debug = val;
3078 
3079 	/*
3080 	 * Do it right away if it's already enabled, otherwise it will be done
3081 	 * when enabling the source.
3082 	 */
3083 	if (intel_dp->psr.enabled)
3084 		psr_irq_control(intel_dp);
3085 
3086 	mutex_unlock(&intel_dp->psr.lock);
3087 
3088 	if (old_mode != mode || old_disable_bits != disable_bits)
3089 		ret = intel_psr_fastset_force(display);
3090 
3091 	return ret;
3092 }
3093 
3094 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3095 {
3096 	struct intel_psr *psr = &intel_dp->psr;
3097 
3098 	intel_psr_disable_locked(intel_dp);
3099 	psr->sink_not_reliable = true;
3100 	/* let's make sure that sink is awaken */
3101 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3102 }
3103 
3104 static void intel_psr_work(struct work_struct *work)
3105 {
3106 	struct intel_dp *intel_dp =
3107 		container_of(work, typeof(*intel_dp), psr.work);
3108 
3109 	mutex_lock(&intel_dp->psr.lock);
3110 
3111 	if (!intel_dp->psr.enabled)
3112 		goto unlock;
3113 
3114 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
3115 		intel_psr_handle_irq(intel_dp);
3116 
3117 	/*
3118 	 * We have to make sure PSR is ready for re-enable
3119 	 * otherwise it keeps disabled until next full enable/disable cycle.
3120 	 * PSR might take some time to get fully disabled
3121 	 * and be ready for re-enable.
3122 	 */
3123 	if (!__psr_wait_for_idle_locked(intel_dp))
3124 		goto unlock;
3125 
3126 	/*
3127 	 * The delayed work can race with an invalidate hence we need to
3128 	 * recheck. Since psr_flush first clears this and then reschedules we
3129 	 * won't ever miss a flush when bailing out here.
3130 	 */
3131 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3132 		goto unlock;
3133 
3134 	intel_psr_activate(intel_dp);
3135 unlock:
3136 	mutex_unlock(&intel_dp->psr.lock);
3137 }
3138 
3139 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3140 {
3141 	struct intel_display *display = to_intel_display(intel_dp);
3142 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3143 
3144 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3145 		u32 val;
3146 
3147 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3148 			/* Send one update otherwise lag is observed in screen */
3149 			intel_de_write(display,
3150 				       CURSURFLIVE(display, intel_dp->psr.pipe),
3151 				       0);
3152 			return;
3153 		}
3154 
3155 		val = man_trk_ctl_enable_bit_get(display) |
3156 		      man_trk_ctl_partial_frame_bit_get(display) |
3157 		      man_trk_ctl_continuos_full_frame(display);
3158 		intel_de_write(display,
3159 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3160 			       val);
3161 		intel_de_write(display,
3162 			       CURSURFLIVE(display, intel_dp->psr.pipe), 0);
3163 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3164 	} else {
3165 		intel_psr_exit(intel_dp);
3166 	}
3167 }
3168 
3169 /**
3170  * intel_psr_invalidate - Invalidate PSR
3171  * @display: display device
3172  * @frontbuffer_bits: frontbuffer plane tracking bits
3173  * @origin: which operation caused the invalidate
3174  *
3175  * Since the hardware frontbuffer tracking has gaps we need to integrate
3176  * with the software frontbuffer tracking. This function gets called every
3177  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3178  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3179  *
3180  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
3181  */
3182 void intel_psr_invalidate(struct intel_display *display,
3183 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3184 {
3185 	struct intel_encoder *encoder;
3186 
3187 	if (origin == ORIGIN_FLIP)
3188 		return;
3189 
3190 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3191 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3192 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3193 
3194 		mutex_lock(&intel_dp->psr.lock);
3195 		if (!intel_dp->psr.enabled) {
3196 			mutex_unlock(&intel_dp->psr.lock);
3197 			continue;
3198 		}
3199 
3200 		pipe_frontbuffer_bits &=
3201 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3202 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3203 
3204 		if (pipe_frontbuffer_bits)
3205 			_psr_invalidate_handle(intel_dp);
3206 
3207 		mutex_unlock(&intel_dp->psr.lock);
3208 	}
3209 }
3210 /*
3211  * When we will be completely rely on PSR2 S/W tracking in future,
3212  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
3213  * event also therefore tgl_dc3co_flush_locked() require to be changed
3214  * accordingly in future.
3215  */
3216 static void
3217 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3218 		       enum fb_op_origin origin)
3219 {
3220 	struct intel_display *display = to_intel_display(intel_dp);
3221 	struct drm_i915_private *i915 = to_i915(display->drm);
3222 
3223 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3224 	    !intel_dp->psr.active)
3225 		return;
3226 
3227 	/*
3228 	 * At every frontbuffer flush flip event modified delay of delayed work,
3229 	 * when delayed work schedules that means display has been idle.
3230 	 */
3231 	if (!(frontbuffer_bits &
3232 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3233 		return;
3234 
3235 	tgl_psr2_enable_dc3co(intel_dp);
3236 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3237 			 intel_dp->psr.dc3co_exit_delay);
3238 }
3239 
3240 static void _psr_flush_handle(struct intel_dp *intel_dp)
3241 {
3242 	struct intel_display *display = to_intel_display(intel_dp);
3243 	struct drm_i915_private *dev_priv = to_i915(display->drm);
3244 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3245 
3246 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3247 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3248 			/* can we turn CFF off? */
3249 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
3250 				u32 val = man_trk_ctl_enable_bit_get(display) |
3251 					man_trk_ctl_partial_frame_bit_get(display) |
3252 					man_trk_ctl_single_full_frame_bit_get(display) |
3253 					man_trk_ctl_continuos_full_frame(display);
3254 
3255 				/*
3256 				 * Set psr2_sel_fetch_cff_enabled as false to allow selective
3257 				 * updates. Still keep cff bit enabled as we don't have proper
3258 				 * SU configuration in case update is sent for any reason after
3259 				 * sff bit gets cleared by the HW on next vblank.
3260 				 */
3261 				intel_de_write(display,
3262 					       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3263 					       val);
3264 				intel_de_write(display,
3265 					       CURSURFLIVE(display, intel_dp->psr.pipe),
3266 					       0);
3267 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3268 			}
3269 		} else {
3270 			/*
3271 			 * continuous full frame is disabled, only a single full
3272 			 * frame is required
3273 			 */
3274 			psr_force_hw_tracking_exit(intel_dp);
3275 		}
3276 	} else {
3277 		psr_force_hw_tracking_exit(intel_dp);
3278 
3279 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3280 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3281 	}
3282 }
3283 
3284 /**
3285  * intel_psr_flush - Flush PSR
3286  * @display: display device
3287  * @frontbuffer_bits: frontbuffer plane tracking bits
3288  * @origin: which operation caused the flush
3289  *
3290  * Since the hardware frontbuffer tracking has gaps we need to integrate
3291  * with the software frontbuffer tracking. This function gets called every
3292  * time frontbuffer rendering has completed and flushed out to memory. PSR
3293  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3294  *
3295  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3296  */
3297 void intel_psr_flush(struct intel_display *display,
3298 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3299 {
3300 	struct intel_encoder *encoder;
3301 
3302 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3303 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3304 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3305 
3306 		mutex_lock(&intel_dp->psr.lock);
3307 		if (!intel_dp->psr.enabled) {
3308 			mutex_unlock(&intel_dp->psr.lock);
3309 			continue;
3310 		}
3311 
3312 		pipe_frontbuffer_bits &=
3313 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3314 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3315 
3316 		/*
3317 		 * If the PSR is paused by an explicit intel_psr_paused() call,
3318 		 * we have to ensure that the PSR is not activated until
3319 		 * intel_psr_resume() is called.
3320 		 */
3321 		if (intel_dp->psr.paused)
3322 			goto unlock;
3323 
3324 		if (origin == ORIGIN_FLIP ||
3325 		    (origin == ORIGIN_CURSOR_UPDATE &&
3326 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3327 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3328 			goto unlock;
3329 		}
3330 
3331 		if (pipe_frontbuffer_bits == 0)
3332 			goto unlock;
3333 
3334 		/* By definition flush = invalidate + flush */
3335 		_psr_flush_handle(intel_dp);
3336 unlock:
3337 		mutex_unlock(&intel_dp->psr.lock);
3338 	}
3339 }
3340 
3341 /**
3342  * intel_psr_init - Init basic PSR work and mutex.
3343  * @intel_dp: Intel DP
3344  *
3345  * This function is called after the initializing connector.
3346  * (the initializing of connector treats the handling of connector capabilities)
3347  * And it initializes basic PSR stuff for each DP Encoder.
3348  */
3349 void intel_psr_init(struct intel_dp *intel_dp)
3350 {
3351 	struct intel_display *display = to_intel_display(intel_dp);
3352 	struct intel_connector *connector = intel_dp->attached_connector;
3353 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3354 
3355 	if (!(HAS_PSR(display) || HAS_DP20(display)))
3356 		return;
3357 
3358 	/*
3359 	 * HSW spec explicitly says PSR is tied to port A.
3360 	 * BDW+ platforms have a instance of PSR registers per transcoder but
3361 	 * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder
3362 	 * than eDP one.
3363 	 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
3364 	 * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
3365 	 * But GEN12 supports a instance of PSR registers per transcoder.
3366 	 */
3367 	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3368 		drm_dbg_kms(display->drm,
3369 			    "PSR condition failed: Port not supported\n");
3370 		return;
3371 	}
3372 
3373 	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3374 	    DISPLAY_VER(display) >= 20)
3375 		intel_dp->psr.source_panel_replay_support = true;
3376 
3377 	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3378 		intel_dp->psr.source_support = true;
3379 
3380 	/* Set link_standby x link_off defaults */
3381 	if (DISPLAY_VER(display) < 12)
3382 		/* For new platforms up to TGL let's respect VBT back again */
3383 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3384 
3385 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3386 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3387 	mutex_init(&intel_dp->psr.lock);
3388 }
3389 
3390 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3391 					   u8 *status, u8 *error_status)
3392 {
3393 	struct drm_dp_aux *aux = &intel_dp->aux;
3394 	int ret;
3395 	unsigned int offset;
3396 
3397 	offset = intel_dp->psr.panel_replay_enabled ?
3398 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3399 
3400 	ret = drm_dp_dpcd_readb(aux, offset, status);
3401 	if (ret != 1)
3402 		return ret;
3403 
3404 	offset = intel_dp->psr.panel_replay_enabled ?
3405 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3406 
3407 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3408 	if (ret != 1)
3409 		return ret;
3410 
3411 	*status = *status & DP_PSR_SINK_STATE_MASK;
3412 
3413 	return 0;
3414 }
3415 
3416 static void psr_alpm_check(struct intel_dp *intel_dp)
3417 {
3418 	struct intel_display *display = to_intel_display(intel_dp);
3419 	struct drm_dp_aux *aux = &intel_dp->aux;
3420 	struct intel_psr *psr = &intel_dp->psr;
3421 	u8 val;
3422 	int r;
3423 
3424 	if (!psr->sel_update_enabled)
3425 		return;
3426 
3427 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3428 	if (r != 1) {
3429 		drm_err(display->drm, "Error reading ALPM status\n");
3430 		return;
3431 	}
3432 
3433 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3434 		intel_psr_disable_locked(intel_dp);
3435 		psr->sink_not_reliable = true;
3436 		drm_dbg_kms(display->drm,
3437 			    "ALPM lock timeout error, disabling PSR\n");
3438 
3439 		/* Clearing error */
3440 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3441 	}
3442 }
3443 
3444 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3445 {
3446 	struct intel_display *display = to_intel_display(intel_dp);
3447 	struct intel_psr *psr = &intel_dp->psr;
3448 	u8 val;
3449 	int r;
3450 
3451 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3452 	if (r != 1) {
3453 		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3454 		return;
3455 	}
3456 
3457 	if (val & DP_PSR_CAPS_CHANGE) {
3458 		intel_psr_disable_locked(intel_dp);
3459 		psr->sink_not_reliable = true;
3460 		drm_dbg_kms(display->drm,
3461 			    "Sink PSR capability changed, disabling PSR\n");
3462 
3463 		/* Clearing it */
3464 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3465 	}
3466 }
3467 
3468 /*
3469  * On common bits:
3470  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3471  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3472  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3473  * this function is relying on PSR definitions
3474  */
3475 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3476 {
3477 	struct intel_display *display = to_intel_display(intel_dp);
3478 	struct intel_psr *psr = &intel_dp->psr;
3479 	u8 status, error_status;
3480 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3481 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3482 			  DP_PSR_LINK_CRC_ERROR;
3483 
3484 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3485 		return;
3486 
3487 	mutex_lock(&psr->lock);
3488 
3489 	psr->link_ok = false;
3490 
3491 	if (!psr->enabled)
3492 		goto exit;
3493 
3494 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3495 		drm_err(display->drm,
3496 			"Error reading PSR status or error status\n");
3497 		goto exit;
3498 	}
3499 
3500 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3501 	    (error_status & errors)) {
3502 		intel_psr_disable_locked(intel_dp);
3503 		psr->sink_not_reliable = true;
3504 	}
3505 
3506 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3507 	    !error_status)
3508 		drm_dbg_kms(display->drm,
3509 			    "PSR sink internal error, disabling PSR\n");
3510 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3511 		drm_dbg_kms(display->drm,
3512 			    "PSR RFB storage error, disabling PSR\n");
3513 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3514 		drm_dbg_kms(display->drm,
3515 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3516 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3517 		drm_dbg_kms(display->drm,
3518 			    "PSR Link CRC error, disabling PSR\n");
3519 
3520 	if (error_status & ~errors)
3521 		drm_err(display->drm,
3522 			"PSR_ERROR_STATUS unhandled errors %x\n",
3523 			error_status & ~errors);
3524 	/* clear status register */
3525 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3526 
3527 	if (!psr->panel_replay_enabled) {
3528 		psr_alpm_check(intel_dp);
3529 		psr_capability_changed_check(intel_dp);
3530 	}
3531 
3532 exit:
3533 	mutex_unlock(&psr->lock);
3534 }
3535 
3536 bool intel_psr_enabled(struct intel_dp *intel_dp)
3537 {
3538 	bool ret;
3539 
3540 	if (!CAN_PSR(intel_dp))
3541 		return false;
3542 
3543 	mutex_lock(&intel_dp->psr.lock);
3544 	ret = intel_dp->psr.enabled;
3545 	mutex_unlock(&intel_dp->psr.lock);
3546 
3547 	return ret;
3548 }
3549 
3550 /**
3551  * intel_psr_link_ok - return psr->link_ok
3552  * @intel_dp: struct intel_dp
3553  *
3554  * We are seeing unexpected link re-trainings with some panels. This is caused
3555  * by panel stating bad link status after PSR is enabled. Code checking link
3556  * status can call this to ensure it can ignore bad link status stated by the
3557  * panel I.e. if panel is stating bad link and intel_psr_link_ok is stating link
3558  * is ok caller should rely on latter.
3559  *
3560  * Return value of link_ok
3561  */
3562 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3563 {
3564 	bool ret;
3565 
3566 	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3567 	    !intel_dp_is_edp(intel_dp))
3568 		return false;
3569 
3570 	mutex_lock(&intel_dp->psr.lock);
3571 	ret = intel_dp->psr.link_ok;
3572 	mutex_unlock(&intel_dp->psr.lock);
3573 
3574 	return ret;
3575 }
3576 
3577 /**
3578  * intel_psr_lock - grab PSR lock
3579  * @crtc_state: the crtc state
3580  *
3581  * This is initially meant to be used by around CRTC update, when
3582  * vblank sensitive registers are updated and we need grab the lock
3583  * before it to avoid vblank evasion.
3584  */
3585 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3586 {
3587 	struct intel_display *display = to_intel_display(crtc_state);
3588 	struct intel_encoder *encoder;
3589 
3590 	if (!crtc_state->has_psr)
3591 		return;
3592 
3593 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3594 					     crtc_state->uapi.encoder_mask) {
3595 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3596 
3597 		mutex_lock(&intel_dp->psr.lock);
3598 		break;
3599 	}
3600 }
3601 
3602 /**
3603  * intel_psr_unlock - release PSR lock
3604  * @crtc_state: the crtc state
3605  *
3606  * Release the PSR lock that was held during pipe update.
3607  */
3608 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3609 {
3610 	struct intel_display *display = to_intel_display(crtc_state);
3611 	struct intel_encoder *encoder;
3612 
3613 	if (!crtc_state->has_psr)
3614 		return;
3615 
3616 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3617 					     crtc_state->uapi.encoder_mask) {
3618 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3619 
3620 		mutex_unlock(&intel_dp->psr.lock);
3621 		break;
3622 	}
3623 }
3624 
3625 static void
3626 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3627 {
3628 	struct intel_display *display = to_intel_display(intel_dp);
3629 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3630 	const char *status = "unknown";
3631 	u32 val, status_val;
3632 
3633 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3634 					  intel_dp->psr.panel_replay_enabled)) {
3635 		static const char * const live_status[] = {
3636 			"IDLE",
3637 			"CAPTURE",
3638 			"CAPTURE_FS",
3639 			"SLEEP",
3640 			"BUFON_FW",
3641 			"ML_UP",
3642 			"SU_STANDBY",
3643 			"FAST_SLEEP",
3644 			"DEEP_SLEEP",
3645 			"BUF_ON",
3646 			"TG_ON"
3647 		};
3648 		val = intel_de_read(display,
3649 				    EDP_PSR2_STATUS(display, cpu_transcoder));
3650 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3651 		if (status_val < ARRAY_SIZE(live_status))
3652 			status = live_status[status_val];
3653 	} else {
3654 		static const char * const live_status[] = {
3655 			"IDLE",
3656 			"SRDONACK",
3657 			"SRDENT",
3658 			"BUFOFF",
3659 			"BUFON",
3660 			"AUXACK",
3661 			"SRDOFFACK",
3662 			"SRDENT_ON",
3663 		};
3664 		val = intel_de_read(display,
3665 				    psr_status_reg(display, cpu_transcoder));
3666 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3667 		if (status_val < ARRAY_SIZE(live_status))
3668 			status = live_status[status_val];
3669 	}
3670 
3671 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3672 }
3673 
3674 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3675 				      struct seq_file *m)
3676 {
3677 	struct intel_psr *psr = &intel_dp->psr;
3678 
3679 	seq_printf(m, "Sink support: PSR = %s",
3680 		   str_yes_no(psr->sink_support));
3681 
3682 	if (psr->sink_support)
3683 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3684 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3685 		seq_printf(m, " (Early Transport)");
3686 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3687 	seq_printf(m, ", Panel Replay Selective Update = %s",
3688 		   str_yes_no(psr->sink_panel_replay_su_support));
3689 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3690 		seq_printf(m, " (Early Transport)");
3691 	seq_printf(m, "\n");
3692 }
3693 
3694 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3695 				 struct seq_file *m)
3696 {
3697 	struct intel_psr *psr = &intel_dp->psr;
3698 	const char *status, *mode, *region_et;
3699 
3700 	if (psr->enabled)
3701 		status = " enabled";
3702 	else
3703 		status = "disabled";
3704 
3705 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3706 		mode = "Panel Replay Selective Update";
3707 	else if (psr->panel_replay_enabled)
3708 		mode = "Panel Replay";
3709 	else if (psr->sel_update_enabled)
3710 		mode = "PSR2";
3711 	else if (psr->enabled)
3712 		mode = "PSR1";
3713 	else
3714 		mode = "";
3715 
3716 	if (psr->su_region_et_enabled)
3717 		region_et = " (Early Transport)";
3718 	else
3719 		region_et = "";
3720 
3721 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3722 }
3723 
3724 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3725 {
3726 	struct intel_display *display = to_intel_display(intel_dp);
3727 	struct drm_i915_private *dev_priv = to_i915(display->drm);
3728 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3729 	struct intel_psr *psr = &intel_dp->psr;
3730 	intel_wakeref_t wakeref;
3731 	bool enabled;
3732 	u32 val, psr2_ctl;
3733 
3734 	intel_psr_sink_capability(intel_dp, m);
3735 
3736 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3737 		return 0;
3738 
3739 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3740 	mutex_lock(&psr->lock);
3741 
3742 	intel_psr_print_mode(intel_dp, m);
3743 
3744 	if (!psr->enabled) {
3745 		seq_printf(m, "PSR sink not reliable: %s\n",
3746 			   str_yes_no(psr->sink_not_reliable));
3747 
3748 		goto unlock;
3749 	}
3750 
3751 	if (psr->panel_replay_enabled) {
3752 		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
3753 
3754 		if (intel_dp_is_edp(intel_dp))
3755 			psr2_ctl = intel_de_read(display,
3756 						 EDP_PSR2_CTL(display,
3757 							      cpu_transcoder));
3758 
3759 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3760 	} else if (psr->sel_update_enabled) {
3761 		val = intel_de_read(display,
3762 				    EDP_PSR2_CTL(display, cpu_transcoder));
3763 		enabled = val & EDP_PSR2_ENABLE;
3764 	} else {
3765 		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
3766 		enabled = val & EDP_PSR_ENABLE;
3767 	}
3768 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3769 		   str_enabled_disabled(enabled), val);
3770 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
3771 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
3772 			   psr2_ctl);
3773 	psr_source_status(intel_dp, m);
3774 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3775 		   psr->busy_frontbuffer_bits);
3776 
3777 	/*
3778 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
3779 	 */
3780 	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
3781 	seq_printf(m, "Performance counter: %u\n",
3782 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3783 
3784 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3785 		seq_printf(m, "Last attempted entry at: %lld\n",
3786 			   psr->last_entry_attempt);
3787 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3788 	}
3789 
3790 	if (psr->sel_update_enabled) {
3791 		u32 su_frames_val[3];
3792 		int frame;
3793 
3794 		/*
3795 		 * Reading all 3 registers before hand to minimize crossing a
3796 		 * frame boundary between register reads
3797 		 */
3798 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3799 			val = intel_de_read(display,
3800 					    PSR2_SU_STATUS(display, cpu_transcoder, frame));
3801 			su_frames_val[frame / 3] = val;
3802 		}
3803 
3804 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3805 
3806 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3807 			u32 su_blocks;
3808 
3809 			su_blocks = su_frames_val[frame / 3] &
3810 				    PSR2_SU_STATUS_MASK(frame);
3811 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3812 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3813 		}
3814 
3815 		seq_printf(m, "PSR2 selective fetch: %s\n",
3816 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3817 	}
3818 
3819 unlock:
3820 	mutex_unlock(&psr->lock);
3821 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3822 
3823 	return 0;
3824 }
3825 
3826 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3827 {
3828 	struct intel_display *display = m->private;
3829 	struct intel_dp *intel_dp = NULL;
3830 	struct intel_encoder *encoder;
3831 
3832 	if (!HAS_PSR(display))
3833 		return -ENODEV;
3834 
3835 	/* Find the first EDP which supports PSR */
3836 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3837 		intel_dp = enc_to_intel_dp(encoder);
3838 		break;
3839 	}
3840 
3841 	if (!intel_dp)
3842 		return -ENODEV;
3843 
3844 	return intel_psr_status(m, intel_dp);
3845 }
3846 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3847 
3848 static int
3849 i915_edp_psr_debug_set(void *data, u64 val)
3850 {
3851 	struct intel_display *display = data;
3852 	struct drm_i915_private *dev_priv = to_i915(display->drm);
3853 	struct intel_encoder *encoder;
3854 	intel_wakeref_t wakeref;
3855 	int ret = -ENODEV;
3856 
3857 	if (!HAS_PSR(display))
3858 		return ret;
3859 
3860 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3861 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3862 
3863 		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
3864 
3865 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3866 
3867 		// TODO: split to each transcoder's PSR debug state
3868 		ret = intel_psr_debug_set(intel_dp, val);
3869 
3870 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3871 	}
3872 
3873 	return ret;
3874 }
3875 
3876 static int
3877 i915_edp_psr_debug_get(void *data, u64 *val)
3878 {
3879 	struct intel_display *display = data;
3880 	struct intel_encoder *encoder;
3881 
3882 	if (!HAS_PSR(display))
3883 		return -ENODEV;
3884 
3885 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3886 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3887 
3888 		// TODO: split to each transcoder's PSR debug state
3889 		*val = READ_ONCE(intel_dp->psr.debug);
3890 		return 0;
3891 	}
3892 
3893 	return -ENODEV;
3894 }
3895 
3896 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3897 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3898 			"%llu\n");
3899 
3900 void intel_psr_debugfs_register(struct intel_display *display)
3901 {
3902 	struct drm_minor *minor = display->drm->primary;
3903 
3904 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3905 			    display, &i915_edp_psr_debug_fops);
3906 
3907 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3908 			    display, &i915_edp_psr_status_fops);
3909 }
3910 
3911 static const char *psr_mode_str(struct intel_dp *intel_dp)
3912 {
3913 	if (intel_dp->psr.panel_replay_enabled)
3914 		return "PANEL-REPLAY";
3915 	else if (intel_dp->psr.enabled)
3916 		return "PSR";
3917 
3918 	return "unknown";
3919 }
3920 
3921 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3922 {
3923 	struct intel_connector *connector = m->private;
3924 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3925 	static const char * const sink_status[] = {
3926 		"inactive",
3927 		"transition to active, capture and display",
3928 		"active, display from RFB",
3929 		"active, capture and display on sink device timings",
3930 		"transition to inactive, capture and display, timing re-sync",
3931 		"reserved",
3932 		"reserved",
3933 		"sink internal error",
3934 	};
3935 	const char *str;
3936 	int ret;
3937 	u8 status, error_status;
3938 
3939 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3940 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3941 		return -ENODEV;
3942 	}
3943 
3944 	if (connector->base.status != connector_status_connected)
3945 		return -ENODEV;
3946 
3947 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3948 	if (ret)
3949 		return ret;
3950 
3951 	status &= DP_PSR_SINK_STATE_MASK;
3952 	if (status < ARRAY_SIZE(sink_status))
3953 		str = sink_status[status];
3954 	else
3955 		str = "unknown";
3956 
3957 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3958 
3959 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3960 
3961 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3962 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3963 			    DP_PSR_LINK_CRC_ERROR))
3964 		seq_puts(m, ":\n");
3965 	else
3966 		seq_puts(m, "\n");
3967 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3968 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3969 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3970 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3971 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3972 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3973 
3974 	return ret;
3975 }
3976 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3977 
3978 static int i915_psr_status_show(struct seq_file *m, void *data)
3979 {
3980 	struct intel_connector *connector = m->private;
3981 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3982 
3983 	return intel_psr_status(m, intel_dp);
3984 }
3985 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3986 
3987 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3988 {
3989 	struct intel_display *display = to_intel_display(connector);
3990 	struct dentry *root = connector->base.debugfs_entry;
3991 
3992 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3993 	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3994 		return;
3995 
3996 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3997 			    connector, &i915_psr_sink_status_fops);
3998 
3999 	if (HAS_PSR(display) || HAS_DP20(display))
4000 		debugfs_create_file("i915_psr_status", 0444, root,
4001 				    connector, &i915_psr_status_fops);
4002 }
4003