xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 73b7fd4b209263a92726daca6453a37ecb89eb9d)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_vblank.h>
30 
31 #include "i915_drv.h"
32 #include "i915_reg.h"
33 #include "intel_alpm.h"
34 #include "intel_atomic.h"
35 #include "intel_crtc.h"
36 #include "intel_cursor_regs.h"
37 #include "intel_ddi.h"
38 #include "intel_de.h"
39 #include "intel_display_irq.h"
40 #include "intel_display_rpm.h"
41 #include "intel_display_types.h"
42 #include "intel_dmc.h"
43 #include "intel_dp.h"
44 #include "intel_dp_aux.h"
45 #include "intel_frontbuffer.h"
46 #include "intel_hdmi.h"
47 #include "intel_psr.h"
48 #include "intel_psr_regs.h"
49 #include "intel_snps_phy.h"
50 #include "intel_vblank.h"
51 #include "intel_vrr.h"
52 #include "skl_universal_plane.h"
53 
54 /**
55  * DOC: Panel Self Refresh (PSR/SRD)
56  *
57  * Since Haswell the display controller supports Panel Self-Refresh on display
58  * panels which have a remote frame buffer (RFB) implemented according to the
59  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
60  * standby states when the system is idle but the display is on, as it
61  * eliminates display refresh requests to DDR memory completely as long as the
62  * frame buffer for that display is unchanged.
63  *
64  * Panel Self Refresh must be supported by both Hardware (source) and
65  * Panel (sink).
66  *
67  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
68  * to power down the link and memory controller. For DSI panels the same idea
69  * is called "manual mode".
70  *
71  * The implementation uses the hardware-based PSR support which automatically
72  * enters/exits self-refresh mode. The hardware takes care of sending the
73  * required DP aux message and could even retrain the link (that part isn't
74  * enabled yet though). The hardware also keeps track of any frontbuffer
75  * changes to know when to exit self-refresh mode again. Unfortunately that
76  * part doesn't work too well, which is why the i915 PSR support uses the
77  * software frontbuffer tracking to make sure it doesn't miss a screen update.
78  * For this integration intel_psr_invalidate() and intel_psr_flush() get called
79  * by the frontbuffer tracking code (see the call-flow sketch below). Note that
80  * because of locking issues the self-refresh re-enable code is done from a work
81  * queue, which must be correctly synchronized/cancelled when shutting down the pipe.
82  *
83  * DC3CO (DC3 clock off)
84  *
85  * On top of PSR2, GEN12 adds an intermediate power saving state that turns
86  * the clock off automatically during the PSR2 idle state.
87  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
88  * entry/exit allows the HW to enter a low-power state even when page flipping
89  * periodically (for instance a 30fps video playback scenario).
90  *
91  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
92  * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
93  * frames. If no other flip occurs and that work function executes, DC3CO is
94  * disabled and PSR2 is configured to enter deep sleep again, resetting once
95  * more in case of another flip.
96  * Front buffer modifications do not trigger DC3CO activation on purpose as it
97  * would bring a lot of complexity and most of the modern systems will only
98  * use page flips.
99  */
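
/*
 * Illustrative call-flow sketch only (not part of the driver): how the
 * software frontbuffer tracking described above hands display updates to
 * PSR. The callers live in intel_frontbuffer.c; argument lists are elided
 * here, so treat this as a sketch rather than the exact call chain.
 *
 *   intel_frontbuffer_invalidate(...)    frontbuffer is about to be written
 *     -> intel_psr_invalidate(...)       force a PSR exit and keep PSR
 *                                        inactive while the write is pending
 *
 *   intel_frontbuffer_flush(...)         write finished / frame flipped
 *     -> intel_psr_flush(...)            schedule psr.work to re-activate
 *                                        self-refresh once things go idle
 */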
100 
101 /*
102  * Description of PSR mask bits:
103  *
104  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
105  *
106  *  When unmasked (nearly) all display register writes (eg. even
107  *  SWF) trigger a PSR exit. Some registers are excluded from this
108  *  and they have a more specific mask (described below). On icl+
109  *  this bit no longer exists and is effectively always set.
110  *
111  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
112  *
113  *  When unmasked (nearly) all pipe/plane register writes
114  *  trigger a PSR exit. Some plane registers are excluded from this
115  *  and they have a more specific mask (described below).
116  *
117  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
118  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
119  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
120  *
121  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
122  *  SPR_SURF/CURBASE are not included in this and instead are
123  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
124  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
125  *
126  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
127  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
128  *
129  *  When unmasked PSR is blocked as long as the sprite
130  *  plane is enabled. skl+ with their universal planes no
131  *  longer have a mask bit like this, and no plane being
132  *  enabled blocks PSR.
133  *
134  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
135  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
136  *
137  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
138  *  this doesn't exist but CURPOS is included in the
139  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
140  *
141  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
142  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
143  *
144  *  When unmasked PSR is blocked as long as vblank and/or vsync
145  *  interrupt is unmasked in IMR *and* enabled in IER.
146  *
147  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
148  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
149  *
150  *  Selects whether PSR exit generates an extra vblank before
151  *  the first frame is transmitted. Also note the opposite polarity
152  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
153  *  unmasked==do not generate the extra vblank).
154  *
155  *  With DC states enabled the extra vblank happens after link training,
156  *  with DC states disabled it happens immediately upon PSR exit trigger.
157  *  No idea as of now why there is a difference. HSW/BDW (which don't
158  *  even have DMC) always generate it after link training. Go figure.
159  *
160  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
161  *  and thus won't latch until the first vblank. So with DC states
162  *  enabled the register effectively uses the reset value during DC5
163  *  exit+PSR exit sequence, and thus the bit does nothing until
164  *  latched by the vblank that it was trying to prevent from being
165  *  generated in the first place. So we should probably call this
166  *  one a chicken/egg bit instead on skl+.
167  *
168  *  In standby mode (as opposed to link-off) this makes no difference
169  *  as the timing generator keeps running the whole time generating
170  *  normal periodic vblanks.
171  *
172  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
173  *  and doing so makes the behaviour match the skl+ reset value.
174  *
175  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
176  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
177  *
178  *  On BDW without this bit no vblanks whatsoever are
179  *  generated after PSR exit. On HSW this has no apparent effect.
180  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
181  *
182  * The rest of the bits are more self-explanatory and/or
183  * irrelevant for normal operation.
184  *
185  * Description of the intel_crtc_state variables has_psr, has_panel_replay and
186  * has_sel_update (see also the illustrative sketch after this comment block):
187  *
188  *  has_psr (alone):					PSR1
189  *  has_psr + has_sel_update:				PSR2
190  *  has_psr + has_panel_replay:				Panel Replay
191  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
192  *
193  * Description of some intel_psr variables: enabled, panel_replay_enabled and
194  * sel_update_enabled:
195  *
196  *  enabled (alone):						PSR1
197  *  enabled + sel_update_enabled:				PSR2
198  *  enabled + panel_replay_enabled:				Panel Replay
199  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
200  */
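
/*
 * Illustrative sketch only (the helper below is hypothetical and not used
 * anywhere in the driver): decoding the intel_crtc_state flag combinations
 * listed above into a human readable mode name.
 *
 *	static const char *psr_mode_str(const struct intel_crtc_state *crtc_state)
 *	{
 *		if (!crtc_state->has_psr)
 *			return "none";
 *
 *		if (crtc_state->has_panel_replay)
 *			return crtc_state->has_sel_update ?
 *				"Panel Replay Selective Update" : "Panel Replay";
 *
 *		return crtc_state->has_sel_update ? "PSR2" : "PSR1";
 *	}
 */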
201 
202 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
203 			   (intel_dp)->psr.source_support)
204 
205 bool intel_encoder_can_psr(struct intel_encoder *encoder)
206 {
207 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
208 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
209 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
210 	else
211 		return false;
212 }
213 
214 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
215 				  const struct intel_crtc_state *crtc_state)
216 {
217 	/*
218 	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
219 	 * the output is enabled. For non-eDP outputs the main link is always
220 	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
221 	 * for eDP.
222 	 *
223 	 * TODO:
224 	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
225 	 *   the ALPM with main-link off mode is not enabled.
226 	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
227 	 *   main-link off mode is added for it and this mode gets enabled.
228 	 */
229 	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
230 	       intel_encoder_can_psr(encoder);
231 }
232 
233 static bool psr_global_enabled(struct intel_dp *intel_dp)
234 {
235 	struct intel_display *display = to_intel_display(intel_dp);
236 	struct intel_connector *connector = intel_dp->attached_connector;
237 
238 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
239 	case I915_PSR_DEBUG_DEFAULT:
240 		if (display->params.enable_psr == -1)
241 			return intel_dp_is_edp(intel_dp) ?
242 				connector->panel.vbt.psr.enable :
243 				true;
244 		return display->params.enable_psr;
245 	case I915_PSR_DEBUG_DISABLE:
246 		return false;
247 	default:
248 		return true;
249 	}
250 }
251 
252 static bool psr2_global_enabled(struct intel_dp *intel_dp)
253 {
254 	struct intel_display *display = to_intel_display(intel_dp);
255 
256 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
257 	case I915_PSR_DEBUG_DISABLE:
258 	case I915_PSR_DEBUG_FORCE_PSR1:
259 		return false;
260 	default:
261 		if (display->params.enable_psr == 1)
262 			return false;
263 		return true;
264 	}
265 }
266 
267 static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
268 {
269 	struct intel_display *display = to_intel_display(intel_dp);
270 
271 	if (display->params.enable_psr != -1)
272 		return false;
273 
274 	return true;
275 }
276 
277 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
278 {
279 	struct intel_display *display = to_intel_display(intel_dp);
280 
281 	if ((display->params.enable_psr != -1) ||
282 	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
283 		return false;
284 	return true;
285 }
286 
287 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
288 {
289 	struct intel_display *display = to_intel_display(intel_dp);
290 
291 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
292 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
293 }
294 
295 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
296 {
297 	struct intel_display *display = to_intel_display(intel_dp);
298 
299 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
300 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
301 }
302 
303 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
304 {
305 	struct intel_display *display = to_intel_display(intel_dp);
306 
307 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
308 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
309 }
310 
311 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
312 {
313 	struct intel_display *display = to_intel_display(intel_dp);
314 
315 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
316 		EDP_PSR_MASK(intel_dp->psr.transcoder);
317 }
318 
319 static i915_reg_t psr_ctl_reg(struct intel_display *display,
320 			      enum transcoder cpu_transcoder)
321 {
322 	if (DISPLAY_VER(display) >= 8)
323 		return EDP_PSR_CTL(display, cpu_transcoder);
324 	else
325 		return HSW_SRD_CTL;
326 }
327 
328 static i915_reg_t psr_debug_reg(struct intel_display *display,
329 				enum transcoder cpu_transcoder)
330 {
331 	if (DISPLAY_VER(display) >= 8)
332 		return EDP_PSR_DEBUG(display, cpu_transcoder);
333 	else
334 		return HSW_SRD_DEBUG;
335 }
336 
337 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
338 				   enum transcoder cpu_transcoder)
339 {
340 	if (DISPLAY_VER(display) >= 8)
341 		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
342 	else
343 		return HSW_SRD_PERF_CNT;
344 }
345 
346 static i915_reg_t psr_status_reg(struct intel_display *display,
347 				 enum transcoder cpu_transcoder)
348 {
349 	if (DISPLAY_VER(display) >= 8)
350 		return EDP_PSR_STATUS(display, cpu_transcoder);
351 	else
352 		return HSW_SRD_STATUS;
353 }
354 
355 static i915_reg_t psr_imr_reg(struct intel_display *display,
356 			      enum transcoder cpu_transcoder)
357 {
358 	if (DISPLAY_VER(display) >= 12)
359 		return TRANS_PSR_IMR(display, cpu_transcoder);
360 	else
361 		return EDP_PSR_IMR;
362 }
363 
364 static i915_reg_t psr_iir_reg(struct intel_display *display,
365 			      enum transcoder cpu_transcoder)
366 {
367 	if (DISPLAY_VER(display) >= 12)
368 		return TRANS_PSR_IIR(display, cpu_transcoder);
369 	else
370 		return EDP_PSR_IIR;
371 }
372 
373 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
374 				  enum transcoder cpu_transcoder)
375 {
376 	if (DISPLAY_VER(display) >= 8)
377 		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
378 	else
379 		return HSW_SRD_AUX_CTL;
380 }
381 
382 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
383 				   enum transcoder cpu_transcoder, int i)
384 {
385 	if (DISPLAY_VER(display) >= 8)
386 		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
387 	else
388 		return HSW_SRD_AUX_DATA(i);
389 }
390 
391 static void psr_irq_control(struct intel_dp *intel_dp)
392 {
393 	struct intel_display *display = to_intel_display(intel_dp);
394 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
395 	u32 mask;
396 
397 	if (intel_dp->psr.panel_replay_enabled)
398 		return;
399 
400 	mask = psr_irq_psr_error_bit_get(intel_dp);
401 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
402 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
403 			psr_irq_pre_entry_bit_get(intel_dp);
404 
405 	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
406 		     psr_irq_mask_get(intel_dp), ~mask);
407 }
408 
409 static void psr_event_print(struct intel_display *display,
410 			    u32 val, bool sel_update_enabled)
411 {
412 	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
413 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
414 		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
415 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
416 		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
417 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
418 		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
419 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
420 		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
421 	if (val & PSR_EVENT_GRAPHICS_RESET)
422 		drm_dbg_kms(display->drm, "\tGraphics reset\n");
423 	if (val & PSR_EVENT_PCH_INTERRUPT)
424 		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
425 	if (val & PSR_EVENT_MEMORY_UP)
426 		drm_dbg_kms(display->drm, "\tMemory up\n");
427 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
428 		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
429 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
430 		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
431 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
432 		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
433 	if (val & PSR_EVENT_REGISTER_UPDATE)
434 		drm_dbg_kms(display->drm, "\tRegister updated\n");
435 	if (val & PSR_EVENT_HDCP_ENABLE)
436 		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
437 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
438 		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
439 	if (val & PSR_EVENT_VBI_ENABLE)
440 		drm_dbg_kms(display->drm, "\tVBI enabled\n");
441 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
442 		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
443 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
444 		drm_dbg_kms(display->drm, "\tPSR disabled\n");
445 }
446 
447 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
448 {
449 	struct intel_display *display = to_intel_display(intel_dp);
450 	struct drm_i915_private *dev_priv = to_i915(display->drm);
451 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
452 	ktime_t time_ns =  ktime_get();
453 
454 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
455 		intel_dp->psr.last_entry_attempt = time_ns;
456 		drm_dbg_kms(display->drm,
457 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
458 			    transcoder_name(cpu_transcoder));
459 	}
460 
461 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
462 		intel_dp->psr.last_exit = time_ns;
463 		drm_dbg_kms(display->drm,
464 			    "[transcoder %s] PSR exit completed\n",
465 			    transcoder_name(cpu_transcoder));
466 
467 		if (DISPLAY_VER(display) >= 9) {
468 			u32 val;
469 
470 			val = intel_de_rmw(display,
471 					   PSR_EVENT(display, cpu_transcoder),
472 					   0, 0);
473 
474 			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
475 		}
476 	}
477 
478 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
479 		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
480 			 transcoder_name(cpu_transcoder));
481 
482 		intel_dp->psr.irq_aux_error = true;
483 
484 		/*
485 		 * If this interrupt is not masked it will keep firing
486 		 * so fast that it prevents the scheduled work from
487 		 * running.
488 		 * Also, after a PSR error we don't want to arm PSR
489 		 * again, so we don't care about unmasking the interrupt
490 		 * or clearing irq_aux_error.
491 		 */
492 		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
493 			     0, psr_irq_psr_error_bit_get(intel_dp));
494 
495 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
496 	}
497 }
498 
499 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
500 {
501 	struct intel_display *display = to_intel_display(intel_dp);
502 	u8 val = 8; /* assume the worst if we can't read the value */
503 
504 	if (drm_dp_dpcd_readb(&intel_dp->aux,
505 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
506 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
507 	else
508 		drm_dbg_kms(display->drm,
509 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
510 	return val;
511 }
512 
513 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
514 {
515 	u8 su_capability = 0;
516 
517 	if (intel_dp->psr.sink_panel_replay_su_support)
518 		drm_dp_dpcd_readb(&intel_dp->aux,
519 				  DP_PANEL_PANEL_REPLAY_CAPABILITY,
520 				  &su_capability);
521 	else
522 		su_capability = intel_dp->psr_dpcd[1];
523 
524 	return su_capability;
525 }
526 
527 static unsigned int
528 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
529 {
530 	return intel_dp->psr.sink_panel_replay_su_support ?
531 		DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
532 		DP_PSR2_SU_X_GRANULARITY;
533 }
534 
535 static unsigned int
536 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
537 {
538 	return intel_dp->psr.sink_panel_replay_su_support ?
539 		DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
540 		DP_PSR2_SU_Y_GRANULARITY;
541 }
542 
543 /*
544  * Note: Bits related to granularity are the same in panel replay and PSR
545  * registers. Rely on the PSR definitions for these "common" bits.
546  */
547 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
548 {
549 	struct intel_display *display = to_intel_display(intel_dp);
550 	ssize_t r;
551 	u16 w;
552 	u8 y;
553 
554 	/*
555 	 * TODO: Do we need to take into account panels supporting both PSR and
556 	 * Panel Replay?
557 	 */
558 
559 	/*
560 	 * If the sink doesn't have specific granularity requirements, set the
561 	 * legacy ones.
562 	 */
563 	if (!(intel_dp_get_su_capability(intel_dp) &
564 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
565 		/* As PSR2 HW sends full lines, we do not care about x granularity */
566 		w = 4;
567 		y = 4;
568 		goto exit;
569 	}
570 
571 	r = drm_dp_dpcd_read(&intel_dp->aux,
572 			     intel_dp_get_su_x_granularity_offset(intel_dp),
573 			     &w, 2);
574 	if (r != 2)
575 		drm_dbg_kms(display->drm,
576 			    "Unable to read selective update x granularity\n");
577 	/*
578 	 * Spec says that if the value read is 0 the default granularity should
579 	 * be used instead.
580 	 */
581 	if (r != 2 || w == 0)
582 		w = 4;
583 
584 	r = drm_dp_dpcd_read(&intel_dp->aux,
585 			     intel_dp_get_su_y_granularity_offset(intel_dp),
586 			     &y, 1);
587 	if (r != 1) {
588 		drm_dbg_kms(display->drm,
589 			    "Unable to read selective update y granularity\n");
590 		y = 4;
591 	}
592 	if (y == 0)
593 		y = 1;
594 
595 exit:
596 	intel_dp->psr.su_w_granularity = w;
597 	intel_dp->psr.su_y_granularity = y;
598 }
599 
600 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
601 {
602 	struct intel_display *display = to_intel_display(intel_dp);
603 
604 	if (intel_dp_is_edp(intel_dp)) {
605 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
606 			drm_dbg_kms(display->drm,
607 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
608 			return;
609 		}
610 
611 		if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
612 			drm_dbg_kms(display->drm,
613 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
614 			return;
615 		}
616 	}
617 
618 	intel_dp->psr.sink_panel_replay_support = true;
619 
620 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
621 		intel_dp->psr.sink_panel_replay_su_support = true;
622 
623 	drm_dbg_kms(display->drm,
624 		    "Panel replay %sis supported by panel\n",
625 		    intel_dp->psr.sink_panel_replay_su_support ?
626 		    "selective_update " : "");
627 }
628 
629 static void _psr_init_dpcd(struct intel_dp *intel_dp)
630 {
631 	struct intel_display *display = to_intel_display(intel_dp);
632 
633 	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
634 		    intel_dp->psr_dpcd[0]);
635 
636 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
637 		drm_dbg_kms(display->drm,
638 			    "PSR support not currently available for this panel\n");
639 		return;
640 	}
641 
642 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
643 		drm_dbg_kms(display->drm,
644 			    "Panel lacks power state control, PSR cannot be enabled\n");
645 		return;
646 	}
647 
648 	intel_dp->psr.sink_support = true;
649 	intel_dp->psr.sink_sync_latency =
650 		intel_dp_get_sink_sync_latency(intel_dp);
651 
652 	if (DISPLAY_VER(display) >= 9 &&
653 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
654 		bool y_req = intel_dp->psr_dpcd[1] &
655 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
656 
657 		/*
658 		 * All panels that support PSR version 03h (PSR2 +
659 		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
660 		 * only sure that it is going to be used when required by the
661 		 * panel. This way the panel is capable of doing selective
662 		 * updates without an AUX frame sync.
663 		 *
664 		 * To support PSR version 02h and PSR version 03h panels
665 		 * without the Y-coordinate requirement we would need to
666 		 * enable GTC first.
667 		 */
668 		intel_dp->psr.sink_psr2_support = y_req &&
669 			intel_alpm_aux_wake_supported(intel_dp);
670 		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
671 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
672 	}
673 }
674 
675 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
676 {
677 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
678 			 sizeof(intel_dp->psr_dpcd));
679 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
680 			  &intel_dp->pr_dpcd);
681 
682 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
683 		_panel_replay_init_dpcd(intel_dp);
684 
685 	if (intel_dp->psr_dpcd[0])
686 		_psr_init_dpcd(intel_dp);
687 
688 	if (intel_dp->psr.sink_psr2_support ||
689 	    intel_dp->psr.sink_panel_replay_su_support)
690 		intel_dp_get_su_granularity(intel_dp);
691 }
692 
693 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
694 {
695 	struct intel_display *display = to_intel_display(intel_dp);
696 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
697 	u32 aux_clock_divider, aux_ctl;
698 	/* write DP_SET_POWER=D0 */
699 	static const u8 aux_msg[] = {
700 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
701 		[1] = (DP_SET_POWER >> 8) & 0xff,
702 		[2] = DP_SET_POWER & 0xff,
703 		[3] = 1 - 1,
704 		[4] = DP_SET_POWER_D0,
705 	};
706 	int i;
707 
708 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
709 	for (i = 0; i < sizeof(aux_msg); i += 4)
710 		intel_de_write(display,
711 			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
712 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
713 
714 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
715 
716 	/* Start with bits set for DDI_AUX_CTL register */
717 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
718 					     aux_clock_divider);
719 
720 	/* Select only valid bits for SRD_AUX_CTL */
721 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
722 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
723 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
724 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
725 
726 	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
727 		       aux_ctl);
728 }
729 
730 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
731 {
732 	struct intel_display *display = to_intel_display(intel_dp);
733 
734 	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
735 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
736 		return false;
737 
738 	return panel_replay ?
739 		intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
740 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
741 		psr2_su_region_et_global_enabled(intel_dp);
742 }
743 
744 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
745 				      const struct intel_crtc_state *crtc_state)
746 {
747 	u8 val = DP_PANEL_REPLAY_ENABLE |
748 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
749 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
750 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
751 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
752 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
753 
754 	if (crtc_state->has_sel_update)
755 		val |= DP_PANEL_REPLAY_SU_ENABLE;
756 
757 	if (crtc_state->enable_psr2_su_region_et)
758 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
759 
760 	if (crtc_state->req_psr2_sdp_prior_scanline)
761 		panel_replay_config2 |=
762 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
763 
764 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
765 
766 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
767 			   panel_replay_config2);
768 }
769 
770 static void _psr_enable_sink(struct intel_dp *intel_dp,
771 			     const struct intel_crtc_state *crtc_state)
772 {
773 	struct intel_display *display = to_intel_display(intel_dp);
774 	u8 val = 0;
775 
776 	if (crtc_state->has_sel_update) {
777 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
778 	} else {
779 		if (intel_dp->psr.link_standby)
780 			val |= DP_PSR_MAIN_LINK_ACTIVE;
781 
782 		if (DISPLAY_VER(display) >= 8)
783 			val |= DP_PSR_CRC_VERIFICATION;
784 	}
785 
786 	if (crtc_state->req_psr2_sdp_prior_scanline)
787 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
788 
789 	if (crtc_state->enable_psr2_su_region_et)
790 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
791 
792 	if (intel_dp->psr.entry_setup_frames > 0)
793 		val |= DP_PSR_FRAME_CAPTURE;
794 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
795 
796 	val |= DP_PSR_ENABLE;
797 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
798 }
799 
800 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
801 				  const struct intel_crtc_state *crtc_state)
802 {
803 	crtc_state->has_panel_replay ?
804 		_panel_replay_enable_sink(intel_dp, crtc_state) :
805 		_psr_enable_sink(intel_dp, crtc_state);
806 
807 	if (intel_dp_is_edp(intel_dp))
808 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
809 }
810 
811 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
812 {
813 	if (CAN_PANEL_REPLAY(intel_dp))
814 		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
815 				   DP_PANEL_REPLAY_ENABLE);
816 }
817 
818 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
819 {
820 	struct intel_display *display = to_intel_display(intel_dp);
821 	struct intel_connector *connector = intel_dp->attached_connector;
822 	u32 val = 0;
823 
824 	if (DISPLAY_VER(display) >= 11)
825 		val |= EDP_PSR_TP4_TIME_0us;
826 
827 	if (display->params.psr_safest_params) {
828 		val |= EDP_PSR_TP1_TIME_2500us;
829 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
830 		goto check_tp3_sel;
831 	}
832 
833 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
834 		val |= EDP_PSR_TP1_TIME_0us;
835 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
836 		val |= EDP_PSR_TP1_TIME_100us;
837 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
838 		val |= EDP_PSR_TP1_TIME_500us;
839 	else
840 		val |= EDP_PSR_TP1_TIME_2500us;
841 
842 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
843 		val |= EDP_PSR_TP2_TP3_TIME_0us;
844 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
845 		val |= EDP_PSR_TP2_TP3_TIME_100us;
846 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
847 		val |= EDP_PSR_TP2_TP3_TIME_500us;
848 	else
849 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
850 
851 	/*
852 	 * WA 0479: hsw,bdw
853 	 * "Do not skip both TP1 and TP2/TP3"
854 	 */
855 	if (DISPLAY_VER(display) < 9 &&
856 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
857 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
858 		val |= EDP_PSR_TP2_TP3_TIME_100us;
859 
860 check_tp3_sel:
861 	if (intel_dp_source_supports_tps3(display) &&
862 	    drm_dp_tps3_supported(intel_dp->dpcd))
863 		val |= EDP_PSR_TP_TP1_TP3;
864 	else
865 		val |= EDP_PSR_TP_TP1_TP2;
866 
867 	return val;
868 }
869 
870 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
871 {
872 	struct intel_display *display = to_intel_display(intel_dp);
873 	struct intel_connector *connector = intel_dp->attached_connector;
874 	int idle_frames;
875 
876 	/* Let's use 6 as the minimum to cover all known cases including the
877 	 * off-by-one issue that HW has in some cases.
878 	 */
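	/*
	 * Example (illustrative numbers): VBT idle_frames = 2 and
	 * sink_sync_latency = 3 gives max(6, 2) = 6, then max(6, 3 + 1) = 6.
	 */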
879 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
880 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
881 
882 	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
883 		idle_frames = 0xf;
884 
885 	return idle_frames;
886 }
887 
888 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
889 {
890 	struct intel_display *display = to_intel_display(intel_dp);
891 	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
892 	struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
893 
894 	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
895 		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
896 		intel_dp->psr.active_non_psr_pipes ||
897 		READ_ONCE(vblank->enabled);
898 }
899 
900 static void hsw_activate_psr1(struct intel_dp *intel_dp)
901 {
902 	struct intel_display *display = to_intel_display(intel_dp);
903 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
904 	u32 max_sleep_time = 0x1f;
905 	u32 val = EDP_PSR_ENABLE;
906 
907 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
908 
909 	if (DISPLAY_VER(display) < 20)
910 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
911 
912 	if (display->platform.haswell)
913 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
914 
915 	if (intel_dp->psr.link_standby)
916 		val |= EDP_PSR_LINK_STANDBY;
917 
918 	val |= intel_psr1_get_tp_time(intel_dp);
919 
920 	if (DISPLAY_VER(display) >= 8)
921 		val |= EDP_PSR_CRC_ENABLE;
922 
923 	if (DISPLAY_VER(display) >= 20)
924 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
925 
926 	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
927 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
928 
929 	/* Wa_16025596647 */
930 	if ((DISPLAY_VER(display) == 20 ||
931 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
932 	    is_dc5_dc6_blocked(intel_dp))
933 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
934 								       intel_dp->psr.pipe,
935 								       true);
936 }
937 
938 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
939 {
940 	struct intel_display *display = to_intel_display(intel_dp);
941 	struct intel_connector *connector = intel_dp->attached_connector;
942 	u32 val = 0;
943 
944 	if (display->params.psr_safest_params)
945 		return EDP_PSR2_TP2_TIME_2500us;
946 
947 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
948 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
949 		val |= EDP_PSR2_TP2_TIME_50us;
950 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
951 		val |= EDP_PSR2_TP2_TIME_100us;
952 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
953 		val |= EDP_PSR2_TP2_TIME_500us;
954 	else
955 		val |= EDP_PSR2_TP2_TIME_2500us;
956 
957 	return val;
958 }
959 
960 static int psr2_block_count_lines(struct intel_dp *intel_dp)
961 {
962 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
963 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
964 }
965 
966 static int psr2_block_count(struct intel_dp *intel_dp)
967 {
968 	return psr2_block_count_lines(intel_dp) / 4;
969 }
970 
971 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
972 {
973 	u8 frames_before_su_entry;
974 
975 	frames_before_su_entry = max_t(u8,
976 				       intel_dp->psr.sink_sync_latency + 1,
977 				       2);
978 
979 	/* Entry setup frames must be at least 1 less than frames before SU entry */
980 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
981 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
982 
983 	return frames_before_su_entry;
984 }
985 
986 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
987 {
988 	struct intel_display *display = to_intel_display(intel_dp);
989 	struct intel_psr *psr = &intel_dp->psr;
990 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
991 
992 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
993 		u32 val = psr->su_region_et_enabled ?
994 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
995 
996 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
997 			val |= EDP_PSR2_SU_SDP_SCANLINE;
998 
999 		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
1000 			       val);
1001 	}
1002 
1003 	intel_de_rmw(display,
1004 		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1005 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1006 
1007 	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1008 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1009 }
1010 
1011 static void hsw_activate_psr2(struct intel_dp *intel_dp)
1012 {
1013 	struct intel_display *display = to_intel_display(intel_dp);
1014 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1015 	u32 val = EDP_PSR2_ENABLE;
1016 	u32 psr_val = 0;
1017 	u8 idle_frames;
1018 
1019 	/* Wa_16025596647 */
1020 	if ((DISPLAY_VER(display) == 20 ||
1021 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1022 	    is_dc5_dc6_blocked(intel_dp))
1023 		idle_frames = 0;
1024 	else
1025 		idle_frames = psr_compute_idle_frames(intel_dp);
1026 	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
1027 
1028 	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
1029 		val |= EDP_SU_TRACK_ENABLE;
1030 
1031 	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1032 		val |= EDP_Y_COORDINATE_ENABLE;
1033 
1034 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1035 
1036 	val |= intel_psr2_get_tp_time(intel_dp);
1037 
1038 	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1039 		if (psr2_block_count(intel_dp) > 2)
1040 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1041 		else
1042 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1043 	}
1044 
1045 	/* Wa_22012278275:adl-p */
1046 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1047 		static const u8 map[] = {
1048 			2, /* 5 lines */
1049 			1, /* 6 lines */
1050 			0, /* 7 lines */
1051 			3, /* 8 lines */
1052 			6, /* 9 lines */
1053 			5, /* 10 lines */
1054 			4, /* 11 lines */
1055 			7, /* 12 lines */
1056 		};
1057 		/*
1058 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1059 		 * comments below for more information
1060 		 */
1061 		int tmp;
1062 
1063 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1064 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1065 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1066 
1067 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1068 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1069 	} else if (DISPLAY_VER(display) >= 20) {
1070 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1071 	} else if (DISPLAY_VER(display) >= 12) {
1072 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1073 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1074 	} else if (DISPLAY_VER(display) >= 9) {
1075 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1076 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1077 	}
1078 
1079 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1080 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1081 
1082 	if (DISPLAY_VER(display) >= 20)
1083 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1084 
1085 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1086 		u32 tmp;
1087 
1088 		tmp = intel_de_read(display,
1089 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1090 		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1091 	} else if (HAS_PSR2_SEL_FETCH(display)) {
1092 		intel_de_write(display,
1093 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1094 	}
1095 
1096 	if (intel_dp->psr.su_region_et_enabled)
1097 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1098 
1099 	/*
1100 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
1101 	 * recommending keep this bit unset while PSR2 is enabled.
1102 	 */
1103 	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1104 
1105 	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1106 }
1107 
1108 static bool
1109 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1110 {
1111 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1112 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1113 	else if (DISPLAY_VER(display) >= 12)
1114 		return cpu_transcoder == TRANSCODER_A;
1115 	else if (DISPLAY_VER(display) >= 9)
1116 		return cpu_transcoder == TRANSCODER_EDP;
1117 	else
1118 		return false;
1119 }
1120 
1121 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1122 {
1123 	if (!crtc_state->hw.active)
1124 		return 0;
1125 
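	/* e.g. a 60 Hz mode gives DIV_ROUND_UP(1000 * 1000, 60) = 16667 us */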
1126 	return DIV_ROUND_UP(1000 * 1000,
1127 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1128 }
1129 
1130 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1131 				     u32 idle_frames)
1132 {
1133 	struct intel_display *display = to_intel_display(intel_dp);
1134 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1135 
1136 	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1137 		     EDP_PSR2_IDLE_FRAMES_MASK,
1138 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1139 }
1140 
1141 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1142 {
1143 	struct intel_display *display = to_intel_display(intel_dp);
1144 
1145 	psr2_program_idle_frames(intel_dp, 0);
1146 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1147 }
1148 
1149 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1150 {
1151 	struct intel_display *display = to_intel_display(intel_dp);
1152 
1153 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1154 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1155 }
1156 
1157 static void tgl_dc3co_disable_work(struct work_struct *work)
1158 {
1159 	struct intel_dp *intel_dp =
1160 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1161 
1162 	mutex_lock(&intel_dp->psr.lock);
1163 	/* If delayed work is pending, it is not idle */
1164 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1165 		goto unlock;
1166 
1167 	tgl_psr2_disable_dc3co(intel_dp);
1168 unlock:
1169 	mutex_unlock(&intel_dp->psr.lock);
1170 }
1171 
1172 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1173 {
1174 	if (!intel_dp->psr.dc3co_exitline)
1175 		return;
1176 
1177 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1178 	/* Before PSR2 exit disallow dc3co */
1179 	tgl_psr2_disable_dc3co(intel_dp);
1180 }
1181 
1182 static bool
1183 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1184 			      struct intel_crtc_state *crtc_state)
1185 {
1186 	struct intel_display *display = to_intel_display(intel_dp);
1187 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1188 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1189 	enum port port = dig_port->base.port;
1190 
1191 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1192 		return pipe <= PIPE_B && port <= PORT_B;
1193 	else
1194 		return pipe == PIPE_A && port == PORT_A;
1195 }
1196 
1197 static void
1198 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1199 				  struct intel_crtc_state *crtc_state)
1200 {
1201 	struct intel_display *display = to_intel_display(intel_dp);
1202 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1203 	struct i915_power_domains *power_domains = &display->power.domains;
1204 	u32 exit_scanlines;
1205 
1206 	/*
1207 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1208 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1209 	 * is applied. B.Specs:49196
1210 	 */
1211 	return;
1212 
1213 	/*
1214 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1215 	 * TODO: when the issue is addressed, this restriction should be removed.
1216 	 */
1217 	if (crtc_state->enable_psr2_sel_fetch)
1218 		return;
1219 
1220 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1221 		return;
1222 
1223 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1224 		return;
1225 
1226 	/* Wa_16011303918:adl-p */
1227 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1228 		return;
1229 
1230 	/*
1231 	 * DC3CO Exit time 200us B.Spec 49196
1232 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
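	 *
	 * Example (illustrative timing): with a ~14.8 us line time (1080p60,
	 * htotal 2200 at a 148.5 MHz pixel clock), 200 us rounds up to 14
	 * lines, so exit_scanlines = 14 + 1 = 15.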
1233 	 */
1234 	exit_scanlines =
1235 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1236 
1237 	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1238 		return;
1239 
1240 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1241 }
1242 
1243 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1244 					      struct intel_crtc_state *crtc_state)
1245 {
1246 	struct intel_display *display = to_intel_display(intel_dp);
1247 
1248 	if (!display->params.enable_psr2_sel_fetch &&
1249 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1250 		drm_dbg_kms(display->drm,
1251 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1252 		return false;
1253 	}
1254 
1255 	if (crtc_state->uapi.async_flip) {
1256 		drm_dbg_kms(display->drm,
1257 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1258 		return false;
1259 	}
1260 
1261 	return crtc_state->enable_psr2_sel_fetch = true;
1262 }
1263 
1264 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1265 				   struct intel_crtc_state *crtc_state)
1266 {
1267 	struct intel_display *display = to_intel_display(intel_dp);
1268 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1269 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1270 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1271 	u16 y_granularity = 0;
1272 
1273 	/* PSR2 HW only sends full lines so we only need to validate the width */
1274 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1275 		return false;
1276 
1277 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1278 		return false;
1279 
1280 	/* HW tracking is only aligned to 4 lines */
1281 	if (!crtc_state->enable_psr2_sel_fetch)
1282 		return intel_dp->psr.su_y_granularity == 4;
1283 
1284 	/*
1285 	 * adl_p and mtl platforms have 1 line granularity.
1286 	 * For other platforms with SW tracking we can adjust the y coordinates
1287 	 * to match the sink requirement if it is a multiple of 4.
1288 	 */
1289 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1290 		y_granularity = intel_dp->psr.su_y_granularity;
1291 	else if (intel_dp->psr.su_y_granularity <= 2)
1292 		y_granularity = 4;
1293 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1294 		y_granularity = intel_dp->psr.su_y_granularity;
1295 
1296 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1297 		return false;
1298 
1299 	if (crtc_state->dsc.compression_enable &&
1300 	    vdsc_cfg->slice_height % y_granularity)
1301 		return false;
1302 
1303 	crtc_state->su_y_granularity = y_granularity;
1304 	return true;
1305 }
1306 
1307 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1308 							struct intel_crtc_state *crtc_state)
1309 {
1310 	struct intel_display *display = to_intel_display(intel_dp);
1311 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1312 	u32 hblank_total, hblank_ns, req_ns;
1313 
1314 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1315 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1316 
1317 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
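	/*
	 * Worked example (illustrative numbers): 2 lanes at HBR
	 * (port_clock = 270000 kHz, i.e. a 270 MHz link symbol clock) gives
	 * req_ns = ((60 / 2) + 11) * 1000 / 270 ~= 151 ns.
	 */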
1318 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1319 
1320 	if ((hblank_ns - req_ns) > 100)
1321 		return true;
1322 
1323 	/* Not supported <13 / Wa_22012279113:adl-p */
1324 	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1325 		return false;
1326 
1327 	crtc_state->req_psr2_sdp_prior_scanline = true;
1328 	return true;
1329 }
1330 
1331 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1332 					const struct drm_display_mode *adjusted_mode)
1333 {
1334 	struct intel_display *display = to_intel_display(intel_dp);
1335 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1336 	int entry_setup_frames = 0;
1337 
1338 	if (psr_setup_time < 0) {
1339 		drm_dbg_kms(display->drm,
1340 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1341 			    intel_dp->psr_dpcd[1]);
1342 		return -ETIME;
1343 	}
1344 
1345 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1346 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1347 		if (DISPLAY_VER(display) >= 20) {
1348 			/* setup entry frames can be up to 3 frames */
1349 			entry_setup_frames = 1;
1350 			drm_dbg_kms(display->drm,
1351 				    "PSR setup entry frames %d\n",
1352 				    entry_setup_frames);
1353 		} else {
1354 			drm_dbg_kms(display->drm,
1355 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1356 				    psr_setup_time);
1357 			return -ETIME;
1358 		}
1359 	}
1360 
1361 	return entry_setup_frames;
1362 }
1363 
1364 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1365 				       const struct intel_crtc_state *crtc_state,
1366 				       bool aux_less)
1367 {
1368 	struct intel_display *display = to_intel_display(intel_dp);
1369 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1370 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1371 	int wake_lines;
1372 
1373 	if (aux_less)
1374 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1375 	else
1376 		wake_lines = DISPLAY_VER(display) < 20 ?
1377 			psr2_block_count_lines(intel_dp) :
1378 			intel_dp->alpm_parameters.io_wake_lines;
1379 
1380 	if (crtc_state->req_psr2_sdp_prior_scanline)
1381 		vblank -= 1;
1382 
1383 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1384 	if (vblank < wake_lines)
1385 		return false;
1386 
1387 	return true;
1388 }
1389 
1390 static bool alpm_config_valid(struct intel_dp *intel_dp,
1391 			      const struct intel_crtc_state *crtc_state,
1392 			      bool aux_less)
1393 {
1394 	struct intel_display *display = to_intel_display(intel_dp);
1395 
1396 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1397 		drm_dbg_kms(display->drm,
1398 			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1399 		return false;
1400 	}
1401 
1402 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1403 		drm_dbg_kms(display->drm,
1404 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1405 		return false;
1406 	}
1407 
1408 	return true;
1409 }
1410 
1411 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1412 				    struct intel_crtc_state *crtc_state)
1413 {
1414 	struct intel_display *display = to_intel_display(intel_dp);
1415 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1416 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1417 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1418 
1419 	if (!intel_dp->psr.sink_psr2_support)
1420 		return false;
1421 
1422 	/* JSL and EHL only support eDP 1.3 */
1423 	if (display->platform.jasperlake || display->platform.elkhartlake) {
1424 		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1425 		return false;
1426 	}
1427 
1428 	/* Wa_16011181250 */
1429 	if (display->platform.rocketlake || display->platform.alderlake_s ||
1430 	    display->platform.dg2) {
1431 		drm_dbg_kms(display->drm,
1432 			    "PSR2 is defeatured for this platform\n");
1433 		return false;
1434 	}
1435 
1436 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1437 		drm_dbg_kms(display->drm,
1438 			    "PSR2 not completely functional in this stepping\n");
1439 		return false;
1440 	}
1441 
1442 	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1443 		drm_dbg_kms(display->drm,
1444 			    "PSR2 not supported in transcoder %s\n",
1445 			    transcoder_name(crtc_state->cpu_transcoder));
1446 		return false;
1447 	}
1448 
1449 	/*
1450 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1451 	 * resolution requires DSC to be enabled, priority is given to DSC
1452 	 * over PSR2.
1453 	 */
1454 	if (crtc_state->dsc.compression_enable &&
1455 	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
1456 		drm_dbg_kms(display->drm,
1457 			    "PSR2 cannot be enabled since DSC is enabled\n");
1458 		return false;
1459 	}
1460 
1461 	if (DISPLAY_VER(display) >= 20) {
1462 		psr_max_h = crtc_hdisplay;
1463 		psr_max_v = crtc_vdisplay;
1464 		max_bpp = crtc_state->pipe_bpp;
1465 	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1466 		psr_max_h = 5120;
1467 		psr_max_v = 3200;
1468 		max_bpp = 30;
1469 	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1470 		psr_max_h = 4096;
1471 		psr_max_v = 2304;
1472 		max_bpp = 24;
1473 	} else if (DISPLAY_VER(display) == 9) {
1474 		psr_max_h = 3640;
1475 		psr_max_v = 2304;
1476 		max_bpp = 24;
1477 	}
1478 
1479 	if (crtc_state->pipe_bpp > max_bpp) {
1480 		drm_dbg_kms(display->drm,
1481 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1482 			    crtc_state->pipe_bpp, max_bpp);
1483 		return false;
1484 	}
1485 
1486 	/* Wa_16011303918:adl-p */
1487 	if (crtc_state->vrr.enable &&
1488 	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1489 		drm_dbg_kms(display->drm,
1490 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1491 		return false;
1492 	}
1493 
1494 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1495 		return false;
1496 
1497 	if (!crtc_state->enable_psr2_sel_fetch &&
1498 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1499 		drm_dbg_kms(display->drm,
1500 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1501 			    crtc_hdisplay, crtc_vdisplay,
1502 			    psr_max_h, psr_max_v);
1503 		return false;
1504 	}
1505 
1506 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1507 
1508 	return true;
1509 }
1510 
1511 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1512 					  struct intel_crtc_state *crtc_state)
1513 {
1514 	struct intel_display *display = to_intel_display(intel_dp);
1515 
1516 	if (HAS_PSR2_SEL_FETCH(display) &&
1517 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1518 	    !HAS_PSR_HW_TRACKING(display)) {
1519 		drm_dbg_kms(display->drm,
1520 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1521 		goto unsupported;
1522 	}
1523 
1524 	if (!psr2_global_enabled(intel_dp)) {
1525 		drm_dbg_kms(display->drm,
1526 			    "Selective update disabled by flag\n");
1527 		goto unsupported;
1528 	}
1529 
1530 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1531 		goto unsupported;
1532 
1533 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1534 		drm_dbg_kms(display->drm,
1535 			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1536 		goto unsupported;
1537 	}
1538 
1539 	if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1540 					     !intel_dp->psr.sink_panel_replay_su_support))
1541 		goto unsupported;
1542 
1543 	if (crtc_state->crc_enabled) {
1544 		drm_dbg_kms(display->drm,
1545 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1546 		goto unsupported;
1547 	}
1548 
1549 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1550 		drm_dbg_kms(display->drm,
1551 			    "Selective update not enabled, SU granularity not compatible\n");
1552 		goto unsupported;
1553 	}
1554 
1555 	crtc_state->enable_psr2_su_region_et =
1556 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1557 
1558 	return true;
1559 
1560 unsupported:
1561 	crtc_state->enable_psr2_sel_fetch = false;
1562 	return false;
1563 }
1564 
1565 static bool _psr_compute_config(struct intel_dp *intel_dp,
1566 				struct intel_crtc_state *crtc_state)
1567 {
1568 	struct intel_display *display = to_intel_display(intel_dp);
1569 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1570 	int entry_setup_frames;
1571 
1572 	if (!CAN_PSR(intel_dp))
1573 		return false;
1574 
1575 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1576 
1577 	if (entry_setup_frames >= 0) {
1578 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1579 	} else {
1580 		drm_dbg_kms(display->drm,
1581 			    "PSR condition failed: PSR setup timing not met\n");
1582 		return false;
1583 	}
1584 
1585 	return true;
1586 }
1587 
1588 static bool
1589 _panel_replay_compute_config(struct intel_dp *intel_dp,
1590 			     const struct intel_crtc_state *crtc_state,
1591 			     const struct drm_connector_state *conn_state)
1592 {
1593 	struct intel_display *display = to_intel_display(intel_dp);
1594 	struct intel_connector *connector =
1595 		to_intel_connector(conn_state->connector);
1596 	struct intel_hdcp *hdcp = &connector->hdcp;
1597 
1598 	if (!CAN_PANEL_REPLAY(intel_dp))
1599 		return false;
1600 
1601 	if (!panel_replay_global_enabled(intel_dp)) {
1602 		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1603 		return false;
1604 	}
1605 
1606 	if (crtc_state->crc_enabled) {
1607 		drm_dbg_kms(display->drm,
1608 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1609 		return false;
1610 	}
1611 
1612 	if (!intel_dp_is_edp(intel_dp))
1613 		return true;
1614 
1615 	/* Remaining checks are for eDP only */
1616 
1617 	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1618 	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1619 		return false;
1620 
1621 	/* 128b/132b Panel Replay is not supported on eDP */
1622 	if (intel_dp_is_uhbr(crtc_state)) {
1623 		drm_dbg_kms(display->drm,
1624 			    "Panel Replay is not supported with 128b/132b\n");
1625 		return false;
1626 	}
1627 
1628 	/* HW will not allow Panel Replay on eDP when HDCP is enabled */
1629 	if (conn_state->content_protection ==
1630 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1631 	    (conn_state->content_protection ==
1632 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1633 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1634 		drm_dbg_kms(display->drm,
1635 			    "Panel Replay is not supported with HDCP\n");
1636 		return false;
1637 	}
1638 
1639 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1640 		return false;
1641 
1642 	return true;
1643 }
1644 
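/*
 * Wa_18037818876: PSR1 with a non-zero entry setup frame count can hang the
 * PSR FSM on display version 20, so the caller disables PSR for such
 * configurations (see intel_psr_compute_config() below).
 */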
1645 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1646 					   struct intel_crtc_state *crtc_state)
1647 {
1648 	struct intel_display *display = to_intel_display(intel_dp);
1649 
1650 	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1651 		!crtc_state->has_sel_update);
1652 }
1653 
1654 void intel_psr_compute_config(struct intel_dp *intel_dp,
1655 			      struct intel_crtc_state *crtc_state,
1656 			      struct drm_connector_state *conn_state)
1657 {
1658 	struct intel_display *display = to_intel_display(intel_dp);
1659 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1660 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1661 	struct intel_crtc *crtc;
1662 	u8 active_pipes = 0;
1663 
1664 	if (!psr_global_enabled(intel_dp)) {
1665 		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1666 		return;
1667 	}
1668 
1669 	if (intel_dp->psr.sink_not_reliable) {
1670 		drm_dbg_kms(display->drm,
1671 			    "PSR sink implementation is not reliable\n");
1672 		return;
1673 	}
1674 
1675 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1676 		drm_dbg_kms(display->drm,
1677 			    "PSR condition failed: Interlaced mode enabled\n");
1678 		return;
1679 	}
1680 
1681 	/*
1682 	 * FIXME figure out what is wrong with PSR+joiner and
1683 	 * fix it. Presumably something related to the fact that
1684 	 * PSR is a transcoder level feature.
1685 	 */
1686 	if (crtc_state->joiner_pipes) {
1687 		drm_dbg_kms(display->drm,
1688 			    "PSR disabled due to joiner\n");
1689 		return;
1690 	}
1691 
1692 	/*
1693 	 * Currently PSR/PR doesn't work reliably with VRR enabled.
1694 	 */
1695 	if (crtc_state->vrr.enable)
1696 		return;
1697 
1698 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1699 								    crtc_state,
1700 								    conn_state);
1701 
1702 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1703 		_psr_compute_config(intel_dp, crtc_state);
1704 
1705 	if (!crtc_state->has_psr)
1706 		return;
1707 
1708 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1709 
1710 	/* Wa_18037818876 */
1711 	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
1712 		crtc_state->has_psr = false;
1713 		drm_dbg_kms(display->drm,
1714 			    "PSR disabled to workaround PSR FSM hang issue\n");
1715 	}
1716 
1717 	/* Rest is for Wa_16025596647 */
1718 	if (DISPLAY_VER(display) != 20 &&
1719 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1720 		return;
1721 
1722 	/* Not needed by Panel Replay */
1723 	if (crtc_state->has_panel_replay)
1724 		return;
1725 
1726 	/* We ignore possible secondary PSR/Panel Replay capable eDP */
1727 	for_each_intel_crtc(display->drm, crtc)
1728 		active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1729 
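	/* Also take pipes being enabled/disabled in this atomic state into account */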
1730 	active_pipes = intel_calc_active_pipes(state, active_pipes);
1731 
1732 	crtc_state->active_non_psr_pipes = active_pipes &
1733 		~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1734 }
1735 
1736 void intel_psr_get_config(struct intel_encoder *encoder,
1737 			  struct intel_crtc_state *pipe_config)
1738 {
1739 	struct intel_display *display = to_intel_display(encoder);
1740 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1741 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1742 	struct intel_dp *intel_dp;
1743 	u32 val;
1744 
1745 	if (!dig_port)
1746 		return;
1747 
1748 	intel_dp = &dig_port->dp;
1749 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1750 		return;
1751 
1752 	mutex_lock(&intel_dp->psr.lock);
1753 	if (!intel_dp->psr.enabled)
1754 		goto unlock;
1755 
1756 	if (intel_dp->psr.panel_replay_enabled) {
1757 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1758 	} else {
1759 		/*
1760 		 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1761 		 * enabled/disabled because of frontbuffer tracking and others.
1762 		 */
1763 		pipe_config->has_psr = true;
1764 	}
1765 
1766 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1767 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1768 
1769 	if (!intel_dp->psr.sel_update_enabled)
1770 		goto unlock;
1771 
1772 	if (HAS_PSR2_SEL_FETCH(display)) {
1773 		val = intel_de_read(display,
1774 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1775 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1776 			pipe_config->enable_psr2_sel_fetch = true;
1777 	}
1778 
1779 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1780 
1781 	if (DISPLAY_VER(display) >= 12) {
1782 		val = intel_de_read(display,
1783 				    TRANS_EXITLINE(display, cpu_transcoder));
1784 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1785 	}
1786 unlock:
1787 	mutex_unlock(&intel_dp->psr.lock);
1788 }
1789 
1790 static void intel_psr_activate(struct intel_dp *intel_dp)
1791 {
1792 	struct intel_display *display = to_intel_display(intel_dp);
1793 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1794 
1795 	drm_WARN_ON(display->drm,
1796 		    transcoder_has_psr2(display, cpu_transcoder) &&
1797 		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1798 
1799 	drm_WARN_ON(display->drm,
1800 		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1801 
1802 	drm_WARN_ON(display->drm, intel_dp->psr.active);
1803 
1804 	lockdep_assert_held(&intel_dp->psr.lock);
1805 
1806 	/* PSR1, PSR2 and Panel Replay are mutually exclusive. */
1807 	if (intel_dp->psr.panel_replay_enabled)
1808 		dg2_activate_panel_replay(intel_dp);
1809 	else if (intel_dp->psr.sel_update_enabled)
1810 		hsw_activate_psr2(intel_dp);
1811 	else
1812 		hsw_activate_psr1(intel_dp);
1813 
1814 	intel_dp->psr.active = true;
1815 }
1816 
1817 /*
1818  * Wa_16013835468
1819  * Wa_14015648006
1820  */
1821 static void wm_optimization_wa(struct intel_dp *intel_dp,
1822 			       const struct intel_crtc_state *crtc_state)
1823 {
1824 	struct intel_display *display = to_intel_display(intel_dp);
1825 	enum pipe pipe = intel_dp->psr.pipe;
1826 	bool activate = false;
1827 
1828 	/* Wa_14015648006 */
1829 	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1830 		activate = true;
1831 
1832 	/* Wa_16013835468 */
1833 	if (DISPLAY_VER(display) == 12 &&
1834 	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1835 	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1836 		activate = true;
1837 
1838 	if (activate)
1839 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1840 			     0, LATENCY_REPORTING_REMOVED(pipe));
1841 	else
1842 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1843 			     LATENCY_REPORTING_REMOVED(pipe), 0);
1844 }
1845 
1846 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1847 				    const struct intel_crtc_state *crtc_state)
1848 {
1849 	struct intel_display *display = to_intel_display(intel_dp);
1850 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1851 	u32 mask = 0;
1852 
1853 	/*
1854 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1855 	 * SKL+ use hardcoded values for PSR AUX transactions.
1856 	 */
1857 	if (DISPLAY_VER(display) < 9)
1858 		hsw_psr_setup_aux(intel_dp);
1859 
1860 	/*
1861 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1862 	 * mask LPSP to avoid dependency on other drivers that might block
1863 	 * runtime_pm and to prevent other hw tracking issues, now that we
1864 	 * can rely on frontbuffer tracking.
1865 	 *
1866 	 * From bspec, prior to LunarLake:
1867 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1868 	 * panel replay mode.
1869 	 *
1870 	 * From bspec, LunarLake onwards:
1871 	 * Panel Replay on DP: No bits are applicable
1872 	 * Panel Replay on eDP: All bits are applicable
1873 	 */
1874 	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1875 		mask = EDP_PSR_DEBUG_MASK_HPD;
1876 
1877 	if (intel_dp_is_edp(intel_dp)) {
1878 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1879 
1880 		/*
1881 		 * For some unknown reason on HSW non-ULT (or at least on
1882 		 * Dell Latitude E6540) external displays start to flicker
1883 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1884 		 * higher than should be possible with an external display.
1885 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1886 		 * when external displays are active.
1887 		 */
1888 		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
1889 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1890 
1891 		if (DISPLAY_VER(display) < 20)
1892 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1893 
1894 		/*
1895 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1896 		 * registers in order to keep the CURSURFLIVE tricks working :(
1897 		 */
1898 		if (IS_DISPLAY_VER(display, 9, 10))
1899 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1900 
1901 		/* allow PSR with sprite enabled */
1902 		if (display->platform.haswell)
1903 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1904 	}
1905 
1906 	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1907 
1908 	psr_irq_control(intel_dp);
1909 
1910 	/*
1911 	 * TODO: if future platforms support DC3CO in more than one
1912 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1913 	 */
1914 	if (intel_dp->psr.dc3co_exitline)
1915 		intel_de_rmw(display,
1916 			     TRANS_EXITLINE(display, cpu_transcoder),
1917 			     EXITLINE_MASK,
1918 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1919 
1920 	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1921 		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1922 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1923 			     IGNORE_PSR2_HW_TRACKING : 0);
1924 
1925 	/*
1926 	 * Wa_16013835468
1927 	 * Wa_14015648006
1928 	 */
1929 	wm_optimization_wa(intel_dp, crtc_state);
1930 
1931 	if (intel_dp->psr.sel_update_enabled) {
1932 		if (DISPLAY_VER(display) == 9)
1933 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
1934 				     PSR2_VSC_ENABLE_PROG_HEADER |
1935 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1936 
1937 		/*
1938 		 * Wa_16014451276:adlp,mtl[a0,b0]
1939 		 * All supported adlp panels have 1-based X granularity; this may
1940 		 * cause issues if unsupported panels are used.
1941 		 */
1942 		if (!intel_dp->psr.panel_replay_enabled &&
1943 		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1944 		     display->platform.alderlake_p))
1945 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
1946 				     0, ADLP_1_BASED_X_GRANULARITY);
1947 
1948 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1949 		if (!intel_dp->psr.panel_replay_enabled &&
1950 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1951 			intel_de_rmw(display,
1952 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1953 				     0,
1954 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1955 		else if (display->platform.alderlake_p)
1956 			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1957 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1958 	}
1959 
1960 	/* Wa_16025596647 */
1961 	if ((DISPLAY_VER(display) == 20 ||
1962 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1963 	    !intel_dp->psr.panel_replay_enabled)
1964 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
1965 }
1966 
1967 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1968 {
1969 	struct intel_display *display = to_intel_display(intel_dp);
1970 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1971 	u32 val;
1972 
1973 	if (intel_dp->psr.panel_replay_enabled)
1974 		goto no_err;
1975 
1976 	/*
1977 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1978 	 * will still keep the error set even after the reset done in the
1979 	 * irq_preinstall and irq_uninstall hooks.
1980 	 * Enabling PSR in this situation causes the screen to freeze the
1981 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1982 	 * to avoid any rendering problems.
1983 	 */
1984 	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1985 	val &= psr_irq_psr_error_bit_get(intel_dp);
1986 	if (val) {
1987 		intel_dp->psr.sink_not_reliable = true;
1988 		drm_dbg_kms(display->drm,
1989 			    "PSR interruption error set, not enabling PSR\n");
1990 		return false;
1991 	}
1992 
1993 no_err:
1994 	return true;
1995 }
1996 
1997 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1998 				    const struct intel_crtc_state *crtc_state)
1999 {
2000 	struct intel_display *display = to_intel_display(intel_dp);
2001 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2002 	u32 val;
2003 
2004 	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
2005 
2006 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
2007 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
2008 	intel_dp->psr.busy_frontbuffer_bits = 0;
2009 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
2010 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
2011 	/* DC5/DC6 requires at least 6 idle frames */
2012 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
2013 	intel_dp->psr.dc3co_exit_delay = val;
2014 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
2015 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
2016 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
2017 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2018 	intel_dp->psr.req_psr2_sdp_prior_scanline =
2019 		crtc_state->req_psr2_sdp_prior_scanline;
2020 	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
2021 
2022 	if (!psr_interrupt_error_check(intel_dp))
2023 		return;
2024 
2025 	if (intel_dp->psr.panel_replay_enabled)
2026 		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2027 	else
2028 		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2029 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2030 
2031 	/*
2032 	 * Enabling here only for PSR. Panel Replay enable bit is already
2033 	 * written at this point. See
2034 	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2035 	 *  - Selective Update
2036 	 *  - Region Early Transport
2037 	 *  - Selective Update Region Scanline Capture
2038 	 *  - VSC_SDP_CRC
2039 	 *  - HPD on different Errors
2040 	 *  - CRC verification
2041 	 * are written for PSR and Panel Replay here.
2042 	 */
2043 	intel_psr_enable_sink(intel_dp, crtc_state);
2044 
2045 	if (intel_dp_is_edp(intel_dp))
2046 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2047 
2048 	intel_psr_enable_source(intel_dp, crtc_state);
2049 	intel_dp->psr.enabled = true;
2050 	intel_dp->psr.pause_counter = 0;
2051 
2052 	/*
2053 	 * Link_ok is sticky and set here on PSR enable. We can assume link
2054 	 * training is complete as we never continue to PSR enable with
2055 	 * training is complete as we never continue to PSR enable with an
2056 	 * untrained link. Link_ok is kept set until the first short pulse
2057 	 * interrupt. This is targeted at working around panels reporting a bad link
2058 	 */
2059 	intel_dp->psr.link_ok = true;
2060 
2061 	intel_psr_activate(intel_dp);
2062 }
2063 
2064 static void intel_psr_exit(struct intel_dp *intel_dp)
2065 {
2066 	struct intel_display *display = to_intel_display(intel_dp);
2067 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2068 	u32 val;
2069 
2070 	if (!intel_dp->psr.active) {
2071 		if (transcoder_has_psr2(display, cpu_transcoder)) {
2072 			val = intel_de_read(display,
2073 					    EDP_PSR2_CTL(display, cpu_transcoder));
2074 			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2075 		}
2076 
2077 		val = intel_de_read(display,
2078 				    psr_ctl_reg(display, cpu_transcoder));
2079 		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2080 
2081 		return;
2082 	}
2083 
2084 	if (intel_dp->psr.panel_replay_enabled) {
2085 		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2086 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2087 	} else if (intel_dp->psr.sel_update_enabled) {
2088 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2089 
2090 		val = intel_de_rmw(display,
2091 				   EDP_PSR2_CTL(display, cpu_transcoder),
2092 				   EDP_PSR2_ENABLE, 0);
2093 
2094 		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2095 	} else {
2096 		if (DISPLAY_VER(display) == 20 ||
2097 		    IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
2098 			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
2099 								       intel_dp->psr.pipe,
2100 								       false);
2101 
2102 		val = intel_de_rmw(display,
2103 				   psr_ctl_reg(display, cpu_transcoder),
2104 				   EDP_PSR_ENABLE, 0);
2105 
2106 		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2107 	}
2108 	intel_dp->psr.active = false;
2109 }
2110 
2111 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2112 {
2113 	struct intel_display *display = to_intel_display(intel_dp);
2114 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2115 	i915_reg_t psr_status;
2116 	u32 psr_status_mask;
2117 
2118 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2119 					  intel_dp->psr.panel_replay_enabled)) {
2120 		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2121 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2122 	} else {
2123 		psr_status = psr_status_reg(display, cpu_transcoder);
2124 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2125 	}
2126 
2127 	/* Wait till PSR is idle */
2128 	if (intel_de_wait_for_clear(display, psr_status,
2129 				    psr_status_mask, 2000))
2130 		drm_err(display->drm, "Timed out waiting for PSR idle state\n");
2131 }
2132 
2133 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2134 {
2135 	struct intel_display *display = to_intel_display(intel_dp);
2136 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2137 
2138 	lockdep_assert_held(&intel_dp->psr.lock);
2139 
2140 	if (!intel_dp->psr.enabled)
2141 		return;
2142 
2143 	if (intel_dp->psr.panel_replay_enabled)
2144 		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2145 	else
2146 		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2147 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2148 
2149 	intel_psr_exit(intel_dp);
2150 	intel_psr_wait_exit_locked(intel_dp);
2151 
2152 	/*
2153 	 * Wa_16013835468
2154 	 * Wa_14015648006
2155 	 */
2156 	if (DISPLAY_VER(display) >= 11)
2157 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2158 			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2159 
2160 	if (intel_dp->psr.sel_update_enabled) {
2161 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2162 		if (!intel_dp->psr.panel_replay_enabled &&
2163 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2164 			intel_de_rmw(display,
2165 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2166 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2167 		else if (display->platform.alderlake_p)
2168 			intel_de_rmw(display, CLKGATE_DIS_MISC,
2169 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2170 	}
2171 
2172 	if (intel_dp_is_edp(intel_dp))
2173 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2174 
2175 	/* Disable PSR on Sink */
2176 	if (!intel_dp->psr.panel_replay_enabled) {
2177 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2178 
2179 		if (intel_dp->psr.sel_update_enabled)
2180 			drm_dp_dpcd_writeb(&intel_dp->aux,
2181 					   DP_RECEIVER_ALPM_CONFIG, 0);
2182 	}
2183 
2184 	/* Wa_16025596647 */
2185 	if ((DISPLAY_VER(display) == 20 ||
2186 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2187 	    !intel_dp->psr.panel_replay_enabled)
2188 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
2189 
2190 	intel_dp->psr.enabled = false;
2191 	intel_dp->psr.panel_replay_enabled = false;
2192 	intel_dp->psr.sel_update_enabled = false;
2193 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2194 	intel_dp->psr.su_region_et_enabled = false;
2195 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2196 	intel_dp->psr.active_non_psr_pipes = 0;
2197 }
2198 
2199 /**
2200  * intel_psr_disable - Disable PSR
2201  * @intel_dp: Intel DP
2202  * @old_crtc_state: old CRTC state
2203  *
2204  * This function needs to be called before disabling pipe.
2205  */
2206 void intel_psr_disable(struct intel_dp *intel_dp,
2207 		       const struct intel_crtc_state *old_crtc_state)
2208 {
2209 	struct intel_display *display = to_intel_display(intel_dp);
2210 
2211 	if (!old_crtc_state->has_psr)
2212 		return;
2213 
2214 	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2215 			!CAN_PANEL_REPLAY(intel_dp)))
2216 		return;
2217 
2218 	mutex_lock(&intel_dp->psr.lock);
2219 
2220 	intel_psr_disable_locked(intel_dp);
2221 
2222 	intel_dp->psr.link_ok = false;
2223 
2224 	mutex_unlock(&intel_dp->psr.lock);
2225 	cancel_work_sync(&intel_dp->psr.work);
2226 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2227 }
2228 
2229 /**
2230  * intel_psr_pause - Pause PSR
2231  * @intel_dp: Intel DP
2232  *
2233  * This function needs to be called after enabling PSR.
2234  */
2235 void intel_psr_pause(struct intel_dp *intel_dp)
2236 {
2237 	struct intel_psr *psr = &intel_dp->psr;
2238 
2239 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2240 		return;
2241 
2242 	mutex_lock(&psr->lock);
2243 
2244 	if (!psr->enabled) {
2245 		mutex_unlock(&psr->lock);
2246 		return;
2247 	}
2248 
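	/* Only the first pause actually deactivates PSR; nested calls just bump the counter */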
2249 	if (intel_dp->psr.pause_counter++ == 0) {
2250 		intel_psr_exit(intel_dp);
2251 		intel_psr_wait_exit_locked(intel_dp);
2252 	}
2253 
2254 	mutex_unlock(&psr->lock);
2255 
2256 	cancel_work_sync(&psr->work);
2257 	cancel_delayed_work_sync(&psr->dc3co_work);
2258 }
2259 
2260 /**
2261  * intel_psr_resume - Resume PSR
2262  * @intel_dp: Intel DP
2263  *
2264  * This function needs to be called after pausing PSR.
2265  */
2266 void intel_psr_resume(struct intel_dp *intel_dp)
2267 {
2268 	struct intel_display *display = to_intel_display(intel_dp);
2269 	struct intel_psr *psr = &intel_dp->psr;
2270 
2271 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2272 		return;
2273 
2274 	mutex_lock(&psr->lock);
2275 
2276 	if (!psr->enabled)
2277 		goto out;
2278 
2279 	if (!psr->pause_counter) {
2280 		drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2281 		goto out;
2282 	}
2283 
2284 	if (--intel_dp->psr.pause_counter == 0)
2285 		intel_psr_activate(intel_dp);
2286 
2287 out:
2288 	mutex_unlock(&psr->lock);
2289 }
2290 
2291 /**
2292  * intel_psr_needs_vblank_notification - Check if PSR need vblank enable/disable
2293  * notification.
2294  * @crtc_state: CRTC status
2295  *
2296  * We need to block DC6 entry in case of Panel Replay as enabling the VBI
2297  * doesn't prevent it. Panel Replay switches the main link off on DC entry,
2298  * which means vblank interrupts are not fired and that is a problem if
2299  * user-space is polling for vblank events. Also Wa_16025596647 needs to know
2300  * when vblank is enabled/disabled.
2301  */
2302 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2303 {
2304 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2305 	struct intel_display *display = to_intel_display(crtc_state);
2306 	struct intel_encoder *encoder;
2307 
2308 	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2309 		struct intel_dp *intel_dp;
2310 
2311 		if (!intel_encoder_is_dp(encoder))
2312 			continue;
2313 
2314 		intel_dp = enc_to_intel_dp(encoder);
2315 
2316 		if (!intel_dp_is_edp(intel_dp))
2317 			continue;
2318 
2319 		if (CAN_PANEL_REPLAY(intel_dp))
2320 			return true;
2321 
2322 		if ((DISPLAY_VER(display) == 20 ||
2323 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2324 		    CAN_PSR(intel_dp))
2325 			return true;
2326 	}
2327 
2328 	return false;
2329 }
2330 
2331 /**
2332  * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2333  * @dsb: DSB context
2334  * @state: the atomic state
2335  * @crtc: the CRTC
2336  *
2337  * Generate PSR "Frame Change" event.
2338  */
2339 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2340 					  struct intel_atomic_state *state,
2341 					  struct intel_crtc *crtc)
2342 {
2343 	const struct intel_crtc_state *crtc_state =
2344 		intel_pre_commit_crtc_state(state, crtc);
2345 	struct intel_display *display = to_intel_display(crtc);
2346 
2347 	if (crtc_state->has_psr)
2348 		intel_de_write_dsb(display, dsb,
2349 				   CURSURFLIVE(display, crtc->pipe), 0);
2350 }
2351 
2352 /**
2353  * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
2354  * @crtc_state: the crtc state
2355  *
2356  * Return minimum vblank delay needed by PSR.
2357  */
2358 int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
2359 {
2360 	struct intel_display *display = to_intel_display(crtc_state);
2361 
2362 	if (!crtc_state->has_psr)
2363 		return 0;
2364 
2365 	/* Wa_14015401596 */
2366 	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
2367 		return 1;
2368 
2369 	/* Rest is for SRD_STATUS needed on LunarLake and onwards */
2370 	if (DISPLAY_VER(display) < 20)
2371 		return 0;
2372 
2373 	/*
2374 	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
2375 	 *
2376 	 * To deterministically capture the transition of the state machine
2377 	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
2378 	 * one line after the non-delayed V. Blank.
2379 	 *
2380 	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
2381 	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
2382 	 * - TRANS_VTOTAL[ Vertical Active ])
2383 	 *
2384 	 * SRD_STATUS is used only by PSR1 on PantherLake.
2385 	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
2386 	 */
2387 
2388 	if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
2389 					   crtc_state->has_sel_update))
2390 		return 0;
2391 	else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
2392 					       intel_crtc_has_type(crtc_state,
2393 								   INTEL_OUTPUT_EDP)))
2394 		return 0;
2395 	else
2396 		return 1;
2397 }
2398 
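/*
 * The PSR2_MAN_TRK_CTL bit layout differs between platforms: ADL-P and
 * display version 14+ use the ADLP_* variants of the fields (and have no
 * separate enable bit), while older platforms use the original
 * PSR2_MAN_TRK_CTL_* fields. These helpers hide that difference.
 */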
2399 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2400 {
2401 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2402 		PSR2_MAN_TRK_CTL_ENABLE;
2403 }
2404 
2405 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2406 {
2407 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2408 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2409 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2410 }
2411 
2412 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2413 {
2414 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2415 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2416 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2417 }
2418 
2419 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2420 {
2421 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2422 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2423 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2424 }
2425 
2426 static void intel_psr_force_update(struct intel_dp *intel_dp)
2427 {
2428 	struct intel_display *display = to_intel_display(intel_dp);
2429 
2430 	/*
2431 	 * Display WA #0884: skl+
2432 	 * This documented WA for bxt can be safely applied
2433 	 * broadly so we can force HW tracking to exit PSR
2434 	 * instead of disabling and re-enabling.
2435 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2436 	 * but it makes more sense to write to the currently active
2437 	 * pipe.
2438 	 *
2439 	 * This workaround does not exist for platforms with display 10 or newer,
2440 	 * but testing proved that it works up to display 13; anything newer
2441 	 * than that will need testing.
2442 	 */
2443 	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2444 }
2445 
2446 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
2447 					  const struct intel_crtc_state *crtc_state)
2448 {
2449 	struct intel_display *display = to_intel_display(crtc_state);
2450 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2451 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2452 	struct intel_encoder *encoder;
2453 
2454 	if (!crtc_state->enable_psr2_sel_fetch)
2455 		return;
2456 
2457 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2458 					     crtc_state->uapi.encoder_mask) {
2459 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2460 
2461 		if (!dsb)
2462 			lockdep_assert_held(&intel_dp->psr.lock);
2463 
2464 		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
2465 			return;
2466 		break;
2467 	}
2468 
2469 	intel_de_write_dsb(display, dsb,
2470 			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2471 			   crtc_state->psr2_man_track_ctl);
2472 
2473 	if (!crtc_state->enable_psr2_su_region_et)
2474 		return;
2475 
2476 	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2477 			   crtc_state->pipe_srcsz_early_tpt);
2478 }
2479 
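/*
 * Compute the PSR2_MAN_TRK_CTL value for the selective update region. On
 * ADL-P and display version 14+ the region is programmed as raw start/end
 * line numbers, while older platforms express it in blocks of 4 lines with
 * a 1-based start address: e.g. a region covering lines [8, 16) is written
 * as start block 3 and end block 5 (see the calculation below).
 */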
2480 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2481 				  bool full_update)
2482 {
2483 	struct intel_display *display = to_intel_display(crtc_state);
2484 	u32 val = man_trk_ctl_enable_bit_get(display);
2485 
2486 	/* SF partial frame enable has to be set even on full update */
2487 	val |= man_trk_ctl_partial_frame_bit_get(display);
2488 
2489 	if (full_update) {
2490 		val |= man_trk_ctl_continuos_full_frame(display);
2491 		goto exit;
2492 	}
2493 
2494 	if (crtc_state->psr2_su_area.y1 == -1)
2495 		goto exit;
2496 
2497 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
2498 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2499 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2500 	} else {
2501 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2502 			    crtc_state->psr2_su_area.y1 % 4 ||
2503 			    crtc_state->psr2_su_area.y2 % 4);
2504 
2505 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2506 			crtc_state->psr2_su_area.y1 / 4 + 1);
2507 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2508 			crtc_state->psr2_su_area.y2 / 4 + 1);
2509 	}
2510 exit:
2511 	crtc_state->psr2_man_track_ctl = val;
2512 }
2513 
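/*
 * With early transport the PIPE_SRCSZ_ERLY_TPT register needs the selective
 * update region size; like PIPESRC, both dimensions are programmed as
 * size - 1.
 */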
2514 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2515 					  bool full_update)
2516 {
2517 	int width, height;
2518 
2519 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2520 		return 0;
2521 
2522 	width = drm_rect_width(&crtc_state->psr2_su_area);
2523 	height = drm_rect_height(&crtc_state->psr2_su_area);
2524 
2525 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2526 }
2527 
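/*
 * Merge a damage rect into the overall damaged area, clipped to the pipe
 * source. A y1 of -1 means "no damage accumulated yet", so the first rect
 * simply initializes the area; later rects only extend it vertically.
 */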
2528 static void clip_area_update(struct drm_rect *overlap_damage_area,
2529 			     struct drm_rect *damage_area,
2530 			     struct drm_rect *pipe_src)
2531 {
2532 	if (!drm_rect_intersect(damage_area, pipe_src))
2533 		return;
2534 
2535 	if (overlap_damage_area->y1 == -1) {
2536 		overlap_damage_area->y1 = damage_area->y1;
2537 		overlap_damage_area->y2 = damage_area->y2;
2538 		return;
2539 	}
2540 
2541 	if (damage_area->y1 < overlap_damage_area->y1)
2542 		overlap_damage_area->y1 = damage_area->y1;
2543 
2544 	if (damage_area->y2 > overlap_damage_area->y2)
2545 		overlap_damage_area->y2 = damage_area->y2;
2546 }
2547 
2548 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2549 {
2550 	struct intel_display *display = to_intel_display(crtc_state);
2551 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2552 	u16 y_alignment;
2553 
2554 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2555 	if (crtc_state->dsc.compression_enable &&
2556 	    (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2557 		y_alignment = vdsc_cfg->slice_height;
2558 	else
2559 		y_alignment = crtc_state->su_y_granularity;
2560 
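	/*
	 * Round y1 down and y2 up to the alignment, e.g. with a granularity of
	 * 4 lines a damaged range of [5, 10) becomes [4, 12).
	 */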
2561 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2562 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2563 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2564 						y_alignment) + 1) * y_alignment;
2565 }
2566 
2567 /*
2568  * When early transport is in use we need to extend the SU area to cover
2569  * the cursor fully when the cursor is in the SU area.
2570  */
2571 static void
2572 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2573 				  struct intel_crtc *crtc,
2574 				  bool *cursor_in_su_area)
2575 {
2576 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2577 	struct intel_plane_state *new_plane_state;
2578 	struct intel_plane *plane;
2579 	int i;
2580 
2581 	if (!crtc_state->enable_psr2_su_region_et)
2582 		return;
2583 
2584 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2585 		struct drm_rect inter;
2586 
2587 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2588 			continue;
2589 
2590 		if (plane->id != PLANE_CURSOR)
2591 			continue;
2592 
2593 		if (!new_plane_state->uapi.visible)
2594 			continue;
2595 
2596 		inter = crtc_state->psr2_su_area;
2597 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2598 			continue;
2599 
2600 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2601 				 &crtc_state->pipe_src);
2602 		*cursor_in_su_area = true;
2603 	}
2604 }
2605 
2606 /*
2607  * TODO: Not clear how to handle planes with negative position,
2608  * also planes are not updated if they have a negative X
2609  * position, so for now do a full update in these cases.
2610  *
2611  * Plane scaling and rotation are not supported by selective fetch and both
2612  * properties can change without a modeset, so they need to be checked at
2613  * every atomic commit.
2614  */
2615 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2616 {
2617 	if (plane_state->uapi.dst.y1 < 0 ||
2618 	    plane_state->uapi.dst.x1 < 0 ||
2619 	    plane_state->scaler_id >= 0 ||
2620 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2621 		return false;
2622 
2623 	return true;
2624 }
2625 
2626 /*
2627  * Check for pipe properties that are not supported by selective fetch.
2628  *
2629  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2630  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2631  * enabled and going to the full update path.
2632  */
2633 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2634 {
2635 	if (crtc_state->scaler_state.scaler_id >= 0)
2636 		return false;
2637 
2638 	return true;
2639 }
2640 
2641 /* Wa 14019834836 */
2642 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2643 {
2644 	struct intel_display *display = to_intel_display(crtc_state);
2645 	struct intel_encoder *encoder;
2646 	int hactive_limit;
2647 
2648 	if (crtc_state->psr2_su_area.y1 != 0 ||
2649 	    crtc_state->psr2_su_area.y2 != 0)
2650 		return;
2651 
2652 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2653 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2654 	else
2655 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2656 
2657 	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2658 		return;
2659 
2660 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2661 					     crtc_state->uapi.encoder_mask) {
2662 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2663 
2664 		if (!intel_dp_is_edp(intel_dp) &&
2665 		    intel_dp->psr.panel_replay_enabled &&
2666 		    intel_dp->psr.sel_update_enabled) {
2667 			crtc_state->psr2_su_area.y2++;
2668 			return;
2669 		}
2670 	}
2671 }
2672 
2673 static void
2674 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2675 {
2676 	struct intel_display *display = to_intel_display(crtc_state);
2677 
2678 	/* Wa_14014971492 */
2679 	if (!crtc_state->has_panel_replay &&
2680 	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2681 	      display->platform.alderlake_p || display->platform.tigerlake)) &&
2682 	    crtc_state->splitter.enable)
2683 		crtc_state->psr2_su_area.y1 = 0;
2684 
2685 	/* Wa 14019834836 */
2686 	if (DISPLAY_VER(display) == 30)
2687 		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2688 }
2689 
2690 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2691 				struct intel_crtc *crtc)
2692 {
2693 	struct intel_display *display = to_intel_display(state);
2694 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2695 	struct intel_plane_state *new_plane_state, *old_plane_state;
2696 	struct intel_plane *plane;
2697 	bool full_update = false, cursor_in_su_area = false;
2698 	int i, ret;
2699 
2700 	if (!crtc_state->enable_psr2_sel_fetch)
2701 		return 0;
2702 
2703 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2704 		full_update = true;
2705 		goto skip_sel_fetch_set_loop;
2706 	}
2707 
2708 	crtc_state->psr2_su_area.x1 = 0;
2709 	crtc_state->psr2_su_area.y1 = -1;
2710 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2711 	crtc_state->psr2_su_area.y2 = -1;
2712 
2713 	/*
2714 	 * Calculate minimal selective fetch area of each plane and calculate
2715 	 * the pipe damaged area.
2716 	 * In the next loop the plane selective fetch area will actually be set
2717 	 * using whole pipe damaged area.
2718 	 */
2719 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2720 					     new_plane_state, i) {
2721 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2722 						      .x2 = INT_MAX };
2723 
2724 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2725 			continue;
2726 
2727 		if (!new_plane_state->uapi.visible &&
2728 		    !old_plane_state->uapi.visible)
2729 			continue;
2730 
2731 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2732 			full_update = true;
2733 			break;
2734 		}
2735 
2736 		/*
2737 		 * If visibility changed or the plane moved, mark the whole plane
2738 		 * area as damaged as it needs to be completely redrawn in both the
2739 		 * new and old positions.
2740 		 */
2741 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2742 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2743 				     &old_plane_state->uapi.dst)) {
2744 			if (old_plane_state->uapi.visible) {
2745 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2746 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2747 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2748 						 &crtc_state->pipe_src);
2749 			}
2750 
2751 			if (new_plane_state->uapi.visible) {
2752 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2753 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2754 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2755 						 &crtc_state->pipe_src);
2756 			}
2757 			continue;
2758 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2759 			/* If alpha changed mark the whole plane area as damaged */
2760 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2761 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2762 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2763 					 &crtc_state->pipe_src);
2764 			continue;
2765 		}
2766 
2767 		src = drm_plane_state_src(&new_plane_state->uapi);
2768 		drm_rect_fp_to_int(&src, &src);
2769 
2770 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2771 						     &new_plane_state->uapi, &damaged_area))
2772 			continue;
2773 
2774 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2775 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2776 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2777 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2778 
2779 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2780 	}
2781 
2782 	/*
2783 	 * TODO: For now we are just using full update in case
2784 	 * selective fetch area calculation fails. To optimize this we
2785 	 * should identify cases where this happens and fix the area
2786 	 * calculation for those.
2787 	 */
2788 	if (crtc_state->psr2_su_area.y1 == -1) {
2789 		drm_info_once(display->drm,
2790 			      "Selective fetch area calculation failed in pipe %c\n",
2791 			      pipe_name(crtc->pipe));
2792 		full_update = true;
2793 	}
2794 
2795 	if (full_update)
2796 		goto skip_sel_fetch_set_loop;
2797 
2798 	intel_psr_apply_su_area_workarounds(crtc_state);
2799 
2800 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2801 	if (ret)
2802 		return ret;
2803 
2804 	/*
2805 	 * Adjust su area to cover cursor fully as necessary (early
2806 	 * transport). This needs to be done after
2807 	 * drm_atomic_add_affected_planes to ensure a visible cursor is added to
2808 	 * the affected planes even when the cursor is not updated by itself.
2809 	 */
2810 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2811 
2812 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2813 
2814 	/*
2815 	 * Now that we have the pipe damaged area, check if it intersects with
2816 	 * every plane; if it does, set the plane selective fetch area.
2817 	 */
2818 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2819 					     new_plane_state, i) {
2820 		struct drm_rect *sel_fetch_area, inter;
2821 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2822 
2823 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2824 		    !new_plane_state->uapi.visible)
2825 			continue;
2826 
2827 		inter = crtc_state->psr2_su_area;
2828 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2829 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2830 			sel_fetch_area->y1 = -1;
2831 			sel_fetch_area->y2 = -1;
2832 			/*
2833 			 * if plane sel fetch was previously enabled ->
2834 			 * disable it
2835 			 */
2836 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2837 				crtc_state->update_planes |= BIT(plane->id);
2838 
2839 			continue;
2840 		}
2841 
2842 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2843 			full_update = true;
2844 			break;
2845 		}
2846 
2847 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2848 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2849 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2850 		crtc_state->update_planes |= BIT(plane->id);
2851 
2852 		/*
2853 		 * Sel_fetch_area is calculated for the UV plane. Use the
2854 		 * same area for the Y plane as well.
2855 		 */
2856 		if (linked) {
2857 			struct intel_plane_state *linked_new_plane_state;
2858 			struct drm_rect *linked_sel_fetch_area;
2859 
2860 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2861 			if (IS_ERR(linked_new_plane_state))
2862 				return PTR_ERR(linked_new_plane_state);
2863 
2864 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2865 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2866 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2867 			crtc_state->update_planes |= BIT(linked->id);
2868 		}
2869 	}
2870 
2871 skip_sel_fetch_set_loop:
2872 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2873 	crtc_state->pipe_srcsz_early_tpt =
2874 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2875 	return 0;
2876 }
2877 
2878 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2879 				struct intel_crtc *crtc)
2880 {
2881 	struct intel_display *display = to_intel_display(state);
2882 	const struct intel_crtc_state *old_crtc_state =
2883 		intel_atomic_get_old_crtc_state(state, crtc);
2884 	const struct intel_crtc_state *new_crtc_state =
2885 		intel_atomic_get_new_crtc_state(state, crtc);
2886 	struct intel_encoder *encoder;
2887 
2888 	if (!HAS_PSR(display))
2889 		return;
2890 
2891 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2892 					     old_crtc_state->uapi.encoder_mask) {
2893 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2894 		struct intel_psr *psr = &intel_dp->psr;
2895 
2896 		mutex_lock(&psr->lock);
2897 
2898 		if (psr->enabled) {
2899 			/*
2900 			 * Reasons to disable:
2901 			 * - PSR disabled in new state
2902 			 * - All planes will go inactive
2903 			 * - Changing between PSR versions
2904 			 * - Region Early Transport changing
2905 			 * - Display WA #1136: skl, bxt
2906 			 */
2907 			if (intel_crtc_needs_modeset(new_crtc_state) ||
2908 			    !new_crtc_state->has_psr ||
2909 			    !new_crtc_state->active_planes ||
2910 			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
2911 			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
2912 			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
2913 			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
2914 				intel_psr_disable_locked(intel_dp);
2915 			else if (new_crtc_state->wm_level_disabled)
2916 				/* Wa_14015648006 */
2917 				wm_optimization_wa(intel_dp, new_crtc_state);
2918 		}
2919 
2920 		mutex_unlock(&psr->lock);
2921 	}
2922 }
2923 
2924 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2925 				 struct intel_crtc *crtc)
2926 {
2927 	struct intel_display *display = to_intel_display(state);
2928 	const struct intel_crtc_state *crtc_state =
2929 		intel_atomic_get_new_crtc_state(state, crtc);
2930 	struct intel_encoder *encoder;
2931 
2932 	if (!crtc_state->has_psr)
2933 		return;
2934 
2935 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2936 					     crtc_state->uapi.encoder_mask) {
2937 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2938 		struct intel_psr *psr = &intel_dp->psr;
2939 		bool keep_disabled = false;
2940 
2941 		mutex_lock(&psr->lock);
2942 
2943 		drm_WARN_ON(display->drm,
2944 			    psr->enabled && !crtc_state->active_planes);
2945 
2946 		keep_disabled |= psr->sink_not_reliable;
2947 		keep_disabled |= !crtc_state->active_planes;
2948 
2949 		/* Display WA #1136: skl, bxt */
2950 		keep_disabled |= DISPLAY_VER(display) < 11 &&
2951 			crtc_state->wm_level_disabled;
2952 
2953 		if (!psr->enabled && !keep_disabled)
2954 			intel_psr_enable_locked(intel_dp, crtc_state);
2955 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2956 			/* Wa_14015648006 */
2957 			wm_optimization_wa(intel_dp, crtc_state);
2958 
2959 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2960 		if (crtc_state->crc_enabled && psr->enabled)
2961 			intel_psr_force_update(intel_dp);
2962 
2963 		/*
2964 		 * Clear possible busy bits in case we have
2965 		 * invalidate -> flip -> flush sequence.
2966 		 */
2967 		intel_dp->psr.busy_frontbuffer_bits = 0;
2968 
2969 		mutex_unlock(&psr->lock);
2970 	}
2971 }
2972 
2973 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2974 {
2975 	struct intel_display *display = to_intel_display(intel_dp);
2976 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2977 
2978 	/*
2979 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2980 	 * As all higher states has bit 4 of PSR2 state set we can just wait for
2981 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2982 	 */
2983 	return intel_de_wait_for_clear(display,
2984 				       EDP_PSR2_STATUS(display, cpu_transcoder),
2985 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2986 }
2987 
2988 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2989 {
2990 	struct intel_display *display = to_intel_display(intel_dp);
2991 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2992 
2993 	/*
2994 	 * From bspec: Panel Self Refresh (BDW+)
2995 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2996 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2997 	 * defensive enough to cover everything.
2998 	 */
2999 	return intel_de_wait_for_clear(display,
3000 				       psr_status_reg(display, cpu_transcoder),
3001 				       EDP_PSR_STATUS_STATE_MASK, 50);
3002 }
3003 
3004 /**
3005  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
3006  * @new_crtc_state: new CRTC state
3007  *
3008  * This function is expected to be called from pipe_update_start() where it is
3009  * not expected to race with PSR enable or disable.
3010  */
3011 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
3012 {
3013 	struct intel_display *display = to_intel_display(new_crtc_state);
3014 	struct intel_encoder *encoder;
3015 
3016 	if (!new_crtc_state->has_psr)
3017 		return;
3018 
3019 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3020 					     new_crtc_state->uapi.encoder_mask) {
3021 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3022 		int ret;
3023 
3024 		lockdep_assert_held(&intel_dp->psr.lock);
3025 
3026 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3027 			continue;
3028 
3029 		if (intel_dp->psr.sel_update_enabled)
3030 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
3031 		else
3032 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
3033 
3034 		if (ret)
3035 			drm_err(display->drm,
3036 				"PSR wait timed out, atomic update may fail\n");
3037 	}
3038 }
3039 
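/*
 * Wait for the PSR status to report idle, temporarily dropping psr.lock so
 * the wait doesn't block other PSR work. Returns true only if PSR is still
 * enabled once the lock has been re-acquired.
 */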
3040 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
3041 {
3042 	struct intel_display *display = to_intel_display(intel_dp);
3043 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3044 	i915_reg_t reg;
3045 	u32 mask;
3046 	int err;
3047 
3048 	if (!intel_dp->psr.enabled)
3049 		return false;
3050 
3051 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3052 					  intel_dp->psr.panel_replay_enabled)) {
3053 		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
3054 		mask = EDP_PSR2_STATUS_STATE_MASK;
3055 	} else {
3056 		reg = psr_status_reg(display, cpu_transcoder);
3057 		mask = EDP_PSR_STATUS_STATE_MASK;
3058 	}
3059 
3060 	mutex_unlock(&intel_dp->psr.lock);
3061 
3062 	err = intel_de_wait_for_clear(display, reg, mask, 50);
3063 	if (err)
3064 		drm_err(display->drm,
3065 			"Timed out waiting for PSR Idle for re-enable\n");
3066 
3067 	/* After the unlocked wait, verify that PSR is still wanted! */
3068 	mutex_lock(&intel_dp->psr.lock);
3069 	return err == 0 && intel_dp->psr.enabled;
3070 }
3071 
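/*
 * Force a commit on every eDP connector by marking its CRTC mode as changed,
 * so that updated PSR debug settings take effect through the normal atomic
 * path.
 */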
3072 static int intel_psr_fastset_force(struct intel_display *display)
3073 {
3074 	struct drm_connector_list_iter conn_iter;
3075 	struct drm_modeset_acquire_ctx ctx;
3076 	struct drm_atomic_state *state;
3077 	struct drm_connector *conn;
3078 	int err = 0;
3079 
3080 	state = drm_atomic_state_alloc(display->drm);
3081 	if (!state)
3082 		return -ENOMEM;
3083 
3084 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3085 
3086 	state->acquire_ctx = &ctx;
3087 	to_intel_atomic_state(state)->internal = true;
3088 
3089 retry:
3090 	drm_connector_list_iter_begin(display->drm, &conn_iter);
3091 	drm_for_each_connector_iter(conn, &conn_iter) {
3092 		struct drm_connector_state *conn_state;
3093 		struct drm_crtc_state *crtc_state;
3094 
3095 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3096 			continue;
3097 
3098 		conn_state = drm_atomic_get_connector_state(state, conn);
3099 		if (IS_ERR(conn_state)) {
3100 			err = PTR_ERR(conn_state);
3101 			break;
3102 		}
3103 
3104 		if (!conn_state->crtc)
3105 			continue;
3106 
3107 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3108 		if (IS_ERR(crtc_state)) {
3109 			err = PTR_ERR(crtc_state);
3110 			break;
3111 		}
3112 
3113 		/* Mark mode as changed to trigger a pipe->update() */
3114 		crtc_state->mode_changed = true;
3115 	}
3116 	drm_connector_list_iter_end(&conn_iter);
3117 
3118 	if (err == 0)
3119 		err = drm_atomic_commit(state);
3120 
3121 	if (err == -EDEADLK) {
3122 		drm_atomic_state_clear(state);
3123 		err = drm_modeset_backoff(&ctx);
3124 		if (!err)
3125 			goto retry;
3126 	}
3127 
3128 	drm_modeset_drop_locks(&ctx);
3129 	drm_modeset_acquire_fini(&ctx);
3130 	drm_atomic_state_put(state);
3131 
3132 	return err;
3133 }
3134 
3135 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3136 {
3137 	struct intel_display *display = to_intel_display(intel_dp);
3138 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3139 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3140 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3141 	u32 old_mode, old_disable_bits;
3142 	int ret;
3143 
3144 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3145 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3146 		    I915_PSR_DEBUG_MODE_MASK) ||
3147 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3148 		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3149 		return -EINVAL;
3150 	}
3151 
3152 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3153 	if (ret)
3154 		return ret;
3155 
3156 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3157 	old_disable_bits = intel_dp->psr.debug &
3158 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3159 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3160 
3161 	intel_dp->psr.debug = val;
3162 
3163 	/*
3164 	 * Do it right away if it's already enabled, otherwise it will be done
3165 	 * when enabling the source.
3166 	 */
3167 	if (intel_dp->psr.enabled)
3168 		psr_irq_control(intel_dp);
3169 
3170 	mutex_unlock(&intel_dp->psr.lock);
3171 
3172 	if (old_mode != mode || old_disable_bits != disable_bits)
3173 		ret = intel_psr_fastset_force(display);
3174 
3175 	return ret;
3176 }
3177 
3178 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3179 {
3180 	struct intel_psr *psr = &intel_dp->psr;
3181 
3182 	intel_psr_disable_locked(intel_dp);
3183 	psr->sink_not_reliable = true;
3184 	/* let's make sure that the sink is awake */
3185 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3186 }
3187 
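/*
 * Deferred work that re-activates PSR after an exit: it first handles any
 * pending AUX error, waits for the hardware to report idle, and bails out
 * if frontbuffer bits are still busy or PSR was re-activated meanwhile.
 */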
3188 static void intel_psr_work(struct work_struct *work)
3189 {
3190 	struct intel_dp *intel_dp =
3191 		container_of(work, typeof(*intel_dp), psr.work);
3192 
3193 	mutex_lock(&intel_dp->psr.lock);
3194 
3195 	if (!intel_dp->psr.enabled)
3196 		goto unlock;
3197 
3198 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
3199 		intel_psr_handle_irq(intel_dp);
3200 
	/*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled and be
	 * ready for re-enable.
	 */
3207 	if (!__psr_wait_for_idle_locked(intel_dp))
3208 		goto unlock;
3209 
3210 	/*
3211 	 * The delayed work can race with an invalidate hence we need to
3212 	 * recheck. Since psr_flush first clears this and then reschedules we
3213 	 * won't ever miss a flush when bailing out here.
3214 	 */
3215 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3216 		goto unlock;
3217 
3218 	intel_psr_activate(intel_dp);
3219 unlock:
3220 	mutex_unlock(&intel_dp->psr.lock);
3221 }
3222 
3223 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3224 {
3225 	struct intel_display *display = to_intel_display(intel_dp);
3226 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3227 
3228 	if (!intel_dp->psr.psr2_sel_fetch_enabled)
3229 		return;
3230 
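	/*
	 * LNL+ has a dedicated single full frame (SFF) control register, older
	 * platforms request it through PSR2_MAN_TRK_CTL together with the
	 * continuous full frame bit.
	 */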
3231 	if (DISPLAY_VER(display) >= 20)
3232 		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3233 			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3234 	else
3235 		intel_de_write(display,
3236 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3237 			       man_trk_ctl_enable_bit_get(display) |
3238 			       man_trk_ctl_partial_frame_bit_get(display) |
3239 			       man_trk_ctl_single_full_frame_bit_get(display) |
3240 			       man_trk_ctl_continuos_full_frame(display));
3241 }
3242 
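/*
 * With PSR2 selective fetch, keep sending continuous full frame updates while
 * the frontbuffer is dirty instead of fully exiting PSR.
 */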
3243 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3244 {
3245 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3246 		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3247 			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3248 			intel_psr_configure_full_frame_update(intel_dp);
3249 		}
3250 
3251 		intel_psr_force_update(intel_dp);
3252 	} else {
3253 		intel_psr_exit(intel_dp);
3254 	}
3255 }
3256 
3257 /**
3258  * intel_psr_invalidate - Invalidate PSR
3259  * @display: display device
3260  * @frontbuffer_bits: frontbuffer plane tracking bits
3261  * @origin: which operation caused the invalidate
3262  *
3263  * Since the hardware frontbuffer tracking has gaps we need to integrate
3264  * with the software frontbuffer tracking. This function gets called every
3265  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3266  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3267  *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3269  */
3270 void intel_psr_invalidate(struct intel_display *display,
3271 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3272 {
3273 	struct intel_encoder *encoder;
3274 
3275 	if (origin == ORIGIN_FLIP)
3276 		return;
3277 
3278 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3279 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3280 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3281 
3282 		mutex_lock(&intel_dp->psr.lock);
3283 		if (!intel_dp->psr.enabled) {
3284 			mutex_unlock(&intel_dp->psr.lock);
3285 			continue;
3286 		}
3287 
3288 		pipe_frontbuffer_bits &=
3289 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3290 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3291 
3292 		if (pipe_frontbuffer_bits)
3293 			_psr_invalidate_handle(intel_dp);
3294 
3295 		mutex_unlock(&intel_dp->psr.lock);
3296 	}
3297 }

/*
 * Once we rely completely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will also invalidate and flush PSR for ORIGIN_FLIP
 * events, so tgl_dc3co_flush_locked() will need to be changed
 * accordingly.
 */
3304 static void
3305 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3306 		       enum fb_op_origin origin)
3307 {
3308 	struct intel_display *display = to_intel_display(intel_dp);
3309 	struct drm_i915_private *i915 = to_i915(display->drm);
3310 
3311 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3312 	    !intel_dp->psr.active)
3313 		return;
3314 
	/*
	 * Every frontbuffer flush/flip event re-arms the delayed work with a
	 * fresh delay; if the delayed work actually gets to run, it means the
	 * display has been idle for that long.
	 */
3319 	if (!(frontbuffer_bits &
3320 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3321 		return;
3322 
3323 	tgl_psr2_enable_dc3co(intel_dp);
3324 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3325 			 intel_dp->psr.dc3co_exit_delay);
3326 }
3327 
3328 static void _psr_flush_handle(struct intel_dp *intel_dp)
3329 {
3330 	struct intel_display *display = to_intel_display(intel_dp);
3331 	struct drm_i915_private *dev_priv = to_i915(display->drm);
3332 
3333 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3334 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3335 			/* can we turn CFF off? */
3336 			if (intel_dp->psr.busy_frontbuffer_bits == 0)
3337 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3338 		}
3339 
		/*
		 * Still keep the CFF bit enabled, as we don't have a proper SU
		 * configuration in case an update is sent for any reason after
		 * the SFF bit gets cleared by the HW on the next vblank.
		 *
		 * NOTE: Setting the CFF bit is not needed from LunarLake
		 * onwards, as we have a dedicated register for the SFF bit and
		 * are not overwriting the existing SU configuration.
		 */
3349 		intel_psr_configure_full_frame_update(intel_dp);
3350 	}
3351 
3352 	intel_psr_force_update(intel_dp);
3353 
3354 	if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
3355 	    !intel_dp->psr.busy_frontbuffer_bits)
3356 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3357 }
3358 
3359 /**
3360  * intel_psr_flush - Flush PSR
3361  * @display: display device
3362  * @frontbuffer_bits: frontbuffer plane tracking bits
3363  * @origin: which operation caused the flush
3364  *
3365  * Since the hardware frontbuffer tracking has gaps we need to integrate
3366  * with the software frontbuffer tracking. This function gets called every
3367  * time frontbuffer rendering has completed and flushed out to memory. PSR
3368  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3369  *
3370  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3371  */
3372 void intel_psr_flush(struct intel_display *display,
3373 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3374 {
3375 	struct intel_encoder *encoder;
3376 
3377 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3378 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3379 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3380 
3381 		mutex_lock(&intel_dp->psr.lock);
3382 		if (!intel_dp->psr.enabled) {
3383 			mutex_unlock(&intel_dp->psr.lock);
3384 			continue;
3385 		}
3386 
3387 		pipe_frontbuffer_bits &=
3388 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3389 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3390 
3391 		/*
3392 		 * If the PSR is paused by an explicit intel_psr_paused() call,
3393 		 * we have to ensure that the PSR is not activated until
3394 		 * intel_psr_resume() is called.
3395 		 */
3396 		if (intel_dp->psr.pause_counter)
3397 			goto unlock;
3398 
3399 		if (origin == ORIGIN_FLIP ||
3400 		    (origin == ORIGIN_CURSOR_UPDATE &&
3401 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3402 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3403 			goto unlock;
3404 		}
3405 
3406 		if (pipe_frontbuffer_bits == 0)
3407 			goto unlock;
3408 
3409 		/* By definition flush = invalidate + flush */
3410 		_psr_flush_handle(intel_dp);
3411 unlock:
3412 		mutex_unlock(&intel_dp->psr.lock);
3413 	}
3414 }
3415 
3416 /**
3417  * intel_psr_init - Init basic PSR work and mutex.
3418  * @intel_dp: Intel DP
3419  *
 * This function is called after the connector has been initialized
 * (connector initialization handles the connector capabilities) and
 * sets up the basic PSR state for each DP encoder.
3423  */
3424 void intel_psr_init(struct intel_dp *intel_dp)
3425 {
3426 	struct intel_display *display = to_intel_display(intel_dp);
3427 	struct intel_connector *connector = intel_dp->attached_connector;
3428 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3429 
3430 	if (!(HAS_PSR(display) || HAS_DP20(display)))
3431 		return;
3432 
	/*
	 * The HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of the PSR registers per transcoder,
	 * but BDW, GEN9 and GEN11 have only been validated by the HW team on
	 * the eDP transcoder. For now we keep one instance of PSR, hardcoded
	 * to PORT_A, for BDW, GEN9 and GEN11. GEN12+ supports an instance of
	 * the PSR registers per transcoder.
	 */
3442 	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3443 		drm_dbg_kms(display->drm,
3444 			    "PSR condition failed: Port not supported\n");
3445 		return;
3446 	}
3447 
3448 	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3449 	    DISPLAY_VER(display) >= 20)
3450 		intel_dp->psr.source_panel_replay_support = true;
3451 
3452 	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3453 		intel_dp->psr.source_support = true;
3454 
	/* Set link_standby vs. link_off defaults */
	if (DISPLAY_VER(display) < 12)
		/* For platforms up to TGL let's respect the VBT again */
3458 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3459 
3460 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3461 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3462 	mutex_init(&intel_dp->psr.lock);
3463 }
3464 
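/*
 * Read the sink status and error status from either the Panel Replay or the
 * PSR DPCD registers, depending on which mode is currently enabled.
 */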
3465 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3466 					   u8 *status, u8 *error_status)
3467 {
3468 	struct drm_dp_aux *aux = &intel_dp->aux;
3469 	int ret;
3470 	unsigned int offset;
3471 
3472 	offset = intel_dp->psr.panel_replay_enabled ?
3473 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3474 
3475 	ret = drm_dp_dpcd_readb(aux, offset, status);
3476 	if (ret != 1)
3477 		return ret;
3478 
3479 	offset = intel_dp->psr.panel_replay_enabled ?
3480 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3481 
3482 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3483 	if (ret != 1)
3484 		return ret;
3485 
3486 	*status = *status & DP_PSR_SINK_STATE_MASK;
3487 
3488 	return 0;
3489 }
3490 
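/*
 * ALPM errors are only checked when selective update is enabled; on an error
 * disable PSR, mark the sink as not reliable and turn ALPM off.
 */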
3491 static void psr_alpm_check(struct intel_dp *intel_dp)
3492 {
3493 	struct intel_psr *psr = &intel_dp->psr;
3494 
3495 	if (!psr->sel_update_enabled)
3496 		return;
3497 
3498 	if (intel_alpm_get_error(intel_dp)) {
3499 		intel_psr_disable_locked(intel_dp);
3500 		psr->sink_not_reliable = true;
3501 		intel_alpm_disable(intel_dp);
3502 	}
3503 }
3504 
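/*
 * The sink sets DP_PSR_CAPS_CHANGE in DP_PSR_ESI when its PSR capabilities
 * have changed; treat that as an unreliable sink and disable PSR.
 */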
3505 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3506 {
3507 	struct intel_display *display = to_intel_display(intel_dp);
3508 	struct intel_psr *psr = &intel_dp->psr;
3509 	u8 val;
3510 	int r;
3511 
3512 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3513 	if (r != 1) {
3514 		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3515 		return;
3516 	}
3517 
3518 	if (val & DP_PSR_CAPS_CHANGE) {
3519 		intel_psr_disable_locked(intel_dp);
3520 		psr->sink_not_reliable = true;
3521 		drm_dbg_kms(display->drm,
3522 			    "Sink PSR capability changed, disabling PSR\n");
3523 
3524 		/* Clearing it */
3525 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3526 	}
3527 }
3528 
/*
 * The following error bits are common between PSR and Panel Replay:
 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
 * so this function relies on the PSR definitions.
 */
3536 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3537 {
3538 	struct intel_display *display = to_intel_display(intel_dp);
3539 	struct intel_psr *psr = &intel_dp->psr;
3540 	u8 status, error_status;
3541 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3542 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3543 			  DP_PSR_LINK_CRC_ERROR;
3544 
3545 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3546 		return;
3547 
3548 	mutex_lock(&psr->lock);
3549 
3550 	psr->link_ok = false;
3551 
3552 	if (!psr->enabled)
3553 		goto exit;
3554 
3555 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3556 		drm_err(display->drm,
3557 			"Error reading PSR status or error status\n");
3558 		goto exit;
3559 	}
3560 
3561 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3562 	    (error_status & errors)) {
3563 		intel_psr_disable_locked(intel_dp);
3564 		psr->sink_not_reliable = true;
3565 	}
3566 
3567 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3568 	    !error_status)
3569 		drm_dbg_kms(display->drm,
3570 			    "PSR sink internal error, disabling PSR\n");
3571 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3572 		drm_dbg_kms(display->drm,
3573 			    "PSR RFB storage error, disabling PSR\n");
3574 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3575 		drm_dbg_kms(display->drm,
3576 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3577 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3578 		drm_dbg_kms(display->drm,
3579 			    "PSR Link CRC error, disabling PSR\n");
3580 
3581 	if (error_status & ~errors)
3582 		drm_err(display->drm,
3583 			"PSR_ERROR_STATUS unhandled errors %x\n",
3584 			error_status & ~errors);
	/* clear the error status register */
3586 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3587 
3588 	if (!psr->panel_replay_enabled) {
3589 		psr_alpm_check(intel_dp);
3590 		psr_capability_changed_check(intel_dp);
3591 	}
3592 
3593 exit:
3594 	mutex_unlock(&psr->lock);
3595 }
3596 
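/* Report whether PSR is currently enabled, under the PSR lock. */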
3597 bool intel_psr_enabled(struct intel_dp *intel_dp)
3598 {
3599 	bool ret;
3600 
3601 	if (!CAN_PSR(intel_dp))
3602 		return false;
3603 
3604 	mutex_lock(&intel_dp->psr.lock);
3605 	ret = intel_dp->psr.enabled;
3606 	mutex_unlock(&intel_dp->psr.lock);
3607 
3608 	return ret;
3609 }
3610 
/**
 * intel_psr_link_ok - return psr->link_ok
 * @intel_dp: struct intel_dp
 *
 * We are seeing unexpected link re-trainings with some panels, caused by the
 * panel reporting a bad link status after PSR has been enabled. Code checking
 * the link status can call this to know when to ignore a bad link status
 * reported by the panel, i.e. if the panel reports a bad link but
 * intel_psr_link_ok() says the link is ok, the caller should trust the latter.
 *
 * Returns the value of link_ok
 */
3623 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3624 {
3625 	bool ret;
3626 
3627 	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3628 	    !intel_dp_is_edp(intel_dp))
3629 		return false;
3630 
3631 	mutex_lock(&intel_dp->psr.lock);
3632 	ret = intel_dp->psr.link_ok;
3633 	mutex_unlock(&intel_dp->psr.lock);
3634 
3635 	return ret;
3636 }
3637 
3638 /**
3639  * intel_psr_lock - grab PSR lock
3640  * @crtc_state: the crtc state
3641  *
3642  * This is initially meant to be used by around CRTC update, when
3643  * vblank sensitive registers are updated and we need grab the lock
3644  * before it to avoid vblank evasion.
3645  */
3646 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3647 {
3648 	struct intel_display *display = to_intel_display(crtc_state);
3649 	struct intel_encoder *encoder;
3650 
3651 	if (!crtc_state->has_psr)
3652 		return;
3653 
3654 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3655 					     crtc_state->uapi.encoder_mask) {
3656 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3657 
3658 		mutex_lock(&intel_dp->psr.lock);
3659 		break;
3660 	}
3661 }
3662 
3663 /**
3664  * intel_psr_unlock - release PSR lock
3665  * @crtc_state: the crtc state
3666  *
3667  * Release the PSR lock that was held during pipe update.
3668  */
3669 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3670 {
3671 	struct intel_display *display = to_intel_display(crtc_state);
3672 	struct intel_encoder *encoder;
3673 
3674 	if (!crtc_state->has_psr)
3675 		return;
3676 
3677 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3678 					     crtc_state->uapi.encoder_mask) {
3679 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3680 
3681 		mutex_unlock(&intel_dp->psr.lock);
3682 		break;
3683 	}
3684 }
3685 
3686 /* Wa_16025596647 */
3687 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
3688 {
3689 	struct intel_display *display = to_intel_display(intel_dp);
3690 	bool dc5_dc6_blocked;
3691 
3692 	if (!intel_dp->psr.active)
3693 		return;
3694 
3695 	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
3696 
3697 	if (intel_dp->psr.sel_update_enabled)
3698 		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
3699 					 psr_compute_idle_frames(intel_dp));
3700 	else
3701 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
3702 								       intel_dp->psr.pipe,
3703 								       dc5_dc6_blocked);
3704 }
3705 
3706 static void psr_dc5_dc6_wa_work(struct work_struct *work)
3707 {
3708 	struct intel_display *display = container_of(work, typeof(*display),
3709 						     psr_dc5_dc6_wa_work);
3710 	struct intel_encoder *encoder;
3711 
3712 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3713 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3714 
3715 		mutex_lock(&intel_dp->psr.lock);
3716 
3717 		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
3718 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3719 
3720 		mutex_unlock(&intel_dp->psr.lock);
3721 	}
3722 }
3723 
3724 /**
3725  * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
 * @display: intel display struct
 *
 * This targets the underrun on idle PSR HW bug (Wa_16025596647): it schedules
 * psr_dc5_dc6_wa_work, which applies/removes the workaround.
3730  */
3731 void intel_psr_notify_dc5_dc6(struct intel_display *display)
3732 {
3733 	if (DISPLAY_VER(display) != 20 &&
3734 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3735 		return;
3736 
3737 	schedule_work(&display->psr_dc5_dc6_wa_work);
3738 }
3739 
3740 /**
3741  * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
 * @display: intel display struct
 *
 * This targets the underrun on idle PSR HW bug (Wa_16025596647): it initializes
 * psr_dc5_dc6_wa_work, which is used to apply the workaround.
3746  */
3747 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
3748 {
3749 	if (DISPLAY_VER(display) != 20 &&
3750 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3751 		return;
3752 
3753 	INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
3754 }
3755 
3756 /**
3757  * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
3758  * @state: intel atomic state
3759  * @crtc: intel crtc
3760  * @enable: enable/disable
3761  *
3762  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply
3763  * remove the workaround when pipe is getting enabled/disabled
3764  */
3765 void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
3766 				  struct intel_crtc *crtc, bool enable)
3767 {
3768 	struct intel_display *display = to_intel_display(state);
3769 	struct intel_encoder *encoder;
3770 
3771 	if (DISPLAY_VER(display) != 20 &&
3772 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3773 		return;
3774 
3775 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3776 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3777 		u8 active_non_psr_pipes;
3778 
3779 		mutex_lock(&intel_dp->psr.lock);
3780 
3781 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3782 			goto unlock;
3783 
3784 		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
3785 
3786 		if (enable)
3787 			active_non_psr_pipes |= BIT(crtc->pipe);
3788 		else
3789 			active_non_psr_pipes &= ~BIT(crtc->pipe);
3790 
3791 		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
3792 			goto unlock;
3793 
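		/*
		 * Only reprogram the workaround when the set of active
		 * non-PSR pipes transitions between empty and non-empty,
		 * otherwise just update the bookkeeping.
		 */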
3794 		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
3795 		    (!enable && !intel_dp->psr.active_non_psr_pipes)) {
3796 			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3797 			goto unlock;
3798 		}
3799 
3800 		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3801 
3802 		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3803 unlock:
3804 		mutex_unlock(&intel_dp->psr.lock);
3805 	}
3806 }
3807 
3808 /**
3809  * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
3810  * @display: intel display struct
3811  * @enable: enable/disable
3812  *
 * This targets the underrun on idle PSR HW bug (Wa_16025596647), applying or
 * removing the workaround when vblank is getting enabled or disabled.
3815  */
3816 void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
3817 					    bool enable)
3818 {
3819 	struct intel_encoder *encoder;
3820 
3821 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3822 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3823 
3824 		mutex_lock(&intel_dp->psr.lock);
3825 		if (intel_dp->psr.panel_replay_enabled) {
3826 			mutex_unlock(&intel_dp->psr.lock);
3827 			break;
3828 		}
3829 
3830 		if (intel_dp->psr.enabled)
3831 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3832 
3833 		mutex_unlock(&intel_dp->psr.lock);
3834 		return;
3835 	}
3836 
	/*
	 * NOTE: intel_display_power_set_target_dc_state is used only by PSR
	 * code for DC3CO handling. The DC3CO target state is currently
	 * disabled in the PSR code. If DC3CO is taken into use we need to
	 * take that into account here as well.
	 */
3844 	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
3845 						DC_STATE_EN_UPTO_DC6);
3846 }
3847 
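/* Dump the live source PSR/Panel Replay hardware status for debugfs. */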
3848 static void
3849 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3850 {
3851 	struct intel_display *display = to_intel_display(intel_dp);
3852 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3853 	const char *status = "unknown";
3854 	u32 val, status_val;
3855 
3856 	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
3857 	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
3858 		static const char * const live_status[] = {
3859 			"IDLE",
3860 			"CAPTURE",
3861 			"CAPTURE_FS",
3862 			"SLEEP",
3863 			"BUFON_FW",
3864 			"ML_UP",
3865 			"SU_STANDBY",
3866 			"FAST_SLEEP",
3867 			"DEEP_SLEEP",
3868 			"BUF_ON",
3869 			"TG_ON"
3870 		};
3871 		val = intel_de_read(display,
3872 				    EDP_PSR2_STATUS(display, cpu_transcoder));
3873 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3874 		if (status_val < ARRAY_SIZE(live_status))
3875 			status = live_status[status_val];
3876 	} else {
3877 		static const char * const live_status[] = {
3878 			"IDLE",
3879 			"SRDONACK",
3880 			"SRDENT",
3881 			"BUFOFF",
3882 			"BUFON",
3883 			"AUXACK",
3884 			"SRDOFFACK",
3885 			"SRDENT_ON",
3886 		};
3887 		val = intel_de_read(display,
3888 				    psr_status_reg(display, cpu_transcoder));
3889 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3890 		if (status_val < ARRAY_SIZE(live_status))
3891 			status = live_status[status_val];
3892 	}
3893 
3894 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3895 }
3896 
3897 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3898 				      struct seq_file *m)
3899 {
3900 	struct intel_psr *psr = &intel_dp->psr;
3901 
3902 	seq_printf(m, "Sink support: PSR = %s",
3903 		   str_yes_no(psr->sink_support));
3904 
3905 	if (psr->sink_support)
3906 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3907 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3908 		seq_printf(m, " (Early Transport)");
3909 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3910 	seq_printf(m, ", Panel Replay Selective Update = %s",
3911 		   str_yes_no(psr->sink_panel_replay_su_support));
3912 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3913 		seq_printf(m, " (Early Transport)");
3914 	seq_printf(m, "\n");
3915 }
3916 
3917 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3918 				 struct seq_file *m)
3919 {
3920 	struct intel_psr *psr = &intel_dp->psr;
3921 	const char *status, *mode, *region_et;
3922 
3923 	if (psr->enabled)
3924 		status = " enabled";
3925 	else
3926 		status = "disabled";
3927 
3928 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3929 		mode = "Panel Replay Selective Update";
3930 	else if (psr->panel_replay_enabled)
3931 		mode = "Panel Replay";
3932 	else if (psr->sel_update_enabled)
3933 		mode = "PSR2";
3934 	else if (psr->enabled)
3935 		mode = "PSR1";
3936 	else
3937 		mode = "";
3938 
3939 	if (psr->su_region_et_enabled)
3940 		region_et = " (Early Transport)";
3941 	else
3942 		region_et = "";
3943 
3944 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3945 }
3946 
3947 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3948 {
3949 	struct intel_display *display = to_intel_display(intel_dp);
3950 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3951 	struct intel_psr *psr = &intel_dp->psr;
3952 	struct ref_tracker *wakeref;
3953 	bool enabled;
3954 	u32 val, psr2_ctl;
3955 
3956 	intel_psr_sink_capability(intel_dp, m);
3957 
3958 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3959 		return 0;
3960 
3961 	wakeref = intel_display_rpm_get(display);
3962 	mutex_lock(&psr->lock);
3963 
3964 	intel_psr_print_mode(intel_dp, m);
3965 
3966 	if (!psr->enabled) {
3967 		seq_printf(m, "PSR sink not reliable: %s\n",
3968 			   str_yes_no(psr->sink_not_reliable));
3969 
3970 		goto unlock;
3971 	}
3972 
3973 	if (psr->panel_replay_enabled) {
3974 		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
3975 
3976 		if (intel_dp_is_edp(intel_dp))
3977 			psr2_ctl = intel_de_read(display,
3978 						 EDP_PSR2_CTL(display,
3979 							      cpu_transcoder));
3980 
3981 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3982 	} else if (psr->sel_update_enabled) {
3983 		val = intel_de_read(display,
3984 				    EDP_PSR2_CTL(display, cpu_transcoder));
3985 		enabled = val & EDP_PSR2_ENABLE;
3986 	} else {
3987 		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
3988 		enabled = val & EDP_PSR_ENABLE;
3989 	}
3990 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3991 		   str_enabled_disabled(enabled), val);
3992 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
3993 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
3994 			   psr2_ctl);
3995 	psr_source_status(intel_dp, m);
3996 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3997 		   psr->busy_frontbuffer_bits);
3998 
	/*
	 * On SKL+ the performance counter is reset to 0 every time a DC state
	 * is entered.
	 */
4002 	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
4003 	seq_printf(m, "Performance counter: %u\n",
4004 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
4005 
4006 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
4007 		seq_printf(m, "Last attempted entry at: %lld\n",
4008 			   psr->last_entry_attempt);
4009 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
4010 	}
4011 
4012 	if (psr->sel_update_enabled) {
4013 		u32 su_frames_val[3];
4014 		int frame;
4015 
		/*
		 * Read all 3 registers up front to minimize the chance of
		 * crossing a frame boundary between the register reads.
		 */
4020 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
4021 			val = intel_de_read(display,
4022 					    PSR2_SU_STATUS(display, cpu_transcoder, frame));
4023 			su_frames_val[frame / 3] = val;
4024 		}
4025 
4026 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
4027 
4028 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
4029 			u32 su_blocks;
4030 
4031 			su_blocks = su_frames_val[frame / 3] &
4032 				    PSR2_SU_STATUS_MASK(frame);
4033 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
4034 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
4035 		}
4036 
4037 		seq_printf(m, "PSR2 selective fetch: %s\n",
4038 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
4039 	}
4040 
4041 unlock:
4042 	mutex_unlock(&psr->lock);
4043 	intel_display_rpm_put(display, wakeref);
4044 
4045 	return 0;
4046 }
4047 
4048 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4049 {
4050 	struct intel_display *display = m->private;
4051 	struct intel_dp *intel_dp = NULL;
4052 	struct intel_encoder *encoder;
4053 
4054 	if (!HAS_PSR(display))
4055 		return -ENODEV;
4056 
	/* Find the first eDP which supports PSR */
4058 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4059 		intel_dp = enc_to_intel_dp(encoder);
4060 		break;
4061 	}
4062 
4063 	if (!intel_dp)
4064 		return -ENODEV;
4065 
4066 	return intel_psr_status(m, intel_dp);
4067 }
4068 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4069 
4070 static int
4071 i915_edp_psr_debug_set(void *data, u64 val)
4072 {
4073 	struct intel_display *display = data;
4074 	struct intel_encoder *encoder;
4075 	int ret = -ENODEV;
4076 
4077 	if (!HAS_PSR(display))
4078 		return ret;
4079 
4080 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4081 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4082 
4083 		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4084 
4085 		// TODO: split to each transcoder's PSR debug state
4086 		with_intel_display_rpm(display)
4087 			ret = intel_psr_debug_set(intel_dp, val);
4088 	}
4089 
4090 	return ret;
4091 }
4092 
4093 static int
4094 i915_edp_psr_debug_get(void *data, u64 *val)
4095 {
4096 	struct intel_display *display = data;
4097 	struct intel_encoder *encoder;
4098 
4099 	if (!HAS_PSR(display))
4100 		return -ENODEV;
4101 
4102 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4103 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4104 
4105 		// TODO: split to each transcoder's PSR debug state
4106 		*val = READ_ONCE(intel_dp->psr.debug);
4107 		return 0;
4108 	}
4109 
4110 	return -ENODEV;
4111 }
4112 
4113 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4114 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4115 			"%llu\n");
4116 
4117 void intel_psr_debugfs_register(struct intel_display *display)
4118 {
4119 	struct drm_minor *minor = display->drm->primary;
4120 
4121 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
4122 			    display, &i915_edp_psr_debug_fops);
4123 
4124 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
4125 			    display, &i915_edp_psr_status_fops);
4126 }
4127 
4128 static const char *psr_mode_str(struct intel_dp *intel_dp)
4129 {
4130 	if (intel_dp->psr.panel_replay_enabled)
4131 		return "PANEL-REPLAY";
4132 	else if (intel_dp->psr.enabled)
4133 		return "PSR";
4134 
4135 	return "unknown";
4136 }
4137 
4138 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4139 {
4140 	struct intel_connector *connector = m->private;
4141 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4142 	static const char * const sink_status[] = {
4143 		"inactive",
4144 		"transition to active, capture and display",
4145 		"active, display from RFB",
4146 		"active, capture and display on sink device timings",
4147 		"transition to inactive, capture and display, timing re-sync",
4148 		"reserved",
4149 		"reserved",
4150 		"sink internal error",
4151 	};
4152 	const char *str;
4153 	int ret;
4154 	u8 status, error_status;
4155 
4156 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4157 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4158 		return -ENODEV;
4159 	}
4160 
4161 	if (connector->base.status != connector_status_connected)
4162 		return -ENODEV;
4163 
4164 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4165 	if (ret)
4166 		return ret;
4167 
4168 	status &= DP_PSR_SINK_STATE_MASK;
4169 	if (status < ARRAY_SIZE(sink_status))
4170 		str = sink_status[status];
4171 	else
4172 		str = "unknown";
4173 
4174 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4175 
4176 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4177 
4178 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4179 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4180 			    DP_PSR_LINK_CRC_ERROR))
4181 		seq_puts(m, ":\n");
4182 	else
4183 		seq_puts(m, "\n");
4184 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4185 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4186 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4187 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4188 	if (error_status & DP_PSR_LINK_CRC_ERROR)
4189 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4190 
4191 	return ret;
4192 }
4193 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4194 
4195 static int i915_psr_status_show(struct seq_file *m, void *data)
4196 {
4197 	struct intel_connector *connector = m->private;
4198 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4199 
4200 	return intel_psr_status(m, intel_dp);
4201 }
4202 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4203 
4204 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4205 {
4206 	struct intel_display *display = to_intel_display(connector);
4207 	struct dentry *root = connector->base.debugfs_entry;
4208 
4209 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4210 	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4211 		return;
4212 
4213 	debugfs_create_file("i915_psr_sink_status", 0444, root,
4214 			    connector, &i915_psr_sink_status_fops);
4215 
4216 	if (HAS_PSR(display) || HAS_DP20(display))
4217 		debugfs_create_file("i915_psr_status", 0444, root,
4218 				    connector, &i915_psr_status_fops);
4219 }
4220 
4221 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4222 {
	/*
	 * eDP Panel Replay always uses ALPM.
	 * PSR2 uses ALPM but PSR1 doesn't.
	 */
4227 	return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4228 					     crtc_state->has_panel_replay);
4229 }
4230