xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 25489a4f556414445d342951615178368ee45cde)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_vblank.h>
30 
31 #include "i915_drv.h"
32 #include "i915_reg.h"
33 #include "intel_alpm.h"
34 #include "intel_atomic.h"
35 #include "intel_crtc.h"
36 #include "intel_cursor_regs.h"
37 #include "intel_ddi.h"
38 #include "intel_de.h"
39 #include "intel_display_irq.h"
40 #include "intel_display_rpm.h"
41 #include "intel_display_types.h"
42 #include "intel_dmc.h"
43 #include "intel_dp.h"
44 #include "intel_dp_aux.h"
45 #include "intel_frontbuffer.h"
46 #include "intel_hdmi.h"
47 #include "intel_psr.h"
48 #include "intel_psr_regs.h"
49 #include "intel_snps_phy.h"
50 #include "intel_vblank.h"
51 #include "intel_vrr.h"
52 #include "skl_universal_plane.h"
53 
54 /**
55  * DOC: Panel Self Refresh (PSR/SRD)
56  *
57  * Since Haswell the display controller supports Panel Self-Refresh on display
58  * panels which have a remote frame buffer (RFB) implemented according to the
59  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
60  * standby states when the system is idle but the display is on, as it
61  * eliminates display refresh requests to DDR memory completely as long as the
62  * frame buffer for that display is unchanged.
63  *
64  * Panel Self Refresh must be supported by both Hardware (source) and
65  * Panel (sink).
66  *
67  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
68  * to power down the link and memory controller. For DSI panels the same idea
69  * is called "manual mode".
70  *
71  * The implementation uses the hardware-based PSR support which automatically
72  * enters/exits self-refresh mode. The hardware takes care of sending the
73  * required DP aux message and could even retrain the link (that part isn't
74  * enabled yet though). The hardware also keeps track of any frontbuffer
75  * changes to know when to exit self-refresh mode again. Unfortunately that
76  * part doesn't work too well, hence why the i915 PSR support uses the
77  * software frontbuffer tracking to make sure it doesn't miss a screen
78  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
79  * get called by the frontbuffer tracking code. Note that because of locking
80  * issues the self-refresh re-enable code is done from a work queue, which
81  * must be correctly synchronized/cancelled when shutting down the pipe.
82  *
83  * DC3CO (DC3 clock off)
84  *
85  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
86  * the clock off automatically during the PSR2 idle state.
87  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
88  * entry/exit allows the HW to enter a low-power state even when page flipping
89  * periodically (for instance a 30fps video playback scenario).
90  *
91  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
92  * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
93  * frames. If no other flip occurs and that work is executed, DC3CO is
94  * disabled and PSR2 is configured to enter deep sleep again, resetting once
95  * more on the next flip.
96  * Front buffer modifications do not trigger DC3CO activation on purpose as it
97  * would bring a lot of complexity and most modern systems will only
98  * use page flips.
99  */
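/*
 * A minimal illustrative sketch, not part of the driver; the function name is
 * made up. It only shows how the "run after 6 frames" delay described above
 * maps to a timer delay, i.e. an idle frame count converted to jiffies at the
 * current refresh rate.
 */
static inline unsigned long dc3co_frames_to_jiffies_sketch(int frames, int vrefresh)
{
	/* one frame lasts 1000 / vrefresh milliseconds */
	return msecs_to_jiffies(DIV_ROUND_UP(1000 * frames, vrefresh));
}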
100 
101 /*
102  * Description of PSR mask bits:
103  *
104  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
105  *
106  *  When unmasked (nearly) all display register writes (eg. even
107  *  SWF) trigger a PSR exit. Some registers are excluded from this
108  *  and they have a more specific mask (described below). On icl+
109  *  this bit no longer exists and is effectively always set.
110  *
111  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
112  *
113  *  When unmasked (nearly) all pipe/plane register writes
114  *  trigger a PSR exit. Some plane registers are excluded from this
115  *  and they have a more specific mask (described below).
116  *
117  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
118  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
119  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
120  *
121  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
122  *  SPR_SURF/CURBASE are not included in this and instead are
123  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
124  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
125  *
126  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
127  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
128  *
129  *  When unmasked PSR is blocked as long as the sprite
130  *  plane is enabled. skl+ with their universal planes no
131  *  longer have a mask bit like this, and no plane being
132  *  enabled blocks PSR.
133  *
134  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
135  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
136  *
137  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
138  *  this bit doesn't exist, but CURPOS is included in the
139  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
140  *
141  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
142  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
143  *
144  *  When unmasked PSR is blocked as long as vblank and/or vsync
145  *  interrupt is unmasked in IMR *and* enabled in IER.
146  *
147  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
148  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
149  *
150  *  Selects whether PSR exit generates an extra vblank before
151  *  the first frame is transmitted. Also note the opposite polarity
152  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
153  *  unmasked==do not generate the extra vblank).
154  *
155  *  With DC states enabled the extra vblank happens after link training,
156  *  with DC states disabled it happens immediately upon PSR exit trigger.
157  *  No idea as of now why there is a difference. HSW/BDW (which don't
158  *  even have DMC) always generate it after link training. Go figure.
159  *
160  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
161  *  and thus won't latch until the first vblank. So with DC states
162  *  enabled the register effectively uses the reset value during DC5
163  *  exit+PSR exit sequence, and thus the bit does nothing until
164  *  latched by the vblank that it was trying to prevent from being
165  *  generated in the first place. So we should probably call this
166  *  one a chicken/egg bit instead on skl+.
167  *
168  *  In standby mode (as opposed to link-off) this makes no difference
169  *  as the timing generator keeps running the whole time generating
170  *  normal periodic vblanks.
171  *
172  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
173  *  and doing so makes the behaviour match the skl+ reset value.
174  *
175  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
176  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
177  *
178  *  On BDW without this bit set no vblanks whatsoever are
179  *  generated after PSR exit. On HSW this has no apparent effect.
180  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
181  *
182  * The rest of the bits are more self-explanatory and/or
183  * irrelevant for normal operation.
184  *
185  * Description of intel_crtc_state variables. has_psr, has_panel_replay and
186  * has_sel_update:
187  *
188  *  has_psr (alone):					PSR1
189  *  has_psr + has_sel_update:				PSR2
190  *  has_psr + has_panel_replay:				Panel Replay
191  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
192  *
193  * Description of some intel_psr variables. enabled, panel_replay_enabled,
194  * sel_update_enabled
195  *
196  *  enabled (alone):						PSR1
197  *  enabled + sel_update_enabled:				PSR2
198  *  enabled + panel_replay_enabled:				Panel Replay
199  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
200  */
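/*
 * A minimal illustrative sketch, not part of the driver; the function name is
 * made up. It only decodes the crtc_state flag combinations listed above into
 * a human readable mode name.
 */
static inline const char *psr_mode_name_sketch(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->has_psr)
		return "disabled";

	if (crtc_state->has_panel_replay)
		return crtc_state->has_sel_update ?
			"Panel Replay Selective Update" : "Panel Replay";

	return crtc_state->has_sel_update ? "PSR2" : "PSR1";
}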
201 
202 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
203 			   (intel_dp)->psr.source_support)
204 
205 bool intel_encoder_can_psr(struct intel_encoder *encoder)
206 {
207 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
208 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
209 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
210 	else
211 		return false;
212 }
213 
214 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
215 				  const struct intel_crtc_state *crtc_state)
216 {
217 	/*
218 	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
219 	 * the output is enabled. For non-eDP outputs the main link is always
220 	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
221 	 * for eDP.
222 	 *
223 	 * TODO:
224 	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
225 	 *   the ALPM with main-link off mode is not enabled.
226 	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
227 	 *   main-link off mode is added for it and this mode gets enabled.
228 	 */
229 	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
230 	       intel_encoder_can_psr(encoder);
231 }
232 
233 static bool psr_global_enabled(struct intel_dp *intel_dp)
234 {
235 	struct intel_display *display = to_intel_display(intel_dp);
236 	struct intel_connector *connector = intel_dp->attached_connector;
237 
238 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
239 	case I915_PSR_DEBUG_DEFAULT:
240 		if (display->params.enable_psr == -1)
241 			return intel_dp_is_edp(intel_dp) ?
242 				connector->panel.vbt.psr.enable :
243 				true;
244 		return display->params.enable_psr;
245 	case I915_PSR_DEBUG_DISABLE:
246 		return false;
247 	default:
248 		return true;
249 	}
250 }
251 
252 static bool psr2_global_enabled(struct intel_dp *intel_dp)
253 {
254 	struct intel_display *display = to_intel_display(intel_dp);
255 
256 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
257 	case I915_PSR_DEBUG_DISABLE:
258 	case I915_PSR_DEBUG_FORCE_PSR1:
259 		return false;
260 	default:
261 		if (display->params.enable_psr == 1)
262 			return false;
263 		return true;
264 	}
265 }
266 
267 static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
268 {
269 	struct intel_display *display = to_intel_display(intel_dp);
270 
271 	if (display->params.enable_psr != -1)
272 		return false;
273 
274 	return true;
275 }
276 
277 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
278 {
279 	struct intel_display *display = to_intel_display(intel_dp);
280 
281 	if ((display->params.enable_psr != -1) ||
282 	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
283 		return false;
284 	return true;
285 }
286 
287 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
288 {
289 	struct intel_display *display = to_intel_display(intel_dp);
290 
291 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
292 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
293 }
294 
295 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
296 {
297 	struct intel_display *display = to_intel_display(intel_dp);
298 
299 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
300 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
301 }
302 
303 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
304 {
305 	struct intel_display *display = to_intel_display(intel_dp);
306 
307 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
308 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
309 }
310 
311 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
312 {
313 	struct intel_display *display = to_intel_display(intel_dp);
314 
315 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
316 		EDP_PSR_MASK(intel_dp->psr.transcoder);
317 }
318 
319 static i915_reg_t psr_ctl_reg(struct intel_display *display,
320 			      enum transcoder cpu_transcoder)
321 {
322 	if (DISPLAY_VER(display) >= 8)
323 		return EDP_PSR_CTL(display, cpu_transcoder);
324 	else
325 		return HSW_SRD_CTL;
326 }
327 
328 static i915_reg_t psr_debug_reg(struct intel_display *display,
329 				enum transcoder cpu_transcoder)
330 {
331 	if (DISPLAY_VER(display) >= 8)
332 		return EDP_PSR_DEBUG(display, cpu_transcoder);
333 	else
334 		return HSW_SRD_DEBUG;
335 }
336 
337 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
338 				   enum transcoder cpu_transcoder)
339 {
340 	if (DISPLAY_VER(display) >= 8)
341 		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
342 	else
343 		return HSW_SRD_PERF_CNT;
344 }
345 
346 static i915_reg_t psr_status_reg(struct intel_display *display,
347 				 enum transcoder cpu_transcoder)
348 {
349 	if (DISPLAY_VER(display) >= 8)
350 		return EDP_PSR_STATUS(display, cpu_transcoder);
351 	else
352 		return HSW_SRD_STATUS;
353 }
354 
355 static i915_reg_t psr_imr_reg(struct intel_display *display,
356 			      enum transcoder cpu_transcoder)
357 {
358 	if (DISPLAY_VER(display) >= 12)
359 		return TRANS_PSR_IMR(display, cpu_transcoder);
360 	else
361 		return EDP_PSR_IMR;
362 }
363 
364 static i915_reg_t psr_iir_reg(struct intel_display *display,
365 			      enum transcoder cpu_transcoder)
366 {
367 	if (DISPLAY_VER(display) >= 12)
368 		return TRANS_PSR_IIR(display, cpu_transcoder);
369 	else
370 		return EDP_PSR_IIR;
371 }
372 
373 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
374 				  enum transcoder cpu_transcoder)
375 {
376 	if (DISPLAY_VER(display) >= 8)
377 		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
378 	else
379 		return HSW_SRD_AUX_CTL;
380 }
381 
382 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
383 				   enum transcoder cpu_transcoder, int i)
384 {
385 	if (DISPLAY_VER(display) >= 8)
386 		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
387 	else
388 		return HSW_SRD_AUX_DATA(i);
389 }
390 
391 static void psr_irq_control(struct intel_dp *intel_dp)
392 {
393 	struct intel_display *display = to_intel_display(intel_dp);
394 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
395 	u32 mask;
396 
397 	if (intel_dp->psr.panel_replay_enabled)
398 		return;
399 
400 	mask = psr_irq_psr_error_bit_get(intel_dp);
401 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
402 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
403 			psr_irq_pre_entry_bit_get(intel_dp);
404 
405 	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
406 		     psr_irq_mask_get(intel_dp), ~mask);
407 }
408 
409 static void psr_event_print(struct intel_display *display,
410 			    u32 val, bool sel_update_enabled)
411 {
412 	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
413 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
414 		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
415 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
416 		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
417 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
418 		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
419 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
420 		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
421 	if (val & PSR_EVENT_GRAPHICS_RESET)
422 		drm_dbg_kms(display->drm, "\tGraphics reset\n");
423 	if (val & PSR_EVENT_PCH_INTERRUPT)
424 		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
425 	if (val & PSR_EVENT_MEMORY_UP)
426 		drm_dbg_kms(display->drm, "\tMemory up\n");
427 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
428 		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
429 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
430 		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
431 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
432 		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
433 	if (val & PSR_EVENT_REGISTER_UPDATE)
434 		drm_dbg_kms(display->drm, "\tRegister updated\n");
435 	if (val & PSR_EVENT_HDCP_ENABLE)
436 		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
437 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
438 		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
439 	if (val & PSR_EVENT_VBI_ENABLE)
440 		drm_dbg_kms(display->drm, "\tVBI enabled\n");
441 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
442 		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
443 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
444 		drm_dbg_kms(display->drm, "\tPSR disabled\n");
445 }
446 
447 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
448 {
449 	struct intel_display *display = to_intel_display(intel_dp);
450 	struct drm_i915_private *dev_priv = to_i915(display->drm);
451 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
452 	ktime_t time_ns =  ktime_get();
453 
454 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
455 		intel_dp->psr.last_entry_attempt = time_ns;
456 		drm_dbg_kms(display->drm,
457 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
458 			    transcoder_name(cpu_transcoder));
459 	}
460 
461 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
462 		intel_dp->psr.last_exit = time_ns;
463 		drm_dbg_kms(display->drm,
464 			    "[transcoder %s] PSR exit completed\n",
465 			    transcoder_name(cpu_transcoder));
466 
467 		if (DISPLAY_VER(display) >= 9) {
468 			u32 val;
469 
470 			val = intel_de_rmw(display,
471 					   PSR_EVENT(display, cpu_transcoder),
472 					   0, 0);
473 
474 			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
475 		}
476 	}
477 
478 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
479 		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
480 			 transcoder_name(cpu_transcoder));
481 
482 		intel_dp->psr.irq_aux_error = true;
483 
484 		/*
485 		 * If this interrupt is not masked it will keep
486 		 * firing so fast that it prevents the scheduled
487 		 * work from running.
488 		 * Also after a PSR error, we don't want to arm PSR
489 		 * again so we don't care about unmasking the interrupt
490 		 * or clearing irq_aux_error.
491 		 */
492 		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
493 			     0, psr_irq_psr_error_bit_get(intel_dp));
494 
495 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
496 	}
497 }
498 
499 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
500 {
501 	struct intel_display *display = to_intel_display(intel_dp);
502 	u8 val = 8; /* assume the worst if we can't read the value */
503 
504 	if (drm_dp_dpcd_readb(&intel_dp->aux,
505 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
506 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
507 	else
508 		drm_dbg_kms(display->drm,
509 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
510 	return val;
511 }
512 
513 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
514 {
515 	u8 su_capability = 0;
516 
517 	if (intel_dp->psr.sink_panel_replay_su_support)
518 		drm_dp_dpcd_readb(&intel_dp->aux,
519 				  DP_PANEL_PANEL_REPLAY_CAPABILITY,
520 				  &su_capability);
521 	else
522 		su_capability = intel_dp->psr_dpcd[1];
523 
524 	return su_capability;
525 }
526 
527 static unsigned int
528 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
529 {
530 	return intel_dp->psr.sink_panel_replay_su_support ?
531 		DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
532 		DP_PSR2_SU_X_GRANULARITY;
533 }
534 
535 static unsigned int
536 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
537 {
538 	return intel_dp->psr.sink_panel_replay_su_support ?
539 		DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
540 		DP_PSR2_SU_Y_GRANULARITY;
541 }
542 
543 /*
544  * Note: Bits related to granularity are the same in the panel replay and psr
545  * registers. Rely on the PSR definitions for these "common" bits.
546  */
547 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
548 {
549 	struct intel_display *display = to_intel_display(intel_dp);
550 	ssize_t r;
551 	u16 w;
552 	u8 y;
553 
554 	/*
555 	 * TODO: Do we need to take into account a panel supporting both PSR and
556 	 * Panel Replay?
557 	 */
558 
559 	/*
560 	 * If the sink doesn't have specific granularity requirements, set legacy
561 	 * ones.
562 	 */
563 	if (!(intel_dp_get_su_capability(intel_dp) &
564 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
565 		/* As PSR2 HW sends full lines, we do not care about x granularity */
566 		w = 4;
567 		y = 4;
568 		goto exit;
569 	}
570 
571 	r = drm_dp_dpcd_read(&intel_dp->aux,
572 			     intel_dp_get_su_x_granularity_offset(intel_dp),
573 			     &w, 2);
574 	if (r != 2)
575 		drm_dbg_kms(display->drm,
576 			    "Unable to read selective update x granularity\n");
577 	/*
578 	 * Spec says that if the value read is 0 the default granularity should
579 	 * be used instead.
580 	 */
581 	if (r != 2 || w == 0)
582 		w = 4;
583 
584 	r = drm_dp_dpcd_read(&intel_dp->aux,
585 			     intel_dp_get_su_y_granularity_offset(intel_dp),
586 			     &y, 1);
587 	if (r != 1) {
588 		drm_dbg_kms(display->drm,
589 			    "Unable to read selective update y granularity\n");
590 		y = 4;
591 	}
592 	if (y == 0)
593 		y = 1;
594 
595 exit:
596 	intel_dp->psr.su_w_granularity = w;
597 	intel_dp->psr.su_y_granularity = y;
598 }
599 
600 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
601 {
602 	struct intel_display *display = to_intel_display(intel_dp);
603 
604 	if (intel_dp_is_edp(intel_dp)) {
605 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
606 			drm_dbg_kms(display->drm,
607 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
608 			return;
609 		}
610 
611 		if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
612 			drm_dbg_kms(display->drm,
613 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
614 			return;
615 		}
616 	}
617 
618 	intel_dp->psr.sink_panel_replay_support = true;
619 
620 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
621 		intel_dp->psr.sink_panel_replay_su_support = true;
622 
623 	drm_dbg_kms(display->drm,
624 		    "Panel replay %sis supported by panel\n",
625 		    intel_dp->psr.sink_panel_replay_su_support ?
626 		    "selective_update " : "");
627 }
628 
629 static void _psr_init_dpcd(struct intel_dp *intel_dp)
630 {
631 	struct intel_display *display = to_intel_display(intel_dp);
632 
633 	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
634 		    intel_dp->psr_dpcd[0]);
635 
636 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
637 		drm_dbg_kms(display->drm,
638 			    "PSR support not currently available for this panel\n");
639 		return;
640 	}
641 
642 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
643 		drm_dbg_kms(display->drm,
644 			    "Panel lacks power state control, PSR cannot be enabled\n");
645 		return;
646 	}
647 
648 	intel_dp->psr.sink_support = true;
649 	intel_dp->psr.sink_sync_latency =
650 		intel_dp_get_sink_sync_latency(intel_dp);
651 
652 	if (DISPLAY_VER(display) >= 9 &&
653 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
654 		bool y_req = intel_dp->psr_dpcd[1] &
655 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
656 
657 		/*
658 		 * All panels that support PSR version 03h (PSR2 +
659 		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
660 		 * only sure that it is going to be used when required by the
661 		 * panel. This way the panel is capable of doing selective
662 		 * updates without an aux frame sync.
663 		 *
664 		 * To support PSR version 02h and PSR version 03h panels
665 		 * without the Y-coordinate requirement we would need to enable
666 		 * GTC first.
667 		 */
668 		intel_dp->psr.sink_psr2_support = y_req &&
669 			intel_alpm_aux_wake_supported(intel_dp);
670 		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
671 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
672 	}
673 }
674 
675 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
676 {
677 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
678 			 sizeof(intel_dp->psr_dpcd));
679 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
680 			  &intel_dp->pr_dpcd);
681 
682 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
683 		_panel_replay_init_dpcd(intel_dp);
684 
685 	if (intel_dp->psr_dpcd[0])
686 		_psr_init_dpcd(intel_dp);
687 
688 	if (intel_dp->psr.sink_psr2_support ||
689 	    intel_dp->psr.sink_panel_replay_su_support)
690 		intel_dp_get_su_granularity(intel_dp);
691 }
692 
693 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
694 {
695 	struct intel_display *display = to_intel_display(intel_dp);
696 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
697 	u32 aux_clock_divider, aux_ctl;
698 	/* write DP_SET_POWER=D0 */
699 	static const u8 aux_msg[] = {
700 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
701 		[1] = (DP_SET_POWER >> 8) & 0xff,
702 		[2] = DP_SET_POWER & 0xff,
703 		[3] = 1 - 1, /* (number of data bytes) - 1 */
704 		[4] = DP_SET_POWER_D0,
705 	};
706 	int i;
707 
708 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
709 	for (i = 0; i < sizeof(aux_msg); i += 4)
710 		intel_de_write(display,
711 			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
712 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
713 
714 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
715 
716 	/* Start with bits set for DDI_AUX_CTL register */
717 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
718 					     aux_clock_divider);
719 
720 	/* Select only valid bits for SRD_AUX_CTL */
721 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
722 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
723 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
724 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
725 
726 	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
727 		       aux_ctl);
728 }
729 
730 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
731 {
732 	struct intel_display *display = to_intel_display(intel_dp);
733 
734 	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
735 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
736 		return false;
737 
738 	return panel_replay ?
739 		intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
740 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
741 		psr2_su_region_et_global_enabled(intel_dp);
742 }
743 
744 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
745 				      const struct intel_crtc_state *crtc_state)
746 {
747 	u8 val = DP_PANEL_REPLAY_ENABLE |
748 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
749 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
750 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
751 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
752 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
753 
754 	if (crtc_state->has_sel_update)
755 		val |= DP_PANEL_REPLAY_SU_ENABLE;
756 
757 	if (crtc_state->enable_psr2_su_region_et)
758 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
759 
760 	if (crtc_state->req_psr2_sdp_prior_scanline)
761 		panel_replay_config2 |=
762 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
763 
764 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
765 
766 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
767 			   panel_replay_config2);
768 }
769 
770 static void _psr_enable_sink(struct intel_dp *intel_dp,
771 			     const struct intel_crtc_state *crtc_state)
772 {
773 	struct intel_display *display = to_intel_display(intel_dp);
774 	u8 val = 0;
775 
776 	if (crtc_state->has_sel_update) {
777 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
778 	} else {
779 		if (intel_dp->psr.link_standby)
780 			val |= DP_PSR_MAIN_LINK_ACTIVE;
781 
782 		if (DISPLAY_VER(display) >= 8)
783 			val |= DP_PSR_CRC_VERIFICATION;
784 	}
785 
786 	if (crtc_state->req_psr2_sdp_prior_scanline)
787 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
788 
789 	if (crtc_state->enable_psr2_su_region_et)
790 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
791 
792 	if (intel_dp->psr.entry_setup_frames > 0)
793 		val |= DP_PSR_FRAME_CAPTURE;
794 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
795 
796 	val |= DP_PSR_ENABLE;
797 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
798 }
799 
800 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
801 				  const struct intel_crtc_state *crtc_state)
802 {
803 	intel_alpm_enable_sink(intel_dp, crtc_state);
804 
805 	crtc_state->has_panel_replay ?
806 		_panel_replay_enable_sink(intel_dp, crtc_state) :
807 		_psr_enable_sink(intel_dp, crtc_state);
808 
809 	if (intel_dp_is_edp(intel_dp))
810 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
811 }
812 
813 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
814 {
815 	if (CAN_PANEL_REPLAY(intel_dp))
816 		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
817 				   DP_PANEL_REPLAY_ENABLE);
818 }
819 
820 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
821 {
822 	struct intel_display *display = to_intel_display(intel_dp);
823 	struct intel_connector *connector = intel_dp->attached_connector;
824 	u32 val = 0;
825 
826 	if (DISPLAY_VER(display) >= 11)
827 		val |= EDP_PSR_TP4_TIME_0us;
828 
829 	if (display->params.psr_safest_params) {
830 		val |= EDP_PSR_TP1_TIME_2500us;
831 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
832 		goto check_tp3_sel;
833 	}
834 
835 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
836 		val |= EDP_PSR_TP1_TIME_0us;
837 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
838 		val |= EDP_PSR_TP1_TIME_100us;
839 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
840 		val |= EDP_PSR_TP1_TIME_500us;
841 	else
842 		val |= EDP_PSR_TP1_TIME_2500us;
843 
844 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
845 		val |= EDP_PSR_TP2_TP3_TIME_0us;
846 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
847 		val |= EDP_PSR_TP2_TP3_TIME_100us;
848 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
849 		val |= EDP_PSR_TP2_TP3_TIME_500us;
850 	else
851 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
852 
853 	/*
854 	 * WA 0479: hsw,bdw
855 	 * "Do not skip both TP1 and TP2/TP3"
856 	 */
857 	if (DISPLAY_VER(display) < 9 &&
858 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
859 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
860 		val |= EDP_PSR_TP2_TP3_TIME_100us;
861 
862 check_tp3_sel:
863 	if (intel_dp_source_supports_tps3(display) &&
864 	    drm_dp_tps3_supported(intel_dp->dpcd))
865 		val |= EDP_PSR_TP_TP1_TP3;
866 	else
867 		val |= EDP_PSR_TP_TP1_TP2;
868 
869 	return val;
870 }
871 
872 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
873 {
874 	struct intel_display *display = to_intel_display(intel_dp);
875 	struct intel_connector *connector = intel_dp->attached_connector;
876 	int idle_frames;
877 
878 	/* Let's use 6 as the minimum to cover all known cases including the
879 	 * off-by-one issue that HW has in some cases.
880 	 */
881 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
882 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
883 
884 	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
885 		idle_frames = 0xf;
886 
887 	return idle_frames;
888 }
889 
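/*
 * Helper for Wa_16025596647: returns true when DC5/DC6 entry is blocked,
 * i.e. the enabled DC state does not allow DC5/DC6, another non-PSR pipe
 * is active, or the vblank interrupt is enabled on the PSR pipe.
 */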
890 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
891 {
892 	struct intel_display *display = to_intel_display(intel_dp);
893 	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
894 	struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
895 
896 	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
897 		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
898 		intel_dp->psr.active_non_psr_pipes ||
899 		READ_ONCE(vblank->enabled);
900 }
901 
902 static void hsw_activate_psr1(struct intel_dp *intel_dp)
903 {
904 	struct intel_display *display = to_intel_display(intel_dp);
905 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
906 	u32 max_sleep_time = 0x1f;
907 	u32 val = EDP_PSR_ENABLE;
908 
909 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
910 
911 	if (DISPLAY_VER(display) < 20)
912 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
913 
914 	if (display->platform.haswell)
915 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
916 
917 	if (intel_dp->psr.link_standby)
918 		val |= EDP_PSR_LINK_STANDBY;
919 
920 	val |= intel_psr1_get_tp_time(intel_dp);
921 
922 	if (DISPLAY_VER(display) >= 8)
923 		val |= EDP_PSR_CRC_ENABLE;
924 
925 	if (DISPLAY_VER(display) >= 20)
926 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
927 
928 	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
929 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
930 
931 	/* Wa_16025596647 */
932 	if ((DISPLAY_VER(display) == 20 ||
933 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
934 	    is_dc5_dc6_blocked(intel_dp))
935 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
936 								       intel_dp->psr.pipe,
937 								       true);
938 }
939 
940 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
941 {
942 	struct intel_display *display = to_intel_display(intel_dp);
943 	struct intel_connector *connector = intel_dp->attached_connector;
944 	u32 val = 0;
945 
946 	if (display->params.psr_safest_params)
947 		return EDP_PSR2_TP2_TIME_2500us;
948 
949 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
950 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
951 		val |= EDP_PSR2_TP2_TIME_50us;
952 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
953 		val |= EDP_PSR2_TP2_TIME_100us;
954 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
955 		val |= EDP_PSR2_TP2_TIME_500us;
956 	else
957 		val |= EDP_PSR2_TP2_TIME_2500us;
958 
959 	return val;
960 }
961 
962 static int psr2_block_count_lines(struct intel_dp *intel_dp)
963 {
964 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
965 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
966 }
967 
968 static int psr2_block_count(struct intel_dp *intel_dp)
969 {
970 	return psr2_block_count_lines(intel_dp) / 4;
971 }
972 
973 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
974 {
975 	u8 frames_before_su_entry;
976 
977 	frames_before_su_entry = max_t(u8,
978 				       intel_dp->psr.sink_sync_latency + 1,
979 				       2);
980 
981 	/* Entry setup frames must be at least 1 less than frames before SU entry */
982 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
983 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
984 
985 	return frames_before_su_entry;
986 }
987 
988 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
989 {
990 	struct intel_display *display = to_intel_display(intel_dp);
991 	struct intel_psr *psr = &intel_dp->psr;
992 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
993 
994 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
995 		u32 val = psr->su_region_et_enabled ?
996 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
997 
998 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
999 			val |= EDP_PSR2_SU_SDP_SCANLINE;
1000 
1001 		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
1002 			       val);
1003 	}
1004 
1005 	intel_de_rmw(display,
1006 		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1007 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1008 
1009 	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1010 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1011 }
1012 
1013 static void hsw_activate_psr2(struct intel_dp *intel_dp)
1014 {
1015 	struct intel_display *display = to_intel_display(intel_dp);
1016 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1017 	u32 val = EDP_PSR2_ENABLE;
1018 	u32 psr_val = 0;
1019 	u8 idle_frames;
1020 
1021 	/* Wa_16025596647 */
1022 	if ((DISPLAY_VER(display) == 20 ||
1023 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1024 	    is_dc5_dc6_blocked(intel_dp))
1025 		idle_frames = 0;
1026 	else
1027 		idle_frames = psr_compute_idle_frames(intel_dp);
1028 	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
1029 
1030 	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
1031 		val |= EDP_SU_TRACK_ENABLE;
1032 
1033 	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1034 		val |= EDP_Y_COORDINATE_ENABLE;
1035 
1036 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1037 
1038 	val |= intel_psr2_get_tp_time(intel_dp);
1039 
1040 	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1041 		if (psr2_block_count(intel_dp) > 2)
1042 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1043 		else
1044 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1045 	}
1046 
1047 	/* Wa_22012278275:adl-p */
1048 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1049 		static const u8 map[] = {
1050 			2, /* 5 lines */
1051 			1, /* 6 lines */
1052 			0, /* 7 lines */
1053 			3, /* 8 lines */
1054 			6, /* 9 lines */
1055 			5, /* 10 lines */
1056 			4, /* 11 lines */
1057 			7, /* 12 lines */
1058 		};
1059 		/*
1060 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1061 		 * comments below for more information
1062 		 */
1063 		int tmp;
1064 
1065 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1066 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1067 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1068 
1069 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1070 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1071 	} else if (DISPLAY_VER(display) >= 20) {
1072 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1073 	} else if (DISPLAY_VER(display) >= 12) {
1074 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1075 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1076 	} else if (DISPLAY_VER(display) >= 9) {
1077 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1078 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1079 	}
1080 
1081 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1082 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1083 
1084 	if (DISPLAY_VER(display) >= 20)
1085 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1086 
1087 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1088 		u32 tmp;
1089 
1090 		tmp = intel_de_read(display,
1091 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1092 		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1093 	} else if (HAS_PSR2_SEL_FETCH(display)) {
1094 		intel_de_write(display,
1095 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1096 	}
1097 
1098 	if (intel_dp->psr.su_region_et_enabled)
1099 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1100 
1101 	/*
1102 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1103 	 * recommends keeping this bit unset while PSR2 is enabled.
1104 	 */
1105 	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1106 
1107 	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1108 }
1109 
1110 static bool
1111 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1112 {
1113 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1114 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1115 	else if (DISPLAY_VER(display) >= 12)
1116 		return cpu_transcoder == TRANSCODER_A;
1117 	else if (DISPLAY_VER(display) >= 9)
1118 		return cpu_transcoder == TRANSCODER_EDP;
1119 	else
1120 		return false;
1121 }
1122 
1123 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1124 {
1125 	if (!crtc_state->hw.active)
1126 		return 0;
1127 
1128 	return DIV_ROUND_UP(1000 * 1000,
1129 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1130 }
1131 
1132 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1133 				     u32 idle_frames)
1134 {
1135 	struct intel_display *display = to_intel_display(intel_dp);
1136 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1137 
1138 	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1139 		     EDP_PSR2_IDLE_FRAMES_MASK,
1140 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1141 }
1142 
1143 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1144 {
1145 	struct intel_display *display = to_intel_display(intel_dp);
1146 
1147 	psr2_program_idle_frames(intel_dp, 0);
1148 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1149 }
1150 
1151 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1152 {
1153 	struct intel_display *display = to_intel_display(intel_dp);
1154 
1155 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1156 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1157 }
1158 
1159 static void tgl_dc3co_disable_work(struct work_struct *work)
1160 {
1161 	struct intel_dp *intel_dp =
1162 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1163 
1164 	mutex_lock(&intel_dp->psr.lock);
1165 	/* If delayed work is pending, it is not idle */
1166 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1167 		goto unlock;
1168 
1169 	tgl_psr2_disable_dc3co(intel_dp);
1170 unlock:
1171 	mutex_unlock(&intel_dp->psr.lock);
1172 }
1173 
1174 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1175 {
1176 	if (!intel_dp->psr.dc3co_exitline)
1177 		return;
1178 
1179 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1180 	/* Before PSR2 exit disallow dc3co */
1181 	tgl_psr2_disable_dc3co(intel_dp);
1182 }
1183 
1184 static bool
1185 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1186 			      struct intel_crtc_state *crtc_state)
1187 {
1188 	struct intel_display *display = to_intel_display(intel_dp);
1189 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1190 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1191 	enum port port = dig_port->base.port;
1192 
1193 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1194 		return pipe <= PIPE_B && port <= PORT_B;
1195 	else
1196 		return pipe == PIPE_A && port == PORT_A;
1197 }
1198 
1199 static void
1200 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1201 				  struct intel_crtc_state *crtc_state)
1202 {
1203 	struct intel_display *display = to_intel_display(intel_dp);
1204 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1205 	struct i915_power_domains *power_domains = &display->power.domains;
1206 	u32 exit_scanlines;
1207 
1208 	/*
1209 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1210 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1211 	 * is applied. B.Specs:49196
1212 	 */
1213 	return;
1214 
1215 	/*
1216 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
1217 	 * TODO: when the issue is addressed, this restriction should be removed.
1218 	 */
1219 	if (crtc_state->enable_psr2_sel_fetch)
1220 		return;
1221 
1222 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1223 		return;
1224 
1225 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1226 		return;
1227 
1228 	/* Wa_16011303918:adl-p */
1229 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1230 		return;
1231 
1232 	/*
1233 	 * DC3CO Exit time 200us B.Spec 49196
1234 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1235 	 */
1236 	exit_scanlines =
1237 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1238 
1239 	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1240 		return;
1241 
1242 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1243 }
1244 
1245 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1246 					      struct intel_crtc_state *crtc_state)
1247 {
1248 	struct intel_display *display = to_intel_display(intel_dp);
1249 
1250 	if (!display->params.enable_psr2_sel_fetch &&
1251 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1252 		drm_dbg_kms(display->drm,
1253 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1254 		return false;
1255 	}
1256 
1257 	if (crtc_state->uapi.async_flip) {
1258 		drm_dbg_kms(display->drm,
1259 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1260 		return false;
1261 	}
1262 
1263 	return crtc_state->enable_psr2_sel_fetch = true;
1264 }
1265 
1266 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1267 				   struct intel_crtc_state *crtc_state)
1268 {
1269 	struct intel_display *display = to_intel_display(intel_dp);
1270 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1271 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1272 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1273 	u16 y_granularity = 0;
1274 
1275 	/* PSR2 HW only sends full lines so we only need to validate the width */
1276 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1277 		return false;
1278 
1279 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1280 		return false;
1281 
1282 	/* HW tracking is only aligned to 4 lines */
1283 	if (!crtc_state->enable_psr2_sel_fetch)
1284 		return intel_dp->psr.su_y_granularity == 4;
1285 
1286 	/*
1287 	 * adl_p and mtl platforms have 1 line granularity.
1288 	 * For other platforms with SW tracking we can adjust the y coordinates
1289 	 * to match the sink requirement if it is a multiple of 4.
1290 	 */
1291 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1292 		y_granularity = intel_dp->psr.su_y_granularity;
1293 	else if (intel_dp->psr.su_y_granularity <= 2)
1294 		y_granularity = 4;
1295 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1296 		y_granularity = intel_dp->psr.su_y_granularity;
1297 
1298 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1299 		return false;
1300 
1301 	if (crtc_state->dsc.compression_enable &&
1302 	    vdsc_cfg->slice_height % y_granularity)
1303 		return false;
1304 
1305 	crtc_state->su_y_granularity = y_granularity;
1306 	return true;
1307 }
1308 
1309 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1310 							struct intel_crtc_state *crtc_state)
1311 {
1312 	struct intel_display *display = to_intel_display(intel_dp);
1313 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1314 	u32 hblank_total, hblank_ns, req_ns;
1315 
1316 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1317 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1318 
1319 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1320 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1321 
1322 	if ((hblank_ns - req_ns) > 100)
1323 		return true;
1324 
1325 	/* Not supported <13 / Wa_22012279113:adl-p */
1326 	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1327 		return false;
1328 
1329 	crtc_state->req_psr2_sdp_prior_scanline = true;
1330 	return true;
1331 }
1332 
1333 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1334 					const struct drm_display_mode *adjusted_mode)
1335 {
1336 	struct intel_display *display = to_intel_display(intel_dp);
1337 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1338 	int entry_setup_frames = 0;
1339 
1340 	if (psr_setup_time < 0) {
1341 		drm_dbg_kms(display->drm,
1342 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1343 			    intel_dp->psr_dpcd[1]);
1344 		return -ETIME;
1345 	}
1346 
1347 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1348 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1349 		if (DISPLAY_VER(display) >= 20) {
1350 			/* setup entry frames can be up to 3 frames */
1351 			entry_setup_frames = 1;
1352 			drm_dbg_kms(display->drm,
1353 				    "PSR setup entry frames %d\n",
1354 				    entry_setup_frames);
1355 		} else {
1356 			drm_dbg_kms(display->drm,
1357 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1358 				    psr_setup_time);
1359 			return -ETIME;
1360 		}
1361 	}
1362 
1363 	return entry_setup_frames;
1364 }
1365 
1366 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1367 				       const struct intel_crtc_state *crtc_state,
1368 				       bool aux_less)
1369 {
1370 	struct intel_display *display = to_intel_display(intel_dp);
1371 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1372 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1373 	int wake_lines;
1374 
1375 	if (aux_less)
1376 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1377 	else
1378 		wake_lines = DISPLAY_VER(display) < 20 ?
1379 			psr2_block_count_lines(intel_dp) :
1380 			intel_dp->alpm_parameters.io_wake_lines;
1381 
1382 	if (crtc_state->req_psr2_sdp_prior_scanline)
1383 		vblank -= 1;
1384 
1385 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1386 	if (vblank < wake_lines)
1387 		return false;
1388 
1389 	return true;
1390 }
1391 
1392 static bool alpm_config_valid(struct intel_dp *intel_dp,
1393 			      const struct intel_crtc_state *crtc_state,
1394 			      bool aux_less)
1395 {
1396 	struct intel_display *display = to_intel_display(intel_dp);
1397 
1398 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1399 		drm_dbg_kms(display->drm,
1400 			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1401 		return false;
1402 	}
1403 
1404 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1405 		drm_dbg_kms(display->drm,
1406 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1407 		return false;
1408 	}
1409 
1410 	return true;
1411 }
1412 
1413 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1414 				    struct intel_crtc_state *crtc_state)
1415 {
1416 	struct intel_display *display = to_intel_display(intel_dp);
1417 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1418 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1419 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1420 
1421 	if (!intel_dp->psr.sink_psr2_support)
1422 		return false;
1423 
1424 	/* JSL and EHL only support eDP 1.3 */
1425 	if (display->platform.jasperlake || display->platform.elkhartlake) {
1426 		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1427 		return false;
1428 	}
1429 
1430 	/* Wa_16011181250 */
1431 	if (display->platform.rocketlake || display->platform.alderlake_s ||
1432 	    display->platform.dg2) {
1433 		drm_dbg_kms(display->drm,
1434 			    "PSR2 is defeatured for this platform\n");
1435 		return false;
1436 	}
1437 
1438 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1439 		drm_dbg_kms(display->drm,
1440 			    "PSR2 not completely functional in this stepping\n");
1441 		return false;
1442 	}
1443 
1444 	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1445 		drm_dbg_kms(display->drm,
1446 			    "PSR2 not supported in transcoder %s\n",
1447 			    transcoder_name(crtc_state->cpu_transcoder));
1448 		return false;
1449 	}
1450 
1451 	/*
1452 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1453 	 * resolution requires DSC to be enabled, priority is given to DSC
1454 	 * over PSR2.
1455 	 */
1456 	if (crtc_state->dsc.compression_enable &&
1457 	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
1458 		drm_dbg_kms(display->drm,
1459 			    "PSR2 cannot be enabled since DSC is enabled\n");
1460 		return false;
1461 	}
1462 
1463 	if (DISPLAY_VER(display) >= 20) {
1464 		psr_max_h = crtc_hdisplay;
1465 		psr_max_v = crtc_vdisplay;
1466 		max_bpp = crtc_state->pipe_bpp;
1467 	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1468 		psr_max_h = 5120;
1469 		psr_max_v = 3200;
1470 		max_bpp = 30;
1471 	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1472 		psr_max_h = 4096;
1473 		psr_max_v = 2304;
1474 		max_bpp = 24;
1475 	} else if (DISPLAY_VER(display) == 9) {
1476 		psr_max_h = 3640;
1477 		psr_max_v = 2304;
1478 		max_bpp = 24;
1479 	}
1480 
1481 	if (crtc_state->pipe_bpp > max_bpp) {
1482 		drm_dbg_kms(display->drm,
1483 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1484 			    crtc_state->pipe_bpp, max_bpp);
1485 		return false;
1486 	}
1487 
1488 	/* Wa_16011303918:adl-p */
1489 	if (crtc_state->vrr.enable &&
1490 	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1491 		drm_dbg_kms(display->drm,
1492 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1493 		return false;
1494 	}
1495 
1496 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1497 		return false;
1498 
1499 	if (!crtc_state->enable_psr2_sel_fetch &&
1500 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1501 		drm_dbg_kms(display->drm,
1502 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1503 			    crtc_hdisplay, crtc_vdisplay,
1504 			    psr_max_h, psr_max_v);
1505 		return false;
1506 	}
1507 
1508 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1509 
1510 	return true;
1511 }
1512 
1513 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1514 					  struct intel_crtc_state *crtc_state)
1515 {
1516 	struct intel_display *display = to_intel_display(intel_dp);
1517 
1518 	if (HAS_PSR2_SEL_FETCH(display) &&
1519 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1520 	    !HAS_PSR_HW_TRACKING(display)) {
1521 		drm_dbg_kms(display->drm,
1522 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1523 		goto unsupported;
1524 	}
1525 
1526 	if (!psr2_global_enabled(intel_dp)) {
1527 		drm_dbg_kms(display->drm,
1528 			    "Selective update disabled by flag\n");
1529 		goto unsupported;
1530 	}
1531 
1532 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1533 		goto unsupported;
1534 
1535 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1536 		drm_dbg_kms(display->drm,
1537 			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1538 		goto unsupported;
1539 	}
1540 
1541 	if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1542 					     !intel_dp->psr.sink_panel_replay_su_support))
1543 		goto unsupported;
1544 
1545 	if (crtc_state->crc_enabled) {
1546 		drm_dbg_kms(display->drm,
1547 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1548 		goto unsupported;
1549 	}
1550 
1551 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1552 		drm_dbg_kms(display->drm,
1553 			    "Selective update not enabled, SU granularity not compatible\n");
1554 		goto unsupported;
1555 	}
1556 
1557 	crtc_state->enable_psr2_su_region_et =
1558 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1559 
1560 	return true;
1561 
1562 unsupported:
1563 	crtc_state->enable_psr2_sel_fetch = false;
1564 	return false;
1565 }
1566 
1567 static bool _psr_compute_config(struct intel_dp *intel_dp,
1568 				struct intel_crtc_state *crtc_state)
1569 {
1570 	struct intel_display *display = to_intel_display(intel_dp);
1571 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1572 	int entry_setup_frames;
1573 
1574 	if (!CAN_PSR(intel_dp))
1575 		return false;
1576 
1577 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1578 
1579 	if (entry_setup_frames >= 0) {
1580 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1581 	} else {
1582 		drm_dbg_kms(display->drm,
1583 			    "PSR condition failed: PSR setup timing not met\n");
1584 		return false;
1585 	}
1586 
1587 	return true;
1588 }
1589 
1590 static bool
1591 _panel_replay_compute_config(struct intel_dp *intel_dp,
1592 			     const struct intel_crtc_state *crtc_state,
1593 			     const struct drm_connector_state *conn_state)
1594 {
1595 	struct intel_display *display = to_intel_display(intel_dp);
1596 	struct intel_connector *connector =
1597 		to_intel_connector(conn_state->connector);
1598 	struct intel_hdcp *hdcp = &connector->hdcp;
1599 
1600 	if (!CAN_PANEL_REPLAY(intel_dp))
1601 		return false;
1602 
1603 	if (!panel_replay_global_enabled(intel_dp)) {
1604 		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1605 		return false;
1606 	}
1607 
1608 	if (crtc_state->crc_enabled) {
1609 		drm_dbg_kms(display->drm,
1610 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1611 		return false;
1612 	}
1613 
1614 	if (!intel_dp_is_edp(intel_dp))
1615 		return true;
1616 
1617 	/* Remaining checks are for eDP only */
1618 
1619 	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1620 	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1621 		return false;
1622 
1623 	/* 128b/132b Panel Replay is not supported on eDP */
1624 	if (intel_dp_is_uhbr(crtc_state)) {
1625 		drm_dbg_kms(display->drm,
1626 			    "Panel Replay is not supported with 128b/132b\n");
1627 		return false;
1628 	}
1629 
1630 	/* HW will not allow Panel Replay on eDP when HDCP enabled */
1631 	if (conn_state->content_protection ==
1632 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1633 	    (conn_state->content_protection ==
1634 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1635 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1636 		drm_dbg_kms(display->drm,
1637 			    "Panel Replay is not supported with HDCP\n");
1638 		return false;
1639 	}
1640 
1641 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1642 		return false;
1643 
1644 	return true;
1645 }
1646 
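/*
 * Wa_18037818876: on display version 20, PSR without selective update together
 * with a non-zero entry setup frame count can hit a PSR FSM hang, so PSR is
 * kept disabled in that case (see intel_psr_compute_config()).
 */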
1647 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1648 					   struct intel_crtc_state *crtc_state)
1649 {
1650 	struct intel_display *display = to_intel_display(intel_dp);
1651 
1652 	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1653 		!crtc_state->has_sel_update);
1654 }
1655 
1656 void intel_psr_compute_config(struct intel_dp *intel_dp,
1657 			      struct intel_crtc_state *crtc_state,
1658 			      struct drm_connector_state *conn_state)
1659 {
1660 	struct intel_display *display = to_intel_display(intel_dp);
1661 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1662 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1663 	struct intel_crtc *crtc;
1664 	u8 active_pipes = 0;
1665 
1666 	if (!psr_global_enabled(intel_dp)) {
1667 		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1668 		return;
1669 	}
1670 
1671 	if (intel_dp->psr.sink_not_reliable) {
1672 		drm_dbg_kms(display->drm,
1673 			    "PSR sink implementation is not reliable\n");
1674 		return;
1675 	}
1676 
1677 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1678 		drm_dbg_kms(display->drm,
1679 			    "PSR condition failed: Interlaced mode enabled\n");
1680 		return;
1681 	}
1682 
1683 	/*
1684 	 * FIXME figure out what is wrong with PSR+joiner and
1685 	 * fix it. Presumably something related to the fact that
1686 	 * PSR is a transcoder level feature.
1687 	 */
1688 	if (crtc_state->joiner_pipes) {
1689 		drm_dbg_kms(display->drm,
1690 			    "PSR disabled due to joiner\n");
1691 		return;
1692 	}
1693 
1694 	/*
1695 	 * Currently PSR/PR doesn't work reliably with VRR enabled.
1696 	 */
1697 	if (crtc_state->vrr.enable)
1698 		return;
1699 
1700 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1701 								    crtc_state,
1702 								    conn_state);
1703 
1704 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1705 		_psr_compute_config(intel_dp, crtc_state);
1706 
1707 	if (!crtc_state->has_psr)
1708 		return;
1709 
1710 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1711 
1712 	/* Wa_18037818876 */
1713 	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
1714 		crtc_state->has_psr = false;
1715 		drm_dbg_kms(display->drm,
1716 			    "PSR disabled to workaround PSR FSM hang issue\n");
1717 	}
1718 
1719 	/* Rest is for Wa_16025596647 */
1720 	if (DISPLAY_VER(display) != 20 &&
1721 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1722 		return;
1723 
1724 	/* Not needed by Panel Replay */
1725 	if (crtc_state->has_panel_replay)
1726 		return;
1727 
1728 	/* We ignore possible secondary PSR/Panel Replay capable eDP */
1729 	for_each_intel_crtc(display->drm, crtc)
1730 		active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1731 
1732 	active_pipes = intel_calc_active_pipes(state, active_pipes);
1733 
1734 	crtc_state->active_non_psr_pipes = active_pipes &
1735 		~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1736 }
1737 
1738 void intel_psr_get_config(struct intel_encoder *encoder,
1739 			  struct intel_crtc_state *pipe_config)
1740 {
1741 	struct intel_display *display = to_intel_display(encoder);
1742 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1743 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1744 	struct intel_dp *intel_dp;
1745 	u32 val;
1746 
1747 	if (!dig_port)
1748 		return;
1749 
1750 	intel_dp = &dig_port->dp;
1751 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1752 		return;
1753 
1754 	mutex_lock(&intel_dp->psr.lock);
1755 	if (!intel_dp->psr.enabled)
1756 		goto unlock;
1757 
1758 	if (intel_dp->psr.panel_replay_enabled) {
1759 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1760 	} else {
1761 		/*
1762 		 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1763 		 * enabled/disabled because of frontbuffer tracking and others.
1764 		 */
1765 		pipe_config->has_psr = true;
1766 	}
1767 
1768 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1769 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1770 
1771 	if (!intel_dp->psr.sel_update_enabled)
1772 		goto unlock;
1773 
1774 	if (HAS_PSR2_SEL_FETCH(display)) {
1775 		val = intel_de_read(display,
1776 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1777 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1778 			pipe_config->enable_psr2_sel_fetch = true;
1779 	}
1780 
1781 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1782 
1783 	if (DISPLAY_VER(display) >= 12) {
1784 		val = intel_de_read(display,
1785 				    TRANS_EXITLINE(display, cpu_transcoder));
1786 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1787 	}
1788 unlock:
1789 	mutex_unlock(&intel_dp->psr.lock);
1790 }
1791 
1792 static void intel_psr_activate(struct intel_dp *intel_dp)
1793 {
1794 	struct intel_display *display = to_intel_display(intel_dp);
1795 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1796 
1797 	drm_WARN_ON(display->drm,
1798 		    transcoder_has_psr2(display, cpu_transcoder) &&
1799 		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1800 
1801 	drm_WARN_ON(display->drm,
1802 		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1803 
1804 	drm_WARN_ON(display->drm, intel_dp->psr.active);
1805 
1806 	lockdep_assert_held(&intel_dp->psr.lock);
1807 
1808 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1809 	if (intel_dp->psr.panel_replay_enabled)
1810 		dg2_activate_panel_replay(intel_dp);
1811 	else if (intel_dp->psr.sel_update_enabled)
1812 		hsw_activate_psr2(intel_dp);
1813 	else
1814 		hsw_activate_psr1(intel_dp);
1815 
1816 	intel_dp->psr.active = true;
1817 }
1818 
1819 /*
1820  * Wa_16013835468
1821  * Wa_14015648006
1822  */
1823 static void wm_optimization_wa(struct intel_dp *intel_dp,
1824 			       const struct intel_crtc_state *crtc_state)
1825 {
1826 	struct intel_display *display = to_intel_display(intel_dp);
1827 	enum pipe pipe = intel_dp->psr.pipe;
1828 	bool activate = false;
1829 
1830 	/* Wa_14015648006 */
1831 	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1832 		activate = true;
1833 
1834 	/* Wa_16013835468 */
1835 	if (DISPLAY_VER(display) == 12 &&
1836 	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1837 	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1838 		activate = true;
1839 
1840 	if (activate)
1841 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1842 			     0, LATENCY_REPORTING_REMOVED(pipe));
1843 	else
1844 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1845 			     LATENCY_REPORTING_REMOVED(pipe), 0);
1846 }
1847 
1848 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1849 				    const struct intel_crtc_state *crtc_state)
1850 {
1851 	struct intel_display *display = to_intel_display(intel_dp);
1852 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1853 	u32 mask = 0;
1854 
1855 	/*
1856 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1857 	 * SKL+ use hardcoded values for PSR AUX transactions.
1858 	 */
1859 	if (DISPLAY_VER(display) < 9)
1860 		hsw_psr_setup_aux(intel_dp);
1861 
1862 	/*
1863 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1864 	 * mask LPSP to avoid dependency on other drivers that might block
1865 	 * runtime_pm, besides preventing other HW tracking issues, now that
1866 	 * we can rely on frontbuffer tracking.
1867 	 *
1868 	 * From bspec prior LunarLake:
1869 	 * From bspec prior to LunarLake:
1870 	 * panel replay mode.
1871 	 *
1872 	 * From bspec beyond LunarLake:
1873 	 * Panel Replay on DP: No bits are applicable
1874 	 * Panel Replay on eDP: All bits are applicable
1875 	 */
1876 	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1877 		mask = EDP_PSR_DEBUG_MASK_HPD;
1878 
1879 	if (intel_dp_is_edp(intel_dp)) {
1880 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1881 
1882 		/*
1883 		 * For some unknown reason on HSW non-ULT (or at least on
1884 		 * Dell Latitude E6540) external displays start to flicker
1885 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1886 		 * higher than should be possible with an external display.
1887 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1888 		 * when external displays are active.
1889 		 */
1890 		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
1891 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1892 
1893 		if (DISPLAY_VER(display) < 20)
1894 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1895 
1896 		/*
1897 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1898 		 * registers in order to keep the CURSURFLIVE tricks working :(
1899 		 */
1900 		if (IS_DISPLAY_VER(display, 9, 10))
1901 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1902 
1903 		/* allow PSR with sprite enabled */
1904 		if (display->platform.haswell)
1905 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1906 	}
1907 
1908 	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1909 
1910 	psr_irq_control(intel_dp);
1911 
1912 	/*
1913 	 * TODO: if future platforms support DC3CO in more than one
1914 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1915 	 */
1916 	if (intel_dp->psr.dc3co_exitline)
1917 		intel_de_rmw(display,
1918 			     TRANS_EXITLINE(display, cpu_transcoder),
1919 			     EXITLINE_MASK,
1920 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1921 
1922 	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1923 		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1924 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1925 			     IGNORE_PSR2_HW_TRACKING : 0);
1926 
1927 	/*
1928 	 * Wa_16013835468
1929 	 * Wa_14015648006
1930 	 */
1931 	wm_optimization_wa(intel_dp, crtc_state);
1932 
1933 	if (intel_dp->psr.sel_update_enabled) {
1934 		if (DISPLAY_VER(display) == 9)
1935 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
1936 				     PSR2_VSC_ENABLE_PROG_HEADER |
1937 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1938 
1939 		/*
1940 		 * Wa_16014451276:adlp,mtl[a0,b0]
1941 		 * All supported adlp panels have 1-based X granularity; this may
1942 		 * cause issues if non-supported panels are used.
1943 		 */
1944 		if (!intel_dp->psr.panel_replay_enabled &&
1945 		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1946 		     display->platform.alderlake_p))
1947 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
1948 				     0, ADLP_1_BASED_X_GRANULARITY);
1949 
1950 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1951 		if (!intel_dp->psr.panel_replay_enabled &&
1952 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1953 			intel_de_rmw(display,
1954 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1955 				     0,
1956 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1957 		else if (display->platform.alderlake_p)
1958 			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1959 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1960 	}
1961 
1962 	/* Wa_16025596647 */
1963 	if ((DISPLAY_VER(display) == 20 ||
1964 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1965 	    !intel_dp->psr.panel_replay_enabled)
1966 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
1967 
1968 	intel_alpm_configure(intel_dp, crtc_state);
1969 }
1970 
1971 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1972 {
1973 	struct intel_display *display = to_intel_display(intel_dp);
1974 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1975 	u32 val;
1976 
1977 	if (intel_dp->psr.panel_replay_enabled)
1978 		goto no_err;
1979 
1980 	/*
1981 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1982 	 * will still keep the error set even after the reset done in the
1983 	 * irq_preinstall and irq_uninstall hooks.
1984 	 * And enabling in this situation causes the screen to freeze the
1985 	 * first time that PSR HW tries to activate, so let's keep PSR disabled
1986 	 * to avoid any rendering problems.
1987 	 */
1988 	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1989 	val &= psr_irq_psr_error_bit_get(intel_dp);
1990 	if (val) {
1991 		intel_dp->psr.sink_not_reliable = true;
1992 		drm_dbg_kms(display->drm,
1993 			    "PSR interruption error set, not enabling PSR\n");
1994 		return false;
1995 	}
1996 
1997 no_err:
1998 	return true;
1999 }
2000 
2001 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
2002 				    const struct intel_crtc_state *crtc_state)
2003 {
2004 	struct intel_display *display = to_intel_display(intel_dp);
2005 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2006 	u32 val;
2007 
2008 	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
2009 
2010 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
2011 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
2012 	intel_dp->psr.busy_frontbuffer_bits = 0;
2013 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
2014 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
2015 	/* DC5/DC6 requires at least 6 idle frames */
2016 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
2017 	intel_dp->psr.dc3co_exit_delay = val;
2018 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
2019 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
2020 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
2021 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2022 	intel_dp->psr.req_psr2_sdp_prior_scanline =
2023 		crtc_state->req_psr2_sdp_prior_scanline;
2024 	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
2025 
2026 	if (!psr_interrupt_error_check(intel_dp))
2027 		return;
2028 
2029 	if (intel_dp->psr.panel_replay_enabled)
2030 		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2031 	else
2032 		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2033 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2034 
2035 	/*
2036 	 * Enabling sink PSR/Panel Replay here only for PSR. Panel Replay enable
2037 	 * bit is already written at this point. Sink ALPM is enabled here for
2038 	 * PSR and Panel Replay. See
2039 	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2040 	 *  - Selective Update
2041 	 *  - Region Early Transport
2042 	 *  - Selective Update Region Scanline Capture
2043 	 *  - VSC_SDP_CRC
2044 	 *  - HPD on different Errors
2045 	 *  - CRC verification
2046 	 * are written for PSR and Panel Replay here.
2047 	 */
2048 	intel_psr_enable_sink(intel_dp, crtc_state);
2049 
2050 	if (intel_dp_is_edp(intel_dp))
2051 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2052 
2053 	intel_psr_enable_source(intel_dp, crtc_state);
2054 	intel_dp->psr.enabled = true;
2055 	intel_dp->psr.pause_counter = 0;
2056 
2057 	/*
2058 	 * Link_ok is sticky and set here on PSR enable. We can assume link
2059 	 * training is complete as we never continue to PSR enable with
2060 	 * untrained link. Link_ok is kept as set until first short pulse
2061 	 * interrupt. This is targeted to workaround panels stating bad link
2062 	 * interrupt. This is targeted to work around panels stating bad link
2063 	 */
2064 	intel_dp->psr.link_ok = true;
2065 
2066 	intel_psr_activate(intel_dp);
2067 }
2068 
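/*
 * Deactivate PSR/PSR2/Panel Replay in the source. If PSR was never activated
 * this only sanity checks that the HW enable bits are indeed clear.
 */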
2069 static void intel_psr_exit(struct intel_dp *intel_dp)
2070 {
2071 	struct intel_display *display = to_intel_display(intel_dp);
2072 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2073 	u32 val;
2074 
2075 	if (!intel_dp->psr.active) {
2076 		if (transcoder_has_psr2(display, cpu_transcoder)) {
2077 			val = intel_de_read(display,
2078 					    EDP_PSR2_CTL(display, cpu_transcoder));
2079 			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2080 		}
2081 
2082 		val = intel_de_read(display,
2083 				    psr_ctl_reg(display, cpu_transcoder));
2084 		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2085 
2086 		return;
2087 	}
2088 
2089 	if (intel_dp->psr.panel_replay_enabled) {
2090 		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2091 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2092 	} else if (intel_dp->psr.sel_update_enabled) {
2093 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2094 
2095 		val = intel_de_rmw(display,
2096 				   EDP_PSR2_CTL(display, cpu_transcoder),
2097 				   EDP_PSR2_ENABLE, 0);
2098 
2099 		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2100 	} else {
2101 		if (DISPLAY_VER(display) == 20 ||
2102 		    IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
2103 			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
2104 								       intel_dp->psr.pipe,
2105 								       false);
2106 
2107 		val = intel_de_rmw(display,
2108 				   psr_ctl_reg(display, cpu_transcoder),
2109 				   EDP_PSR_ENABLE, 0);
2110 
2111 		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2112 	}
2113 	intel_dp->psr.active = false;
2114 }
2115 
2116 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2117 {
2118 	struct intel_display *display = to_intel_display(intel_dp);
2119 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2120 	i915_reg_t psr_status;
2121 	u32 psr_status_mask;
2122 
2123 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2124 					  intel_dp->psr.panel_replay_enabled)) {
2125 		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2126 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2127 	} else {
2128 		psr_status = psr_status_reg(display, cpu_transcoder);
2129 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2130 	}
2131 
2132 	/* Wait till PSR is idle */
2133 	if (intel_de_wait_for_clear(display, psr_status,
2134 				    psr_status_mask, 2000))
2135 		drm_err(display->drm, "Timed out waiting PSR idle state\n");
2136 }
2137 
2138 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2139 {
2140 	struct intel_display *display = to_intel_display(intel_dp);
2141 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2142 
2143 	lockdep_assert_held(&intel_dp->psr.lock);
2144 
2145 	if (!intel_dp->psr.enabled)
2146 		return;
2147 
2148 	if (intel_dp->psr.panel_replay_enabled)
2149 		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2150 	else
2151 		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2152 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2153 
2154 	intel_psr_exit(intel_dp);
2155 	intel_psr_wait_exit_locked(intel_dp);
2156 
2157 	/*
2158 	 * Wa_16013835468
2159 	 * Wa_14015648006
2160 	 */
2161 	if (DISPLAY_VER(display) >= 11)
2162 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2163 			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2164 
2165 	if (intel_dp->psr.sel_update_enabled) {
2166 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2167 		if (!intel_dp->psr.panel_replay_enabled &&
2168 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2169 			intel_de_rmw(display,
2170 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2171 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2172 		else if (display->platform.alderlake_p)
2173 			intel_de_rmw(display, CLKGATE_DIS_MISC,
2174 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2175 	}
2176 
2177 	if (intel_dp_is_edp(intel_dp))
2178 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2179 
2180 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
2181 		intel_alpm_disable(intel_dp);
2182 
2183 	/* Disable PSR on Sink */
2184 	if (!intel_dp->psr.panel_replay_enabled) {
2185 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2186 
2187 		if (intel_dp->psr.sel_update_enabled)
2188 			drm_dp_dpcd_writeb(&intel_dp->aux,
2189 					   DP_RECEIVER_ALPM_CONFIG, 0);
2190 	}
2191 
2192 	/* Wa_16025596647 */
2193 	if ((DISPLAY_VER(display) == 20 ||
2194 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2195 	    !intel_dp->psr.panel_replay_enabled)
2196 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
2197 
2198 	intel_dp->psr.enabled = false;
2199 	intel_dp->psr.panel_replay_enabled = false;
2200 	intel_dp->psr.sel_update_enabled = false;
2201 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2202 	intel_dp->psr.su_region_et_enabled = false;
2203 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2204 	intel_dp->psr.active_non_psr_pipes = 0;
2205 }
2206 
2207 /**
2208  * intel_psr_disable - Disable PSR
2209  * @intel_dp: Intel DP
2210  * @old_crtc_state: old CRTC state
2211  *
2212  * This function needs to be called before disabling pipe.
2213  */
2214 void intel_psr_disable(struct intel_dp *intel_dp,
2215 		       const struct intel_crtc_state *old_crtc_state)
2216 {
2217 	struct intel_display *display = to_intel_display(intel_dp);
2218 
2219 	if (!old_crtc_state->has_psr)
2220 		return;
2221 
2222 	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2223 			!CAN_PANEL_REPLAY(intel_dp)))
2224 		return;
2225 
2226 	mutex_lock(&intel_dp->psr.lock);
2227 
2228 	intel_psr_disable_locked(intel_dp);
2229 
2230 	intel_dp->psr.link_ok = false;
2231 
2232 	mutex_unlock(&intel_dp->psr.lock);
2233 	cancel_work_sync(&intel_dp->psr.work);
2234 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2235 }
2236 
2237 /**
2238  * intel_psr_pause - Pause PSR
2239  * @intel_dp: Intel DP
2240  *
2241  * This function needs to be called after enabling PSR.
2242  */
2243 void intel_psr_pause(struct intel_dp *intel_dp)
2244 {
2245 	struct intel_psr *psr = &intel_dp->psr;
2246 
2247 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2248 		return;
2249 
2250 	mutex_lock(&psr->lock);
2251 
2252 	if (!psr->enabled) {
2253 		mutex_unlock(&psr->lock);
2254 		return;
2255 	}
2256 
2257 	if (intel_dp->psr.pause_counter++ == 0) {
2258 		intel_psr_exit(intel_dp);
2259 		intel_psr_wait_exit_locked(intel_dp);
2260 	}
2261 
2262 	mutex_unlock(&psr->lock);
2263 
2264 	cancel_work_sync(&psr->work);
2265 	cancel_delayed_work_sync(&psr->dc3co_work);
2266 }
2267 
2268 /**
2269  * intel_psr_resume - Resume PSR
2270  * @intel_dp: Intel DP
2271  *
2272  * This function needs to be called after pausing PSR.
2273  */
2274 void intel_psr_resume(struct intel_dp *intel_dp)
2275 {
2276 	struct intel_display *display = to_intel_display(intel_dp);
2277 	struct intel_psr *psr = &intel_dp->psr;
2278 
2279 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2280 		return;
2281 
2282 	mutex_lock(&psr->lock);
2283 
2284 	if (!psr->enabled)
2285 		goto out;
2286 
2287 	if (!psr->pause_counter) {
2288 		drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2289 		goto out;
2290 	}
2291 
2292 	if (--intel_dp->psr.pause_counter == 0)
2293 		intel_psr_activate(intel_dp);
2294 
2295 out:
2296 	mutex_unlock(&psr->lock);
2297 }
2298 
2299 /**
2300  * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
2301  * notification.
2302  * @crtc_state: CRTC status
2303  *
2304  * We need to block DC6 entry in case of Panel Replay, as enabling VBI doesn't
2305  * prevent it. Panel Replay switches the main link off on DC entry, which
2306  * means vblank interrupts are not fired and that is a problem if user-space
2307  * is polling for vblank events. Also Wa_16025596647 needs to know when
2308  * vblank is enabled/disabled.
2309  */
2310 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2311 {
2312 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2313 	struct intel_display *display = to_intel_display(crtc_state);
2314 	struct intel_encoder *encoder;
2315 
2316 	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2317 		struct intel_dp *intel_dp;
2318 
2319 		if (!intel_encoder_is_dp(encoder))
2320 			continue;
2321 
2322 		intel_dp = enc_to_intel_dp(encoder);
2323 
2324 		if (!intel_dp_is_edp(intel_dp))
2325 			continue;
2326 
2327 		if (CAN_PANEL_REPLAY(intel_dp))
2328 			return true;
2329 
2330 		if ((DISPLAY_VER(display) == 20 ||
2331 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2332 		    CAN_PSR(intel_dp))
2333 			return true;
2334 	}
2335 
2336 	return false;
2337 }
2338 
2339 /**
2340  * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2341  * @dsb: DSB context
2342  * @state: the atomic state
2343  * @crtc: the CRTC
2344  *
2345  * Generate PSR "Frame Change" event.
2346  */
2347 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2348 					  struct intel_atomic_state *state,
2349 					  struct intel_crtc *crtc)
2350 {
2351 	const struct intel_crtc_state *crtc_state =
2352 		intel_pre_commit_crtc_state(state, crtc);
2353 	struct intel_display *display = to_intel_display(crtc);
2354 
2355 	if (crtc_state->has_psr)
2356 		intel_de_write_dsb(display, dsb,
2357 				   CURSURFLIVE(display, crtc->pipe), 0);
2358 }
2359 
2360 /**
2361  * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
2362  * @crtc_state: the crtc state
2363  *
2364  * Return minimum vblank delay needed by PSR.
2365  */
2366 int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
2367 {
2368 	struct intel_display *display = to_intel_display(crtc_state);
2369 
2370 	if (!crtc_state->has_psr)
2371 		return 0;
2372 
2373 	/* Wa_14015401596 */
2374 	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
2375 		return 1;
2376 
2377 	/* Rest is for SRD_STATUS needed on LunarLake and onwards */
2378 	if (DISPLAY_VER(display) < 20)
2379 		return 0;
2380 
2381 	/*
2382 	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
2383 	 *
2384 	 * To deterministically capture the transition of the state machine
2385 	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
2386 	 * one line after the non-delayed V. Blank.
2387 	 *
2388 	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
2389 	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
2390 	 * - TRANS_VTOTAL[ Vertical Active ])
2391 	 *
2392 	 * SRD_STATUS is used only by PSR1 on PantherLake.
2393 	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
2394 	 */
2395 
2396 	if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
2397 					   crtc_state->has_sel_update))
2398 		return 0;
2399 	else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
2400 					       intel_crtc_has_type(crtc_state,
2401 								   INTEL_OUTPUT_EDP)))
2402 		return 0;
2403 	else
2404 		return 1;
2405 }
2406 
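/*
 * PSR2_MAN_TRK_CTL has a different bit layout on ADL-P and display version
 * 14+: the SF bits use the ADLP_* definitions and there is no separate
 * enable bit. These helpers return the right bits for the running platform.
 */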
2407 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2408 {
2409 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2410 		PSR2_MAN_TRK_CTL_ENABLE;
2411 }
2412 
2413 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2414 {
2415 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2416 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2417 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2418 }
2419 
2420 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2421 {
2422 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2423 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2424 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2425 }
2426 
2427 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2428 {
2429 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2430 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2431 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2432 }
2433 
2434 static void intel_psr_force_update(struct intel_dp *intel_dp)
2435 {
2436 	struct intel_display *display = to_intel_display(intel_dp);
2437 
2438 	/*
2439 	 * Display WA #0884: skl+
2440 	 * This documented WA for bxt can be safely applied
2441 	 * broadly so we can force HW tracking to exit PSR
2442 	 * instead of disabling and re-enabling.
2443 	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
2444 	 * but it makes more sense to write to the currently active
2445 	 * pipe.
2446 	 *
2447 	 * This workaround does not exist for platforms with display 10 or newer,
2448 	 * but testing proved that it works up to display 13; newer platforms
2449 	 * will need testing.
2450 	 */
2451 	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2452 }
2453 
2454 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
2455 					  const struct intel_crtc_state *crtc_state)
2456 {
2457 	struct intel_display *display = to_intel_display(crtc_state);
2458 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2459 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2460 	struct intel_encoder *encoder;
2461 
2462 	if (!crtc_state->enable_psr2_sel_fetch)
2463 		return;
2464 
2465 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2466 					     crtc_state->uapi.encoder_mask) {
2467 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2468 
2469 		if (!dsb)
2470 			lockdep_assert_held(&intel_dp->psr.lock);
2471 
2472 		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
2473 			return;
2474 		break;
2475 	}
2476 
2477 	intel_de_write_dsb(display, dsb,
2478 			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2479 			   crtc_state->psr2_man_track_ctl);
2480 
2481 	if (!crtc_state->enable_psr2_su_region_et)
2482 		return;
2483 
2484 	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2485 			   crtc_state->pipe_srcsz_early_tpt);
2486 }
2487 
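/*
 * Compute the PSR2_MAN_TRK_CTL value for the already calculated SU area; a
 * full update requests a continuous full frame fetch instead of a region.
 */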
2488 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2489 				  bool full_update)
2490 {
2491 	struct intel_display *display = to_intel_display(crtc_state);
2492 	u32 val = man_trk_ctl_enable_bit_get(display);
2493 
2494 	/* SF partial frame enable has to be set even on full update */
2495 	val |= man_trk_ctl_partial_frame_bit_get(display);
2496 
2497 	if (full_update) {
2498 		val |= man_trk_ctl_continuos_full_frame(display);
2499 		goto exit;
2500 	}
2501 
2502 	if (crtc_state->psr2_su_area.y1 == -1)
2503 		goto exit;
2504 
2505 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
2506 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2507 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2508 	} else {
2509 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2510 			    crtc_state->psr2_su_area.y1 % 4 ||
2511 			    crtc_state->psr2_su_area.y2 % 4);
2512 
2513 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2514 			crtc_state->psr2_su_area.y1 / 4 + 1);
2515 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2516 			crtc_state->psr2_su_area.y2 / 4 + 1);
2517 	}
2518 exit:
2519 	crtc_state->psr2_man_track_ctl = val;
2520 }
2521 
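/*
 * Compute the PIPE_SRCSZ_ERLY_TPT value (SU area size minus one in both
 * dimensions) used with early transport; 0 when early transport is not in
 * use or a full update is done.
 */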
2522 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2523 					  bool full_update)
2524 {
2525 	int width, height;
2526 
2527 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2528 		return 0;
2529 
2530 	width = drm_rect_width(&crtc_state->psr2_su_area);
2531 	height = drm_rect_height(&crtc_state->psr2_su_area);
2532 
2533 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2534 }
2535 
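/*
 * Extend the overlapping damage area to also cover @damage_area, clipped
 * against @pipe_src. y1 == -1 marks a still empty overlap area, which then
 * gets initialized from the first damage rectangle.
 */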
2536 static void clip_area_update(struct drm_rect *overlap_damage_area,
2537 			     struct drm_rect *damage_area,
2538 			     struct drm_rect *pipe_src)
2539 {
2540 	if (!drm_rect_intersect(damage_area, pipe_src))
2541 		return;
2542 
2543 	if (overlap_damage_area->y1 == -1) {
2544 		overlap_damage_area->y1 = damage_area->y1;
2545 		overlap_damage_area->y2 = damage_area->y2;
2546 		return;
2547 	}
2548 
2549 	if (damage_area->y1 < overlap_damage_area->y1)
2550 		overlap_damage_area->y1 = damage_area->y1;
2551 
2552 	if (damage_area->y2 > overlap_damage_area->y2)
2553 		overlap_damage_area->y2 = damage_area->y2;
2554 }
2555 
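/*
 * Align the SU area vertically to the required granularity: the panel SU
 * granularity, or the DSC slice height when compression is enabled on ADL-P
 * and display version 14+.
 */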
2556 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2557 {
2558 	struct intel_display *display = to_intel_display(crtc_state);
2559 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2560 	u16 y_alignment;
2561 
2562 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2563 	if (crtc_state->dsc.compression_enable &&
2564 	    (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2565 		y_alignment = vdsc_cfg->slice_height;
2566 	else
2567 		y_alignment = crtc_state->su_y_granularity;
2568 
2569 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2570 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2571 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2572 						y_alignment) + 1) * y_alignment;
2573 }
2574 
2575 /*
2576  * When early transport is in use we need to extend SU area to cover
2577  * cursor fully when cursor is in SU area.
2578  */
2579 static void
2580 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2581 				  struct intel_crtc *crtc,
2582 				  bool *cursor_in_su_area)
2583 {
2584 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2585 	struct intel_plane_state *new_plane_state;
2586 	struct intel_plane *plane;
2587 	int i;
2588 
2589 	if (!crtc_state->enable_psr2_su_region_et)
2590 		return;
2591 
2592 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2593 		struct drm_rect inter;
2594 
2595 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2596 			continue;
2597 
2598 		if (plane->id != PLANE_CURSOR)
2599 			continue;
2600 
2601 		if (!new_plane_state->uapi.visible)
2602 			continue;
2603 
2604 		inter = crtc_state->psr2_su_area;
2605 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2606 			continue;
2607 
2608 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2609 				 &crtc_state->pipe_src);
2610 		*cursor_in_su_area = true;
2611 	}
2612 }
2613 
2614 /*
2615  * TODO: Not clear how to handle planes with negative position. Also,
2616  * planes are not updated if they have a negative X position, so for
2617  * now do a full update in these cases.
2618  *
2619  * Plane scaling and rotation are not supported by selective fetch and both
2620  * properties can change without a modeset, so they need to be checked at
2621  * every atomic commit.
2622  */
2623 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2624 {
2625 	if (plane_state->uapi.dst.y1 < 0 ||
2626 	    plane_state->uapi.dst.x1 < 0 ||
2627 	    plane_state->scaler_id >= 0 ||
2628 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2629 		return false;
2630 
2631 	return true;
2632 }
2633 
2634 /*
2635  * Check for pipe properties that are not supported by selective fetch.
2636  *
2637  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2638  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2639  * enabled and going to the full update path.
2640  */
2641 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2642 {
2643 	if (crtc_state->scaler_state.scaler_id >= 0)
2644 		return false;
2645 
2646 	return true;
2647 }
2648 
2649 /* Wa 14019834836 */
2650 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2651 {
2652 	struct intel_display *display = to_intel_display(crtc_state);
2653 	struct intel_encoder *encoder;
2654 	int hactive_limit;
2655 
2656 	if (crtc_state->psr2_su_area.y1 != 0 ||
2657 	    crtc_state->psr2_su_area.y2 != 0)
2658 		return;
2659 
2660 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2661 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2662 	else
2663 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2664 
2665 	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2666 		return;
2667 
2668 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2669 					     crtc_state->uapi.encoder_mask) {
2670 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2671 
2672 		if (!intel_dp_is_edp(intel_dp) &&
2673 		    intel_dp->psr.panel_replay_enabled &&
2674 		    intel_dp->psr.sel_update_enabled) {
2675 			crtc_state->psr2_su_area.y2++;
2676 			return;
2677 		}
2678 	}
2679 }
2680 
2681 static void
2682 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2683 {
2684 	struct intel_display *display = to_intel_display(crtc_state);
2685 
2686 	/* Wa_14014971492 */
2687 	if (!crtc_state->has_panel_replay &&
2688 	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2689 	      display->platform.alderlake_p || display->platform.tigerlake)) &&
2690 	    crtc_state->splitter.enable)
2691 		crtc_state->psr2_su_area.y1 = 0;
2692 
2693 	/* Wa 14019834836 */
2694 	if (DISPLAY_VER(display) == 30)
2695 		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2696 }
2697 
2698 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2699 				struct intel_crtc *crtc)
2700 {
2701 	struct intel_display *display = to_intel_display(state);
2702 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2703 	struct intel_plane_state *new_plane_state, *old_plane_state;
2704 	struct intel_plane *plane;
2705 	bool full_update = false, cursor_in_su_area = false;
2706 	int i, ret;
2707 
2708 	if (!crtc_state->enable_psr2_sel_fetch)
2709 		return 0;
2710 
2711 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2712 		full_update = true;
2713 		goto skip_sel_fetch_set_loop;
2714 	}
2715 
2716 	crtc_state->psr2_su_area.x1 = 0;
2717 	crtc_state->psr2_su_area.y1 = -1;
2718 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2719 	crtc_state->psr2_su_area.y2 = -1;
2720 
2721 	/*
2722 	 * Calculate minimal selective fetch area of each plane and calculate
2723 	 * the pipe damaged area.
2724 	 * In the next loop the plane selective fetch area will actually be set
2725 	 * using the whole pipe damaged area.
2726 	 */
2727 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2728 					     new_plane_state, i) {
2729 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2730 						      .x2 = INT_MAX };
2731 
2732 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2733 			continue;
2734 
2735 		if (!new_plane_state->uapi.visible &&
2736 		    !old_plane_state->uapi.visible)
2737 			continue;
2738 
2739 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2740 			full_update = true;
2741 			break;
2742 		}
2743 
2744 		/*
2745 		 * If visibility changed or the plane moved, mark the whole plane
2746 		 * area as damaged as it needs a complete redraw in both the new
2747 		 * and the old position.
2748 		 */
2749 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2750 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2751 				     &old_plane_state->uapi.dst)) {
2752 			if (old_plane_state->uapi.visible) {
2753 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2754 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2755 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2756 						 &crtc_state->pipe_src);
2757 			}
2758 
2759 			if (new_plane_state->uapi.visible) {
2760 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2761 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2762 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2763 						 &crtc_state->pipe_src);
2764 			}
2765 			continue;
2766 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2767 			/* If alpha changed mark the whole plane area as damaged */
2768 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2769 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2770 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2771 					 &crtc_state->pipe_src);
2772 			continue;
2773 		}
2774 
2775 		src = drm_plane_state_src(&new_plane_state->uapi);
2776 		drm_rect_fp_to_int(&src, &src);
2777 
2778 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2779 						     &new_plane_state->uapi, &damaged_area))
2780 			continue;
2781 
2782 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2783 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2784 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2785 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2786 
2787 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2788 	}
2789 
2790 	/*
2791 	 * TODO: For now we are just using full update in case
2792 	 * selective fetch area calculation fails. To optimize this we
2793 	 * should identify cases where this happens and fix the area
2794 	 * calculation for those.
2795 	 */
2796 	if (crtc_state->psr2_su_area.y1 == -1) {
2797 		drm_info_once(display->drm,
2798 			      "Selective fetch area calculation failed in pipe %c\n",
2799 			      pipe_name(crtc->pipe));
2800 		full_update = true;
2801 	}
2802 
2803 	if (full_update)
2804 		goto skip_sel_fetch_set_loop;
2805 
2806 	intel_psr_apply_su_area_workarounds(crtc_state);
2807 
2808 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2809 	if (ret)
2810 		return ret;
2811 
2812 	/*
2813 	 * Adjust su area to cover cursor fully as necessary (early
2814 	 * transport). This needs to be done after
2815 	 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2816 	 * affected planes even when cursor is not updated by itself.
2817 	 */
2818 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2819 
2820 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2821 
2822 	/*
2823 	 * Now that we have the pipe damaged area, check if it intersects with
2824 	 * each plane; if it does, set the plane selective fetch area.
2825 	 */
2826 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2827 					     new_plane_state, i) {
2828 		struct drm_rect *sel_fetch_area, inter;
2829 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2830 
2831 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2832 		    !new_plane_state->uapi.visible)
2833 			continue;
2834 
2835 		inter = crtc_state->psr2_su_area;
2836 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2837 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2838 			sel_fetch_area->y1 = -1;
2839 			sel_fetch_area->y2 = -1;
2840 			/*
2841 			 * if plane sel fetch was previously enabled ->
2842 			 * disable it
2843 			 */
2844 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2845 				crtc_state->update_planes |= BIT(plane->id);
2846 
2847 			continue;
2848 		}
2849 
2850 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2851 			full_update = true;
2852 			break;
2853 		}
2854 
2855 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2856 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2857 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2858 		crtc_state->update_planes |= BIT(plane->id);
2859 
2860 		/*
2861 		 * Sel_fetch_area is calculated for UV plane. Use
2862 		 * same area for Y plane as well.
2863 		 */
2864 		if (linked) {
2865 			struct intel_plane_state *linked_new_plane_state;
2866 			struct drm_rect *linked_sel_fetch_area;
2867 
2868 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2869 			if (IS_ERR(linked_new_plane_state))
2870 				return PTR_ERR(linked_new_plane_state);
2871 
2872 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2873 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2874 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2875 			crtc_state->update_planes |= BIT(linked->id);
2876 		}
2877 	}
2878 
2879 skip_sel_fetch_set_loop:
2880 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2881 	crtc_state->pipe_srcsz_early_tpt =
2882 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2883 	return 0;
2884 }
2885 
2886 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2887 				struct intel_crtc *crtc)
2888 {
2889 	struct intel_display *display = to_intel_display(state);
2890 	const struct intel_crtc_state *old_crtc_state =
2891 		intel_atomic_get_old_crtc_state(state, crtc);
2892 	const struct intel_crtc_state *new_crtc_state =
2893 		intel_atomic_get_new_crtc_state(state, crtc);
2894 	struct intel_encoder *encoder;
2895 
2896 	if (!HAS_PSR(display))
2897 		return;
2898 
2899 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2900 					     old_crtc_state->uapi.encoder_mask) {
2901 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2902 		struct intel_psr *psr = &intel_dp->psr;
2903 
2904 		mutex_lock(&psr->lock);
2905 
2906 		if (psr->enabled) {
2907 			/*
2908 			 * Reasons to disable:
2909 			 * - PSR disabled in new state
2910 			 * - All planes will go inactive
2911 			 * - Changing between PSR versions
2912 			 * - Region Early Transport changing
2913 			 * - Display WA #1136: skl, bxt
2914 			 */
2915 			if (intel_crtc_needs_modeset(new_crtc_state) ||
2916 			    !new_crtc_state->has_psr ||
2917 			    !new_crtc_state->active_planes ||
2918 			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
2919 			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
2920 			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
2921 			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
2922 				intel_psr_disable_locked(intel_dp);
2923 			else if (new_crtc_state->wm_level_disabled)
2924 				/* Wa_14015648006 */
2925 				wm_optimization_wa(intel_dp, new_crtc_state);
2926 		}
2927 
2928 		mutex_unlock(&psr->lock);
2929 	}
2930 }
2931 
2932 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2933 				 struct intel_crtc *crtc)
2934 {
2935 	struct intel_display *display = to_intel_display(state);
2936 	const struct intel_crtc_state *crtc_state =
2937 		intel_atomic_get_new_crtc_state(state, crtc);
2938 	struct intel_encoder *encoder;
2939 
2940 	if (!crtc_state->has_psr)
2941 		return;
2942 
2943 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2944 					     crtc_state->uapi.encoder_mask) {
2945 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2946 		struct intel_psr *psr = &intel_dp->psr;
2947 		bool keep_disabled = false;
2948 
2949 		mutex_lock(&psr->lock);
2950 
2951 		drm_WARN_ON(display->drm,
2952 			    psr->enabled && !crtc_state->active_planes);
2953 
2954 		keep_disabled |= psr->sink_not_reliable;
2955 		keep_disabled |= !crtc_state->active_planes;
2956 
2957 		/* Display WA #1136: skl, bxt */
2958 		keep_disabled |= DISPLAY_VER(display) < 11 &&
2959 			crtc_state->wm_level_disabled;
2960 
2961 		if (!psr->enabled && !keep_disabled)
2962 			intel_psr_enable_locked(intel_dp, crtc_state);
2963 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2964 			/* Wa_14015648006 */
2965 			wm_optimization_wa(intel_dp, crtc_state);
2966 
2967 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2968 		if (crtc_state->crc_enabled && psr->enabled)
2969 			intel_psr_force_update(intel_dp);
2970 
2971 		/*
2972 		 * Clear possible busy bits in case we have
2973 		 * invalidate -> flip -> flush sequence.
2974 		 */
2975 		intel_dp->psr.busy_frontbuffer_bits = 0;
2976 
2977 		mutex_unlock(&psr->lock);
2978 	}
2979 }
2980 
2981 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2982 {
2983 	struct intel_display *display = to_intel_display(intel_dp);
2984 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2985 
2986 	/*
2987 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2988 	 * As all higher states have bit 4 of PSR2 state set we can just wait for
2989 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2990 	 */
2991 	return intel_de_wait_for_clear(display,
2992 				       EDP_PSR2_STATUS(display, cpu_transcoder),
2993 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2994 }
2995 
2996 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2997 {
2998 	struct intel_display *display = to_intel_display(intel_dp);
2999 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3000 
3001 	/*
3002 	 * From bspec: Panel Self Refresh (BDW+)
3003 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3004 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3005 	 * defensive enough to cover everything.
3006 	 */
3007 	return intel_de_wait_for_clear(display,
3008 				       psr_status_reg(display, cpu_transcoder),
3009 				       EDP_PSR_STATUS_STATE_MASK, 50);
3010 }
3011 
3012 /**
3013  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
3014  * @new_crtc_state: new CRTC state
3015  *
3016  * This function is expected to be called from pipe_update_start() where it is
3017  * not expected to race with PSR enable or disable.
3018  */
3019 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
3020 {
3021 	struct intel_display *display = to_intel_display(new_crtc_state);
3022 	struct intel_encoder *encoder;
3023 
3024 	if (!new_crtc_state->has_psr)
3025 		return;
3026 
3027 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3028 					     new_crtc_state->uapi.encoder_mask) {
3029 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3030 		int ret;
3031 
3032 		lockdep_assert_held(&intel_dp->psr.lock);
3033 
3034 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3035 			continue;
3036 
3037 		if (intel_dp->psr.sel_update_enabled)
3038 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
3039 		else
3040 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
3041 
3042 		if (ret)
3043 			drm_err(display->drm,
3044 				"PSR wait timed out, atomic update may fail\n");
3045 	}
3046 }
3047 
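/*
 * Wait for the PSR status to go idle so PSR can be re-activated. Drops and
 * re-takes intel_dp->psr.lock around the register wait, hence the caller
 * must use the return value to know whether PSR is still enabled and wanted.
 */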
3048 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
3049 {
3050 	struct intel_display *display = to_intel_display(intel_dp);
3051 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3052 	i915_reg_t reg;
3053 	u32 mask;
3054 	int err;
3055 
3056 	if (!intel_dp->psr.enabled)
3057 		return false;
3058 
3059 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3060 					  intel_dp->psr.panel_replay_enabled)) {
3061 		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
3062 		mask = EDP_PSR2_STATUS_STATE_MASK;
3063 	} else {
3064 		reg = psr_status_reg(display, cpu_transcoder);
3065 		mask = EDP_PSR_STATUS_STATE_MASK;
3066 	}
3067 
3068 	mutex_unlock(&intel_dp->psr.lock);
3069 
3070 	err = intel_de_wait_for_clear(display, reg, mask, 50);
3071 	if (err)
3072 		drm_err(display->drm,
3073 			"Timed out waiting for PSR Idle for re-enable\n");
3074 
3075 	/* After the unlocked wait, verify that PSR is still wanted! */
3076 	mutex_lock(&intel_dp->psr.lock);
3077 	return err == 0 && intel_dp->psr.enabled;
3078 }
3079 
3080 static int intel_psr_fastset_force(struct intel_display *display)
3081 {
3082 	struct drm_connector_list_iter conn_iter;
3083 	struct drm_modeset_acquire_ctx ctx;
3084 	struct drm_atomic_state *state;
3085 	struct drm_connector *conn;
3086 	int err = 0;
3087 
3088 	state = drm_atomic_state_alloc(display->drm);
3089 	if (!state)
3090 		return -ENOMEM;
3091 
3092 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3093 
3094 	state->acquire_ctx = &ctx;
3095 	to_intel_atomic_state(state)->internal = true;
3096 
3097 retry:
3098 	drm_connector_list_iter_begin(display->drm, &conn_iter);
3099 	drm_for_each_connector_iter(conn, &conn_iter) {
3100 		struct drm_connector_state *conn_state;
3101 		struct drm_crtc_state *crtc_state;
3102 
3103 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3104 			continue;
3105 
3106 		conn_state = drm_atomic_get_connector_state(state, conn);
3107 		if (IS_ERR(conn_state)) {
3108 			err = PTR_ERR(conn_state);
3109 			break;
3110 		}
3111 
3112 		if (!conn_state->crtc)
3113 			continue;
3114 
3115 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3116 		if (IS_ERR(crtc_state)) {
3117 			err = PTR_ERR(crtc_state);
3118 			break;
3119 		}
3120 
3121 		/* Mark mode as changed to trigger a pipe->update() */
3122 		crtc_state->mode_changed = true;
3123 	}
3124 	drm_connector_list_iter_end(&conn_iter);
3125 
3126 	if (err == 0)
3127 		err = drm_atomic_commit(state);
3128 
3129 	if (err == -EDEADLK) {
3130 		drm_atomic_state_clear(state);
3131 		err = drm_modeset_backoff(&ctx);
3132 		if (!err)
3133 			goto retry;
3134 	}
3135 
3136 	drm_modeset_drop_locks(&ctx);
3137 	drm_modeset_acquire_fini(&ctx);
3138 	drm_atomic_state_put(state);
3139 
3140 	return err;
3141 }
3142 
3143 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3144 {
3145 	struct intel_display *display = to_intel_display(intel_dp);
3146 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3147 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3148 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3149 	u32 old_mode, old_disable_bits;
3150 	int ret;
3151 
3152 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3153 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3154 		    I915_PSR_DEBUG_MODE_MASK) ||
3155 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3156 		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3157 		return -EINVAL;
3158 	}
3159 
3160 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3161 	if (ret)
3162 		return ret;
3163 
3164 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3165 	old_disable_bits = intel_dp->psr.debug &
3166 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3167 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3168 
3169 	intel_dp->psr.debug = val;
3170 
3171 	/*
3172 	 * Do it right away if it's already enabled, otherwise it will be done
3173 	 * when enabling the source.
3174 	 */
3175 	if (intel_dp->psr.enabled)
3176 		psr_irq_control(intel_dp);
3177 
3178 	mutex_unlock(&intel_dp->psr.lock);
3179 
3180 	if (old_mode != mode || old_disable_bits != disable_bits)
3181 		ret = intel_psr_fastset_force(display);
3182 
3183 	return ret;
3184 }
3185 
3186 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3187 {
3188 	struct intel_psr *psr = &intel_dp->psr;
3189 
3190 	intel_psr_disable_locked(intel_dp);
3191 	psr->sink_not_reliable = true;
3192 	/* let's make sure that the sink is awake */
3193 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3194 }
3195 
3196 static void intel_psr_work(struct work_struct *work)
3197 {
3198 	struct intel_dp *intel_dp =
3199 		container_of(work, typeof(*intel_dp), psr.work);
3200 
3201 	mutex_lock(&intel_dp->psr.lock);
3202 
3203 	if (!intel_dp->psr.enabled)
3204 		goto unlock;
3205 
3206 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
3207 		intel_psr_handle_irq(intel_dp);
3208 
3209 	/*
3210 	 * We have to make sure PSR is ready for re-enable,
3211 	 * otherwise it stays disabled until the next full enable/disable cycle.
3212 	 * PSR might take some time to get fully disabled
3213 	 * and be ready for re-enable.
3214 	 */
3215 	if (!__psr_wait_for_idle_locked(intel_dp))
3216 		goto unlock;
3217 
3218 	/*
3219 	 * The delayed work can race with an invalidate hence we need to
3220 	 * recheck. Since psr_flush first clears this and then reschedules we
3221 	 * won't ever miss a flush when bailing out here.
3222 	 */
3223 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3224 		goto unlock;
3225 
3226 	intel_psr_activate(intel_dp);
3227 unlock:
3228 	mutex_unlock(&intel_dp->psr.lock);
3229 }
3230 
3231 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3232 {
3233 	struct intel_display *display = to_intel_display(intel_dp);
3234 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3235 
3236 	if (!intel_dp->psr.psr2_sel_fetch_enabled)
3237 		return;
3238 
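	/*
	 * LNL+ has a dedicated single full frame (SFF) register; older
	 * hardware programs the SFF/CFF bits through the manual tracking
	 * control register instead.
	 */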
3239 	if (DISPLAY_VER(display) >= 20)
3240 		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3241 			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3242 	else
3243 		intel_de_write(display,
3244 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3245 			       man_trk_ctl_enable_bit_get(display) |
3246 			       man_trk_ctl_partial_frame_bit_get(display) |
3247 			       man_trk_ctl_single_full_frame_bit_get(display) |
3248 			       man_trk_ctl_continuos_full_frame(display));
3249 }
3250 
3251 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3252 {
3253 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
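		/*
		 * With selective fetch we have no damage information while the
		 * frontbuffer is busy, so force full frame updates until the
		 * corresponding flush.
		 */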
3254 		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3255 			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3256 			intel_psr_configure_full_frame_update(intel_dp);
3257 		}
3258 
3259 		intel_psr_force_update(intel_dp);
3260 	} else {
3261 		intel_psr_exit(intel_dp);
3262 	}
3263 }
3264 
3265 /**
3266  * intel_psr_invalidate - Invalidate PSR
3267  * @display: display device
3268  * @frontbuffer_bits: frontbuffer plane tracking bits
3269  * @origin: which operation caused the invalidate
3270  *
3271  * Since the hardware frontbuffer tracking has gaps we need to integrate
3272  * with the software frontbuffer tracking. This function gets called every
3273  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3274  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3275  *
3276  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3277  */
3278 void intel_psr_invalidate(struct intel_display *display,
3279 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3280 {
3281 	struct intel_encoder *encoder;
3282 
3283 	if (origin == ORIGIN_FLIP)
3284 		return;
3285 
3286 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3287 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3288 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3289 
3290 		mutex_lock(&intel_dp->psr.lock);
3291 		if (!intel_dp->psr.enabled) {
3292 			mutex_unlock(&intel_dp->psr.lock);
3293 			continue;
3294 		}
3295 
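		/* Only the bits belonging to this encoder's pipe matter here */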
3296 		pipe_frontbuffer_bits &=
3297 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3298 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3299 
3300 		if (pipe_frontbuffer_bits)
3301 			_psr_invalidate_handle(intel_dp);
3302 
3303 		mutex_unlock(&intel_dp->psr.lock);
3304 	}
3305 }
3306 /*
3307  * Once we completely rely on PSR2 S/W tracking in the future,
3308  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
3309  * events as well, so tgl_dc3co_flush_locked() will need to be changed
3310  * accordingly.
3311  */
3312 static void
3313 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3314 		       enum fb_op_origin origin)
3315 {
3316 	struct intel_display *display = to_intel_display(intel_dp);
3317 	struct drm_i915_private *i915 = to_i915(display->drm);
3318 
3319 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3320 	    !intel_dp->psr.active)
3321 		return;
3322 
3323 	/*
3324 	 * Every frontbuffer flush/flip event pushes the delayed work further out;
3325 	 * when the delayed work finally runs, the display has been idle.
3326 	 */
3327 	if (!(frontbuffer_bits &
3328 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3329 		return;
3330 
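	/*
	 * Arm DC3CO and (re)start the exit timer; the delayed dc3co work
	 * disables it again once the display has been idle for the exit delay.
	 */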
3331 	tgl_psr2_enable_dc3co(intel_dp);
3332 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3333 			 intel_dp->psr.dc3co_exit_delay);
3334 }
3335 
3336 static void _psr_flush_handle(struct intel_dp *intel_dp)
3337 {
3338 	struct intel_display *display = to_intel_display(intel_dp);
3339 	struct drm_i915_private *dev_priv = to_i915(display->drm);
3340 
3341 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3342 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3343 			/* can we turn CFF off? */
3344 			if (intel_dp->psr.busy_frontbuffer_bits == 0)
3345 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3346 		}
3347 
3348 		/*
3349 		 * Still keep the cff bit enabled, as we don't have a proper SU
3350 		 * configuration in case an update is sent for any reason after
3351 		 * the sff bit gets cleared by the HW on the next vblank.
3352 		 *
3353 		 * NOTE: Setting the cff bit is not needed from LunarLake onwards,
3354 		 * as we have our own register for the SFF bit and we are not
3355 		 * overwriting the existing SU configuration.
3356 		 */
3357 		intel_psr_configure_full_frame_update(intel_dp);
3358 	}
3359 
3360 	intel_psr_force_update(intel_dp);
3361 
3362 	if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
3363 	    !intel_dp->psr.busy_frontbuffer_bits)
3364 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3365 }
3366 
3367 /**
3368  * intel_psr_flush - Flush PSR
3369  * @display: display device
3370  * @frontbuffer_bits: frontbuffer plane tracking bits
3371  * @origin: which operation caused the flush
3372  *
3373  * Since the hardware frontbuffer tracking has gaps we need to integrate
3374  * with the software frontbuffer tracking. This function gets called every
3375  * time frontbuffer rendering has completed and flushed out to memory. PSR
3376  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3377  *
3378  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3379  */
3380 void intel_psr_flush(struct intel_display *display,
3381 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3382 {
3383 	struct intel_encoder *encoder;
3384 
3385 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3386 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3387 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3388 
3389 		mutex_lock(&intel_dp->psr.lock);
3390 		if (!intel_dp->psr.enabled) {
3391 			mutex_unlock(&intel_dp->psr.lock);
3392 			continue;
3393 		}
3394 
3395 		pipe_frontbuffer_bits &=
3396 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3397 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3398 
3399 		/*
3400 		 * If the PSR is paused by an explicit intel_psr_pause() call,
3401 		 * we have to ensure that the PSR is not activated until
3402 		 * intel_psr_resume() is called.
3403 		 */
3404 		if (intel_dp->psr.pause_counter)
3405 			goto unlock;
3406 
3407 		if (origin == ORIGIN_FLIP ||
3408 		    (origin == ORIGIN_CURSOR_UPDATE &&
3409 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3410 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3411 			goto unlock;
3412 		}
3413 
3414 		if (pipe_frontbuffer_bits == 0)
3415 			goto unlock;
3416 
3417 		/* By definition flush = invalidate + flush */
3418 		_psr_flush_handle(intel_dp);
3419 unlock:
3420 		mutex_unlock(&intel_dp->psr.lock);
3421 	}
3422 }
3423 
3424 /**
3425  * intel_psr_init - Init basic PSR work and mutex.
3426  * @intel_dp: Intel DP
3427  *
3428  * This function is called after connector initialization (which handles the
3429  * connector capabilities) and initializes the basic PSR state for each
3430  * DP encoder.
3431  */
3432 void intel_psr_init(struct intel_dp *intel_dp)
3433 {
3434 	struct intel_display *display = to_intel_display(intel_dp);
3435 	struct intel_connector *connector = intel_dp->attached_connector;
3436 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3437 
3438 	if (!(HAS_PSR(display) || HAS_DP20(display)))
3439 		return;
3440 
3441 	/*
3442 	 * HSW spec explicitly says PSR is tied to port A.
3443 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
3444 	 * BDW, GEN9 and GEN11 are only validated by the HW team on the eDP
3445 	 * transcoder.
3446 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3447 	 * so let's keep it hardcoded to PORT_A for those platforms.
3448 	 * GEN12, however, supports an instance of PSR registers per transcoder.
3449 	 */
3450 	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3451 		drm_dbg_kms(display->drm,
3452 			    "PSR condition failed: Port not supported\n");
3453 		return;
3454 	}
3455 
3456 	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3457 	    DISPLAY_VER(display) >= 20)
3458 		intel_dp->psr.source_panel_replay_support = true;
3459 
3460 	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3461 		intel_dp->psr.source_support = true;
3462 
3463 	/* Set link_standby vs. link_off defaults */
3464 	if (DISPLAY_VER(display) < 12)
3465 		/* For platforms before TGL let's respect the VBT again */
3466 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3467 
3468 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3469 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3470 	mutex_init(&intel_dp->psr.lock);
3471 }
3472 
3473 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3474 					   u8 *status, u8 *error_status)
3475 {
3476 	struct drm_dp_aux *aux = &intel_dp->aux;
3477 	int ret;
3478 	unsigned int offset;
3479 
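	/*
	 * Panel Replay and PSR report their sink status and error status
	 * through different DPCD registers.
	 */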
3480 	offset = intel_dp->psr.panel_replay_enabled ?
3481 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3482 
3483 	ret = drm_dp_dpcd_readb(aux, offset, status);
3484 	if (ret != 1)
3485 		return ret;
3486 
3487 	offset = intel_dp->psr.panel_replay_enabled ?
3488 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3489 
3490 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3491 	if (ret != 1)
3492 		return ret;
3493 
3494 	*status = *status & DP_PSR_SINK_STATE_MASK;
3495 
3496 	return 0;
3497 }
3498 
3499 static void psr_alpm_check(struct intel_dp *intel_dp)
3500 {
3501 	struct intel_psr *psr = &intel_dp->psr;
3502 
3503 	if (!psr->sel_update_enabled)
3504 		return;
3505 
3506 	if (intel_alpm_get_error(intel_dp)) {
3507 		intel_psr_disable_locked(intel_dp);
3508 		psr->sink_not_reliable = true;
3509 	}
3510 }
3511 
3512 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3513 {
3514 	struct intel_display *display = to_intel_display(intel_dp);
3515 	struct intel_psr *psr = &intel_dp->psr;
3516 	u8 val;
3517 	int r;
3518 
3519 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3520 	if (r != 1) {
3521 		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3522 		return;
3523 	}
3524 
3525 	if (val & DP_PSR_CAPS_CHANGE) {
3526 		intel_psr_disable_locked(intel_dp);
3527 		psr->sink_not_reliable = true;
3528 		drm_dbg_kms(display->drm,
3529 			    "Sink PSR capability changed, disabling PSR\n");
3530 
3531 		/* Clear the CAPS_CHANGE bit */
3532 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3533 	}
3534 }
3535 
3536 /*
3537  * The following bits are common between PSR and Panel Replay:
3538  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3539  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3540  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3541  * so this function relies on the PSR definitions.
3542  */
3543 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3544 {
3545 	struct intel_display *display = to_intel_display(intel_dp);
3546 	struct intel_psr *psr = &intel_dp->psr;
3547 	u8 status, error_status;
3548 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3549 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3550 			  DP_PSR_LINK_CRC_ERROR;
3551 
3552 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3553 		return;
3554 
3555 	mutex_lock(&psr->lock);
3556 
3557 	psr->link_ok = false;
3558 
3559 	if (!psr->enabled)
3560 		goto exit;
3561 
3562 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3563 		drm_err(display->drm,
3564 			"Error reading PSR status or error status\n");
3565 		goto exit;
3566 	}
3567 
3568 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3569 	    (error_status & errors)) {
3570 		intel_psr_disable_locked(intel_dp);
3571 		psr->sink_not_reliable = true;
3572 	}
3573 
3574 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3575 	    !error_status)
3576 		drm_dbg_kms(display->drm,
3577 			    "PSR sink internal error, disabling PSR\n");
3578 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3579 		drm_dbg_kms(display->drm,
3580 			    "PSR RFB storage error, disabling PSR\n");
3581 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3582 		drm_dbg_kms(display->drm,
3583 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3584 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3585 		drm_dbg_kms(display->drm,
3586 			    "PSR Link CRC error, disabling PSR\n");
3587 
3588 	if (error_status & ~errors)
3589 		drm_err(display->drm,
3590 			"PSR_ERROR_STATUS unhandled errors %x\n",
3591 			error_status & ~errors);
3592 	/* clear status register */
3593 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3594 
3595 	if (!psr->panel_replay_enabled) {
3596 		psr_alpm_check(intel_dp);
3597 		psr_capability_changed_check(intel_dp);
3598 	}
3599 
3600 exit:
3601 	mutex_unlock(&psr->lock);
3602 }
3603 
3604 bool intel_psr_enabled(struct intel_dp *intel_dp)
3605 {
3606 	bool ret;
3607 
3608 	if (!CAN_PSR(intel_dp))
3609 		return false;
3610 
3611 	mutex_lock(&intel_dp->psr.lock);
3612 	ret = intel_dp->psr.enabled;
3613 	mutex_unlock(&intel_dp->psr.lock);
3614 
3615 	return ret;
3616 }
3617 
3618 /**
3619  * intel_psr_link_ok - return psr->link_ok
3620  * @intel_dp: struct intel_dp
3621  *
3622  * We are seeing unexpected link re-trainings with some panels. This is caused
3623  * by the panel reporting a bad link status after PSR has been enabled. Code
3624  * checking the link status can call this to know when it can ignore a bad link
3625  * status reported by the panel, i.e. if the panel reports a bad link but
3626  * intel_psr_link_ok() says the link is ok, the caller should trust the latter.
3627  *
3628  * Return value of link_ok
3629  */
3630 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3631 {
3632 	bool ret;
3633 
3634 	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3635 	    !intel_dp_is_edp(intel_dp))
3636 		return false;
3637 
3638 	mutex_lock(&intel_dp->psr.lock);
3639 	ret = intel_dp->psr.link_ok;
3640 	mutex_unlock(&intel_dp->psr.lock);
3641 
3642 	return ret;
3643 }
3644 
3645 /**
3646  * intel_psr_lock - grab PSR lock
3647  * @crtc_state: the crtc state
3648  *
3649  * This is initially meant to be used around the CRTC update, when
3650  * vblank sensitive registers are updated and we need to grab the lock
3651  * before that to avoid vblank evasion.
3652  */
3653 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3654 {
3655 	struct intel_display *display = to_intel_display(crtc_state);
3656 	struct intel_encoder *encoder;
3657 
3658 	if (!crtc_state->has_psr)
3659 		return;
3660 
3661 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3662 					     crtc_state->uapi.encoder_mask) {
3663 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3664 
3665 		mutex_lock(&intel_dp->psr.lock);
3666 		break;
3667 	}
3668 }
3669 
3670 /**
3671  * intel_psr_unlock - release PSR lock
3672  * @crtc_state: the crtc state
3673  *
3674  * Release the PSR lock that was held during pipe update.
3675  */
3676 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3677 {
3678 	struct intel_display *display = to_intel_display(crtc_state);
3679 	struct intel_encoder *encoder;
3680 
3681 	if (!crtc_state->has_psr)
3682 		return;
3683 
3684 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3685 					     crtc_state->uapi.encoder_mask) {
3686 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3687 
3688 		mutex_unlock(&intel_dp->psr.lock);
3689 		break;
3690 	}
3691 }
3692 
3693 /* Wa_16025596647 */
3694 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
3695 {
3696 	struct intel_display *display = to_intel_display(intel_dp);
3697 	bool dc5_dc6_blocked;
3698 
3699 	if (!intel_dp->psr.active)
3700 		return;
3701 
3702 	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
3703 
3704 	if (intel_dp->psr.sel_update_enabled)
3705 		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
3706 					 psr_compute_idle_frames(intel_dp));
3707 	else
3708 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
3709 								       intel_dp->psr.pipe,
3710 								       dc5_dc6_blocked);
3711 }
3712 
3713 static void psr_dc5_dc6_wa_work(struct work_struct *work)
3714 {
3715 	struct intel_display *display = container_of(work, typeof(*display),
3716 						     psr_dc5_dc6_wa_work);
3717 	struct intel_encoder *encoder;
3718 
3719 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3720 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3721 
3722 		mutex_lock(&intel_dp->psr.lock);
3723 
3724 		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
3725 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3726 
3727 		mutex_unlock(&intel_dp->psr.lock);
3728 	}
3729 }
3730 
3731 /**
3732  * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
3733  * @display: intel display struct
3734  *
3735  * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): it
3736  * schedules psr_dc5_dc6_wa_work, which applies/removes the workaround.
3737  */
3738 void intel_psr_notify_dc5_dc6(struct intel_display *display)
3739 {
3740 	if (DISPLAY_VER(display) != 20 &&
3741 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3742 		return;
3743 
3744 	schedule_work(&display->psr_dc5_dc6_wa_work);
3745 }
3746 
3747 /**
3748  * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
3749  * @display: intel display struct
3750  *
3751  * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): it
3752  * initializes psr_dc5_dc6_wa_work, which is used for applying the workaround.
3753  */
3754 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
3755 {
3756 	if (DISPLAY_VER(display) != 20 &&
3757 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3758 		return;
3759 
3760 	INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
3761 }
3762 
3763 /**
3764  * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
3765  * @state: intel atomic state
3766  * @crtc: intel crtc
3767  * @enable: enable/disable
3768  *
3769  * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647) to
3770  * apply/remove the workaround when a pipe is getting enabled/disabled.
3771  */
3772 void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
3773 				  struct intel_crtc *crtc, bool enable)
3774 {
3775 	struct intel_display *display = to_intel_display(state);
3776 	struct intel_encoder *encoder;
3777 
3778 	if (DISPLAY_VER(display) != 20 &&
3779 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3780 		return;
3781 
3782 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3783 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3784 		u8 active_non_psr_pipes;
3785 
3786 		mutex_lock(&intel_dp->psr.lock);
3787 
3788 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3789 			goto unlock;
3790 
3791 		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
3792 
3793 		if (enable)
3794 			active_non_psr_pipes |= BIT(crtc->pipe);
3795 		else
3796 			active_non_psr_pipes &= ~BIT(crtc->pipe);
3797 
3798 		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
3799 			goto unlock;
3800 
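		/*
		 * The workaround only needs reprogramming when the set of
		 * other active pipes transitions between empty and non-empty;
		 * otherwise just update the bookkeeping.
		 */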
3801 		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
3802 		    (!enable && !intel_dp->psr.active_non_psr_pipes)) {
3803 			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3804 			goto unlock;
3805 		}
3806 
3807 		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3808 
3809 		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3810 unlock:
3811 		mutex_unlock(&intel_dp->psr.lock);
3812 	}
3813 }
3814 
3815 /**
3816  * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
3817  * @display: intel display struct
3818  * @enable: enable/disable
3819  *
3820  * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647) to
3821  * apply/remove the workaround when vblank is getting enabled/disabled.
3822  */
3823 void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
3824 					    bool enable)
3825 {
3826 	struct intel_encoder *encoder;
3827 
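	/*
	 * For a PSR (non Panel Replay) encoder (re)applying the workaround is
	 * enough; with Panel Replay, or without any PSR encoder, fall through
	 * and adjust the target DC state instead.
	 */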
3828 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3829 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3830 
3831 		mutex_lock(&intel_dp->psr.lock);
3832 		if (intel_dp->psr.panel_replay_enabled) {
3833 			mutex_unlock(&intel_dp->psr.lock);
3834 			break;
3835 		}
3836 
3837 		if (intel_dp->psr.enabled)
3838 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3839 
3840 		mutex_unlock(&intel_dp->psr.lock);
3841 		return;
3842 	}
3843 
3844 	/*
3845 	 * NOTE: intel_display_power_set_target_dc_state is used
3846 	 * only by PSR code for DC3CO handling. DC3CO target
3847 	 * state is currently disabled in PSR code. If DC3CO
3848 	 * is taken into use we need to take that into account here
3849 	 * as well.
3850 	 */
3851 	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
3852 						DC_STATE_EN_UPTO_DC6);
3853 }
3854 
3855 static void
3856 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3857 {
3858 	struct intel_display *display = to_intel_display(intel_dp);
3859 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3860 	const char *status = "unknown";
3861 	u32 val, status_val;
3862 
3863 	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
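	/*
	 * Selective update / Panel Replay report the live state through
	 * EDP_PSR2_STATUS, legacy PSR1 through the PSR status register,
	 * each with its own state encoding.
	 */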
3864 	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
3865 		static const char * const live_status[] = {
3866 			"IDLE",
3867 			"CAPTURE",
3868 			"CAPTURE_FS",
3869 			"SLEEP",
3870 			"BUFON_FW",
3871 			"ML_UP",
3872 			"SU_STANDBY",
3873 			"FAST_SLEEP",
3874 			"DEEP_SLEEP",
3875 			"BUF_ON",
3876 			"TG_ON"
3877 		};
3878 		val = intel_de_read(display,
3879 				    EDP_PSR2_STATUS(display, cpu_transcoder));
3880 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3881 		if (status_val < ARRAY_SIZE(live_status))
3882 			status = live_status[status_val];
3883 	} else {
3884 		static const char * const live_status[] = {
3885 			"IDLE",
3886 			"SRDONACK",
3887 			"SRDENT",
3888 			"BUFOFF",
3889 			"BUFON",
3890 			"AUXACK",
3891 			"SRDOFFACK",
3892 			"SRDENT_ON",
3893 		};
3894 		val = intel_de_read(display,
3895 				    psr_status_reg(display, cpu_transcoder));
3896 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3897 		if (status_val < ARRAY_SIZE(live_status))
3898 			status = live_status[status_val];
3899 	}
3900 
3901 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3902 }
3903 
3904 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3905 				      struct seq_file *m)
3906 {
3907 	struct intel_psr *psr = &intel_dp->psr;
3908 
3909 	seq_printf(m, "Sink support: PSR = %s",
3910 		   str_yes_no(psr->sink_support));
3911 
3912 	if (psr->sink_support)
3913 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3914 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3915 		seq_printf(m, " (Early Transport)");
3916 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3917 	seq_printf(m, ", Panel Replay Selective Update = %s",
3918 		   str_yes_no(psr->sink_panel_replay_su_support));
3919 	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3920 		seq_printf(m, " (Early Transport)");
3921 	seq_printf(m, "\n");
3922 }
3923 
3924 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3925 				 struct seq_file *m)
3926 {
3927 	struct intel_psr *psr = &intel_dp->psr;
3928 	const char *status, *mode, *region_et;
3929 
3930 	if (psr->enabled)
3931 		status = " enabled";
3932 	else
3933 		status = "disabled";
3934 
3935 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3936 		mode = "Panel Replay Selective Update";
3937 	else if (psr->panel_replay_enabled)
3938 		mode = "Panel Replay";
3939 	else if (psr->sel_update_enabled)
3940 		mode = "PSR2";
3941 	else if (psr->enabled)
3942 		mode = "PSR1";
3943 	else
3944 		mode = "";
3945 
3946 	if (psr->su_region_et_enabled)
3947 		region_et = " (Early Transport)";
3948 	else
3949 		region_et = "";
3950 
3951 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3952 }
3953 
3954 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3955 {
3956 	struct intel_display *display = to_intel_display(intel_dp);
3957 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3958 	struct intel_psr *psr = &intel_dp->psr;
3959 	struct ref_tracker *wakeref;
3960 	bool enabled;
3961 	u32 val, psr2_ctl;
3962 
3963 	intel_psr_sink_capability(intel_dp, m);
3964 
3965 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3966 		return 0;
3967 
3968 	wakeref = intel_display_rpm_get(display);
3969 	mutex_lock(&psr->lock);
3970 
3971 	intel_psr_print_mode(intel_dp, m);
3972 
3973 	if (!psr->enabled) {
3974 		seq_printf(m, "PSR sink not reliable: %s\n",
3975 			   str_yes_no(psr->sink_not_reliable));
3976 
3977 		goto unlock;
3978 	}
3979 
3980 	if (psr->panel_replay_enabled) {
3981 		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
3982 
3983 		if (intel_dp_is_edp(intel_dp))
3984 			psr2_ctl = intel_de_read(display,
3985 						 EDP_PSR2_CTL(display,
3986 							      cpu_transcoder));
3987 
3988 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3989 	} else if (psr->sel_update_enabled) {
3990 		val = intel_de_read(display,
3991 				    EDP_PSR2_CTL(display, cpu_transcoder));
3992 		enabled = val & EDP_PSR2_ENABLE;
3993 	} else {
3994 		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
3995 		enabled = val & EDP_PSR_ENABLE;
3996 	}
3997 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3998 		   str_enabled_disabled(enabled), val);
3999 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
4000 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
4001 			   psr2_ctl);
4002 	psr_source_status(intel_dp, m);
4003 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
4004 		   psr->busy_frontbuffer_bits);
4005 
4006 	/*
4007 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
4008 	 */
4009 	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
4010 	seq_printf(m, "Performance counter: %u\n",
4011 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
4012 
4013 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
4014 		seq_printf(m, "Last attempted entry at: %lld\n",
4015 			   psr->last_entry_attempt);
4016 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
4017 	}
4018 
4019 	if (psr->sel_update_enabled) {
4020 		u32 su_frames_val[3];
4021 		int frame;
4022 
4023 		/*
4024 		 * Reading all 3 registers beforehand to minimize crossing a
4025 		 * frame boundary between register reads
4026 		 */
4027 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
4028 			val = intel_de_read(display,
4029 					    PSR2_SU_STATUS(display, cpu_transcoder, frame));
4030 			su_frames_val[frame / 3] = val;
4031 		}
4032 
4033 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
4034 
4035 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
4036 			u32 su_blocks;
4037 
4038 			su_blocks = su_frames_val[frame / 3] &
4039 				    PSR2_SU_STATUS_MASK(frame);
4040 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
4041 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
4042 		}
4043 
4044 		seq_printf(m, "PSR2 selective fetch: %s\n",
4045 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
4046 	}
4047 
4048 unlock:
4049 	mutex_unlock(&psr->lock);
4050 	intel_display_rpm_put(display, wakeref);
4051 
4052 	return 0;
4053 }
4054 
4055 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4056 {
4057 	struct intel_display *display = m->private;
4058 	struct intel_dp *intel_dp = NULL;
4059 	struct intel_encoder *encoder;
4060 
4061 	if (!HAS_PSR(display))
4062 		return -ENODEV;
4063 
4064 	/* Find the first EDP which supports PSR */
4065 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4066 		intel_dp = enc_to_intel_dp(encoder);
4067 		break;
4068 	}
4069 
4070 	if (!intel_dp)
4071 		return -ENODEV;
4072 
4073 	return intel_psr_status(m, intel_dp);
4074 }
4075 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4076 
4077 static int
4078 i915_edp_psr_debug_set(void *data, u64 val)
4079 {
4080 	struct intel_display *display = data;
4081 	struct intel_encoder *encoder;
4082 	int ret = -ENODEV;
4083 
4084 	if (!HAS_PSR(display))
4085 		return ret;
4086 
4087 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4088 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4089 
4090 		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4091 
4092 		// TODO: split to each transcoder's PSR debug state
4093 		with_intel_display_rpm(display)
4094 			ret = intel_psr_debug_set(intel_dp, val);
4095 	}
4096 
4097 	return ret;
4098 }
4099 
4100 static int
4101 i915_edp_psr_debug_get(void *data, u64 *val)
4102 {
4103 	struct intel_display *display = data;
4104 	struct intel_encoder *encoder;
4105 
4106 	if (!HAS_PSR(display))
4107 		return -ENODEV;
4108 
4109 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4110 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4111 
4112 		// TODO: split to each transcoder's PSR debug state
4113 		*val = READ_ONCE(intel_dp->psr.debug);
4114 		return 0;
4115 	}
4116 
4117 	return -ENODEV;
4118 }
4119 
4120 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4121 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4122 			"%llu\n");
4123 
4124 void intel_psr_debugfs_register(struct intel_display *display)
4125 {
4126 	struct drm_minor *minor = display->drm->primary;
4127 
4128 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
4129 			    display, &i915_edp_psr_debug_fops);
4130 
4131 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
4132 			    display, &i915_edp_psr_status_fops);
4133 }
4134 
4135 static const char *psr_mode_str(struct intel_dp *intel_dp)
4136 {
4137 	if (intel_dp->psr.panel_replay_enabled)
4138 		return "PANEL-REPLAY";
4139 	else if (intel_dp->psr.enabled)
4140 		return "PSR";
4141 
4142 	return "unknown";
4143 }
4144 
4145 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4146 {
4147 	struct intel_connector *connector = m->private;
4148 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4149 	static const char * const sink_status[] = {
4150 		"inactive",
4151 		"transition to active, capture and display",
4152 		"active, display from RFB",
4153 		"active, capture and display on sink device timings",
4154 		"transition to inactive, capture and display, timing re-sync",
4155 		"reserved",
4156 		"reserved",
4157 		"sink internal error",
4158 	};
4159 	const char *str;
4160 	int ret;
4161 	u8 status, error_status;
4162 
4163 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4164 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4165 		return -ENODEV;
4166 	}
4167 
4168 	if (connector->base.status != connector_status_connected)
4169 		return -ENODEV;
4170 
4171 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4172 	if (ret)
4173 		return ret;
4174 
4175 	status &= DP_PSR_SINK_STATE_MASK;
4176 	if (status < ARRAY_SIZE(sink_status))
4177 		str = sink_status[status];
4178 	else
4179 		str = "unknown";
4180 
4181 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4182 
4183 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4184 
4185 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4186 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4187 			    DP_PSR_LINK_CRC_ERROR))
4188 		seq_puts(m, ":\n");
4189 	else
4190 		seq_puts(m, "\n");
4191 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4192 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4193 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4194 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4195 	if (error_status & DP_PSR_LINK_CRC_ERROR)
4196 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4197 
4198 	return ret;
4199 }
4200 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4201 
4202 static int i915_psr_status_show(struct seq_file *m, void *data)
4203 {
4204 	struct intel_connector *connector = m->private;
4205 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4206 
4207 	return intel_psr_status(m, intel_dp);
4208 }
4209 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4210 
4211 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4212 {
4213 	struct intel_display *display = to_intel_display(connector);
4214 	struct dentry *root = connector->base.debugfs_entry;
4215 
4216 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4217 	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4218 		return;
4219 
4220 	debugfs_create_file("i915_psr_sink_status", 0444, root,
4221 			    connector, &i915_psr_sink_status_fops);
4222 
4223 	if (HAS_PSR(display) || HAS_DP20(display))
4224 		debugfs_create_file("i915_psr_status", 0444, root,
4225 				    connector, &i915_psr_status_fops);
4226 }
4227 
4228 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4229 {
4230 	/*
4231 	 * eDP Panel Replay always uses ALPM.
4232 	 * PSR2 uses ALPM but PSR1 doesn't.
4233 	 */
4234 	return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4235 					     crtc_state->has_panel_replay);
4236 }
4237