xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision f86ad0ed620cb3c91ec7d5468e93ac68d727539d)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_vblank.h>
30 
31 #include "i915_drv.h"
32 #include "i915_reg.h"
33 #include "intel_alpm.h"
34 #include "intel_atomic.h"
35 #include "intel_crtc.h"
36 #include "intel_cursor_regs.h"
37 #include "intel_ddi.h"
38 #include "intel_de.h"
39 #include "intel_display_irq.h"
40 #include "intel_display_regs.h"
41 #include "intel_display_rpm.h"
42 #include "intel_display_types.h"
43 #include "intel_dmc.h"
44 #include "intel_dp.h"
45 #include "intel_dp_aux.h"
46 #include "intel_frontbuffer.h"
47 #include "intel_hdmi.h"
48 #include "intel_psr.h"
49 #include "intel_psr_regs.h"
50 #include "intel_snps_phy.h"
51 #include "intel_vblank.h"
52 #include "intel_vrr.h"
53 #include "skl_universal_plane.h"
54 
55 /**
56  * DOC: Panel Self Refresh (PSR/SRD)
57  *
58  * Since Haswell the display controller supports Panel Self-Refresh on
59  * display panels which have a remote frame buffer (RFB) implemented
60  * according to the PSR spec in eDP 1.3. The PSR feature allows the display
61  * to go to lower standby states when the system is idle but the display is
62  * on, as it eliminates display refresh requests to DDR memory completely as
63  * long as the frame buffer for that display is unchanged.
64  *
65  * Panel Self Refresh must be supported by both Hardware (source) and
66  * Panel (sink).
67  *
68  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
69  * to power down the link and memory controller. For DSI panels the same idea
70  * is called "manual mode".
71  *
72  * The implementation uses the hardware-based PSR support which automatically
73  * enters/exits self-refresh mode. The hardware takes care of sending the
74  * required DP aux message and could even retrain the link (that part isn't
75  * enabled yet though). The hardware also keeps track of any frontbuffer
76  * changes to know when to exit self-refresh mode again. Unfortunately that
77  * part doesn't work too well, hence why the i915 PSR support uses the
78  * software frontbuffer tracking to make sure it doesn't miss a screen
79  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
80  * get called by the frontbuffer tracking code. Note that because of locking
81  * issues the self-refresh re-enable code is done from a work queue, which
82  * must be correctly synchronized/cancelled when shutting down the pipe.
83  *
84  * DC3CO (DC3 clock off)
85  *
86  * On top of PSR2, GEN12 adds an intermediate power savings state that turns
87  * the clock off automatically during the PSR2 idle state.
88  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
89  * entry/exit allows the HW to enter a low-power state even when page flipping
90  * periodically (for instance a 30fps video playback scenario).
91  *
92  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
93  * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
94  * after 6 frames. If no other flip occurs and that work function is executed,
95  * DC3CO is disabled and PSR2 is configured to enter deep sleep, resetting
96  * again in case of another flip.
97  * Front buffer modifications do not trigger DC3CO activation on purpose as it
98  * would bring a lot of complexity and most modern systems will only use
99  * page flips.
100  */
101 
102 /*
103  * Description of PSR mask bits:
104  *
105  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
106  *
107  *  When unmasked (nearly) all display register writes (e.g. even
108  *  SWF) trigger a PSR exit. Some registers are excluded from this
109  *  and they have a more specific mask (described below). On icl+
110  *  this bit no longer exists and is effectively always set.
111  *
112  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
113  *
114  *  When unmasked (nearly) all pipe/plane register writes
115  *  trigger a PSR exit. Some plane registers are excluded from this
116  *  and they have a more specific mask (described below).
117  *
118  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
119  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
120  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
121  *
122  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
123  *  SPR_SURF/CURBASE are not included in this and instead are
124  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
125  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
126  *
127  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
128  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
129  *
130  *  When unmasked PSR is blocked as long as the sprite
131  *  plane is enabled. skl+ with their universal planes no
132  *  longer have a mask bit like this, and no plane being
133  *  enabled blocks PSR.
134  *
135  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
136  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
137  *
138  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
139  *  this bit doesn't exist, but CURPOS is included in the
140  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
141  *
142  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
143  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
144  *
145  *  When unmasked PSR is blocked as long as vblank and/or vsync
146  *  interrupt is unmasked in IMR *and* enabled in IER.
147  *
148  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
149  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
150  *
151  *  Selects whether PSR exit generates an extra vblank before
152  *  the first frame is transmitted. Also note the opposite polarity
153  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
154  *  unmasked==do not generate the extra vblank).
155  *
156  *  With DC states enabled the extra vblank happens after link training,
157  *  with DC states disabled it happens immediately upon PSR exit trigger.
158  *  No idea as of now why there is a difference. HSW/BDW (which don't
159  *  even have DMC) always generate it after link training. Go figure.
160  *
161  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
162  *  and thus won't latch until the first vblank. So with DC states
163  *  enabled the register effectively uses the reset value during DC5
164  *  exit+PSR exit sequence, and thus the bit does nothing until
165  *  latched by the vblank that it was trying to prevent from being
166  *  generated in the first place. So we should probably call this
167  *  one a chicken/egg bit instead on skl+.
168  *
169  *  In standby mode (as opposed to link-off) this makes no difference
170  *  as the timing generator keeps running the whole time generating
171  *  normal periodic vblanks.
172  *
173  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
174  *  and doing so makes the behaviour match the skl+ reset value.
175  *
176  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
177  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
178  *
179  *  On BDW without this bit set no vblanks whatsoever are
180  *  generated after PSR exit. On HSW this has no apparent effect.
181  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
182  *
183  * The rest of the bits are more self-explanatory and/or
184  * irrelevant for normal operation.
185  *
186  * Description of intel_crtc_state variables: has_psr, has_panel_replay and
187  * has_sel_update:
188  *
189  *  has_psr (alone):					PSR1
190  *  has_psr + has_sel_update:				PSR2
191  *  has_psr + has_panel_replay:				Panel Replay
192  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
193  *
194  * Description of some intel_psr variables: enabled, panel_replay_enabled,
195  * sel_update_enabled:
196  *
197  *  enabled (alone):						PSR1
198  *  enabled + sel_update_enabled:				PSR2
199  *  enabled + panel_replay_enabled:				Panel Replay
200  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
201  */
202 
203 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
204 			   (intel_dp)->psr.source_support)
205 
206 bool intel_encoder_can_psr(struct intel_encoder *encoder)
207 {
208 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
209 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
210 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
211 	else
212 		return false;
213 }
214 
215 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
216 				  const struct intel_crtc_state *crtc_state)
217 {
218 	/*
219 	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
220 	 * the output is enabled. For non-eDP outputs the main link is always
221 	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
222 	 * for eDP.
223 	 *
224 	 * TODO:
225 	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
226 	 *   the ALPM with main-link off mode is not enabled.
227 	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
228 	 *   main-link off mode is added for it and this mode gets enabled.
229 	 */
230 	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
231 	       intel_encoder_can_psr(encoder);
232 }
233 
234 static bool psr_global_enabled(struct intel_dp *intel_dp)
235 {
236 	struct intel_display *display = to_intel_display(intel_dp);
237 	struct intel_connector *connector = intel_dp->attached_connector;
238 
239 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
240 	case I915_PSR_DEBUG_DEFAULT:
241 		if (display->params.enable_psr == -1)
242 			return intel_dp_is_edp(intel_dp) ?
243 				connector->panel.vbt.psr.enable :
244 				true;
245 		return display->params.enable_psr;
246 	case I915_PSR_DEBUG_DISABLE:
247 		return false;
248 	default:
249 		return true;
250 	}
251 }
252 
253 static bool psr2_global_enabled(struct intel_dp *intel_dp)
254 {
255 	struct intel_display *display = to_intel_display(intel_dp);
256 
257 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
258 	case I915_PSR_DEBUG_DISABLE:
259 	case I915_PSR_DEBUG_FORCE_PSR1:
260 		return false;
261 	default:
262 		if (display->params.enable_psr == 1)
263 			return false;
264 		return true;
265 	}
266 }
267 
268 static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
269 {
270 	struct intel_display *display = to_intel_display(intel_dp);
271 
272 	if (display->params.enable_psr != -1)
273 		return false;
274 
275 	return true;
276 }
277 
278 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
279 {
280 	struct intel_display *display = to_intel_display(intel_dp);
281 
282 	if ((display->params.enable_psr != -1) ||
283 	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
284 		return false;
285 	return true;
286 }
287 
288 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
289 {
290 	struct intel_display *display = to_intel_display(intel_dp);
291 
292 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
293 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
294 }
295 
296 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
297 {
298 	struct intel_display *display = to_intel_display(intel_dp);
299 
300 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
301 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
302 }
303 
304 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
305 {
306 	struct intel_display *display = to_intel_display(intel_dp);
307 
308 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
309 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
310 }
311 
312 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
313 {
314 	struct intel_display *display = to_intel_display(intel_dp);
315 
316 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
317 		EDP_PSR_MASK(intel_dp->psr.transcoder);
318 }
319 
320 static i915_reg_t psr_ctl_reg(struct intel_display *display,
321 			      enum transcoder cpu_transcoder)
322 {
323 	if (DISPLAY_VER(display) >= 8)
324 		return EDP_PSR_CTL(display, cpu_transcoder);
325 	else
326 		return HSW_SRD_CTL;
327 }
328 
329 static i915_reg_t psr_debug_reg(struct intel_display *display,
330 				enum transcoder cpu_transcoder)
331 {
332 	if (DISPLAY_VER(display) >= 8)
333 		return EDP_PSR_DEBUG(display, cpu_transcoder);
334 	else
335 		return HSW_SRD_DEBUG;
336 }
337 
338 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
339 				   enum transcoder cpu_transcoder)
340 {
341 	if (DISPLAY_VER(display) >= 8)
342 		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
343 	else
344 		return HSW_SRD_PERF_CNT;
345 }
346 
347 static i915_reg_t psr_status_reg(struct intel_display *display,
348 				 enum transcoder cpu_transcoder)
349 {
350 	if (DISPLAY_VER(display) >= 8)
351 		return EDP_PSR_STATUS(display, cpu_transcoder);
352 	else
353 		return HSW_SRD_STATUS;
354 }
355 
356 static i915_reg_t psr_imr_reg(struct intel_display *display,
357 			      enum transcoder cpu_transcoder)
358 {
359 	if (DISPLAY_VER(display) >= 12)
360 		return TRANS_PSR_IMR(display, cpu_transcoder);
361 	else
362 		return EDP_PSR_IMR;
363 }
364 
365 static i915_reg_t psr_iir_reg(struct intel_display *display,
366 			      enum transcoder cpu_transcoder)
367 {
368 	if (DISPLAY_VER(display) >= 12)
369 		return TRANS_PSR_IIR(display, cpu_transcoder);
370 	else
371 		return EDP_PSR_IIR;
372 }
373 
374 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
375 				  enum transcoder cpu_transcoder)
376 {
377 	if (DISPLAY_VER(display) >= 8)
378 		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
379 	else
380 		return HSW_SRD_AUX_CTL;
381 }
382 
383 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
384 				   enum transcoder cpu_transcoder, int i)
385 {
386 	if (DISPLAY_VER(display) >= 8)
387 		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
388 	else
389 		return HSW_SRD_AUX_DATA(i);
390 }
391 
392 static void psr_irq_control(struct intel_dp *intel_dp)
393 {
394 	struct intel_display *display = to_intel_display(intel_dp);
395 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
396 	u32 mask;
397 
398 	if (intel_dp->psr.panel_replay_enabled)
399 		return;
400 
401 	mask = psr_irq_psr_error_bit_get(intel_dp);
402 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
403 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
404 			psr_irq_pre_entry_bit_get(intel_dp);
405 
406 	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
407 		     psr_irq_mask_get(intel_dp), ~mask);
408 }
409 
410 static void psr_event_print(struct intel_display *display,
411 			    u32 val, bool sel_update_enabled)
412 {
413 	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
414 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
415 		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
416 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
417 		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
418 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
419 		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
420 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
421 		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
422 	if (val & PSR_EVENT_GRAPHICS_RESET)
423 		drm_dbg_kms(display->drm, "\tGraphics reset\n");
424 	if (val & PSR_EVENT_PCH_INTERRUPT)
425 		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
426 	if (val & PSR_EVENT_MEMORY_UP)
427 		drm_dbg_kms(display->drm, "\tMemory up\n");
428 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
429 		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
430 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
431 		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
432 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
433 		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
434 	if (val & PSR_EVENT_REGISTER_UPDATE)
435 		drm_dbg_kms(display->drm, "\tRegister updated\n");
436 	if (val & PSR_EVENT_HDCP_ENABLE)
437 		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
438 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
439 		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
440 	if (val & PSR_EVENT_VBI_ENABLE)
441 		drm_dbg_kms(display->drm, "\tVBI enabled\n");
442 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
443 		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
444 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
445 		drm_dbg_kms(display->drm, "\tPSR disabled\n");
446 }
447 
448 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
449 {
450 	struct intel_display *display = to_intel_display(intel_dp);
451 	struct drm_i915_private *dev_priv = to_i915(display->drm);
452 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
453 	ktime_t time_ns =  ktime_get();
454 
455 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
456 		intel_dp->psr.last_entry_attempt = time_ns;
457 		drm_dbg_kms(display->drm,
458 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
459 			    transcoder_name(cpu_transcoder));
460 	}
461 
462 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
463 		intel_dp->psr.last_exit = time_ns;
464 		drm_dbg_kms(display->drm,
465 			    "[transcoder %s] PSR exit completed\n",
466 			    transcoder_name(cpu_transcoder));
467 
468 		if (DISPLAY_VER(display) >= 9) {
469 			u32 val;
470 
471 			val = intel_de_rmw(display,
472 					   PSR_EVENT(display, cpu_transcoder),
473 					   0, 0);
474 
475 			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
476 		}
477 	}
478 
479 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
480 		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
481 			 transcoder_name(cpu_transcoder));
482 
483 		intel_dp->psr.irq_aux_error = true;
484 
485 		/*
486 		 * If this interrupt is not masked it will keep
487 		 * interrupting so fast that it prevents the scheduled
488 		 * work from running.
489 		 * Also after a PSR error, we don't want to arm PSR
490 		 * again so we don't care about unmasking the interrupt
491 		 * or unsetting irq_aux_error.
492 		 */
493 		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
494 			     0, psr_irq_psr_error_bit_get(intel_dp));
495 
496 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
497 	}
498 }
499 
500 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
501 {
502 	struct intel_display *display = to_intel_display(intel_dp);
503 	u8 val = 8; /* assume the worst if we can't read the value */
504 
505 	if (drm_dp_dpcd_readb(&intel_dp->aux,
506 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
507 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
508 	else
509 		drm_dbg_kms(display->drm,
510 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
511 	return val;
512 }
513 
514 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
515 {
516 	u8 su_capability = 0;
517 
518 	if (intel_dp->psr.sink_panel_replay_su_support)
519 		drm_dp_dpcd_readb(&intel_dp->aux,
520 				  DP_PANEL_REPLAY_CAP_CAPABILITY,
521 				  &su_capability);
522 	else
523 		su_capability = intel_dp->psr_dpcd[1];
524 
525 	return su_capability;
526 }
527 
528 static unsigned int
529 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
530 {
531 	return intel_dp->psr.sink_panel_replay_su_support ?
532 		DP_PANEL_REPLAY_CAP_X_GRANULARITY :
533 		DP_PSR2_SU_X_GRANULARITY;
534 }
535 
536 static unsigned int
537 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
538 {
539 	return intel_dp->psr.sink_panel_replay_su_support ?
540 		DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
541 		DP_PSR2_SU_Y_GRANULARITY;
542 }
543 
544 /*
545  * Note: Bits related to granularity are the same in the panel replay and PSR
546  * registers. Rely on the PSR definitions for these "common" bits.
547  */
548 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
549 {
550 	struct intel_display *display = to_intel_display(intel_dp);
551 	ssize_t r;
552 	u16 w;
553 	u8 y;
554 
555 	/*
556 	 * TODO: Do we need to take into account panel supporting both PSR and
557 	 * Panel replay?
558 	 */
559 
560 	/*
561 	 * If the sink doesn't have specific granularity requirements, set the
562 	 * legacy ones.
563 	 */
564 	if (!(intel_dp_get_su_capability(intel_dp) &
565 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
566 		/* As PSR2 HW sends full lines, we do not care about x granularity */
567 		w = 4;
568 		y = 4;
569 		goto exit;
570 	}
571 
572 	r = drm_dp_dpcd_read(&intel_dp->aux,
573 			     intel_dp_get_su_x_granularity_offset(intel_dp),
574 			     &w, 2);
575 	if (r != 2)
576 		drm_dbg_kms(display->drm,
577 			    "Unable to read selective update x granularity\n");
578 	/*
579 	 * Spec says that if the value read is 0 the default granularity should
580 	 * be used instead.
581 	 */
582 	if (r != 2 || w == 0)
583 		w = 4;
584 
585 	r = drm_dp_dpcd_read(&intel_dp->aux,
586 			     intel_dp_get_su_y_granularity_offset(intel_dp),
587 			     &y, 1);
588 	if (r != 1) {
589 		drm_dbg_kms(display->drm,
590 			    "Unable to read selective update y granularity\n");
591 		y = 4;
592 	}
593 	if (y == 0)
594 		y = 1;
595 
596 exit:
597 	intel_dp->psr.su_w_granularity = w;
598 	intel_dp->psr.su_y_granularity = y;
599 }
600 
601 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
602 {
603 	struct intel_display *display = to_intel_display(intel_dp);
604 
605 	if (intel_dp_is_edp(intel_dp)) {
606 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
607 			drm_dbg_kms(display->drm,
608 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
609 			return;
610 		}
611 
612 		if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
613 		      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
614 			drm_dbg_kms(display->drm,
615 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
616 			return;
617 		}
618 	}
619 
620 	intel_dp->psr.sink_panel_replay_support = true;
621 
622 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
623 	    DP_PANEL_REPLAY_SU_SUPPORT)
624 		intel_dp->psr.sink_panel_replay_su_support = true;
625 
626 	drm_dbg_kms(display->drm,
627 		    "Panel replay %sis supported by panel\n",
628 		    intel_dp->psr.sink_panel_replay_su_support ?
629 		    "selective_update " : "");
630 }
631 
632 static void _psr_init_dpcd(struct intel_dp *intel_dp)
633 {
634 	struct intel_display *display = to_intel_display(intel_dp);
635 
636 	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
637 		    intel_dp->psr_dpcd[0]);
638 
639 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
640 		drm_dbg_kms(display->drm,
641 			    "PSR support not currently available for this panel\n");
642 		return;
643 	}
644 
645 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
646 		drm_dbg_kms(display->drm,
647 			    "Panel lacks power state control, PSR cannot be enabled\n");
648 		return;
649 	}
650 
651 	intel_dp->psr.sink_support = true;
652 	intel_dp->psr.sink_sync_latency =
653 		intel_dp_get_sink_sync_latency(intel_dp);
654 
655 	if (DISPLAY_VER(display) >= 9 &&
656 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
657 		bool y_req = intel_dp->psr_dpcd[1] &
658 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
659 
660 		/*
661 		 * All panels that support PSR version 03h (PSR2 +
662 		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
663 		 * only sure that it is going to be used when required by the
664 		 * panel. This way the panel is capable of doing selective
665 		 * updates without an aux frame sync.
666 		 *
667 		 * To support PSR version 02h and PSR version 03h panels
668 		 * without the Y-coordinate requirement we would need to
669 		 * enable GTC first.
670 		 */
671 		intel_dp->psr.sink_psr2_support = y_req &&
672 			intel_alpm_aux_wake_supported(intel_dp);
673 		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
674 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
675 	}
676 }
677 
678 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
679 {
680 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
681 			 sizeof(intel_dp->psr_dpcd));
682 
683 	drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
684 			 &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
685 
686 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
687 	    DP_PANEL_REPLAY_SUPPORT)
688 		_panel_replay_init_dpcd(intel_dp);
689 
690 	if (intel_dp->psr_dpcd[0])
691 		_psr_init_dpcd(intel_dp);
692 
693 	if (intel_dp->psr.sink_psr2_support ||
694 	    intel_dp->psr.sink_panel_replay_su_support)
695 		intel_dp_get_su_granularity(intel_dp);
696 }
697 
698 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
699 {
700 	struct intel_display *display = to_intel_display(intel_dp);
701 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
702 	u32 aux_clock_divider, aux_ctl;
703 	/* write DP_SET_POWER=D0 */
704 	static const u8 aux_msg[] = {
705 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
706 		[1] = (DP_SET_POWER >> 8) & 0xff,
707 		[2] = DP_SET_POWER & 0xff,
708 		[3] = 1 - 1,
709 		[4] = DP_SET_POWER_D0,
710 	};
711 	int i;
712 
713 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
714 	for (i = 0; i < sizeof(aux_msg); i += 4)
715 		intel_de_write(display,
716 			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
717 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
718 
719 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
720 
721 	/* Start with bits set for DDI_AUX_CTL register */
722 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
723 					     aux_clock_divider);
724 
725 	/* Select only valid bits for SRD_AUX_CTL */
726 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
727 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
728 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
729 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
730 
731 	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
732 		       aux_ctl);
733 }
734 
735 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
736 {
737 	struct intel_display *display = to_intel_display(intel_dp);
738 
739 	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
740 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
741 		return false;
742 
743 	return panel_replay ?
744 		intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
745 		DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
746 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
747 		psr2_su_region_et_global_enabled(intel_dp);
748 }
749 
750 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
751 				      const struct intel_crtc_state *crtc_state)
752 {
753 	u8 val = DP_PANEL_REPLAY_ENABLE |
754 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
755 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
756 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
757 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
758 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
759 
760 	if (crtc_state->has_sel_update)
761 		val |= DP_PANEL_REPLAY_SU_ENABLE;
762 
763 	if (crtc_state->enable_psr2_su_region_et)
764 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
765 
766 	if (crtc_state->req_psr2_sdp_prior_scanline)
767 		panel_replay_config2 |=
768 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
769 
770 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
771 
772 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
773 			   panel_replay_config2);
774 }
775 
776 static void _psr_enable_sink(struct intel_dp *intel_dp,
777 			     const struct intel_crtc_state *crtc_state)
778 {
779 	struct intel_display *display = to_intel_display(intel_dp);
780 	u8 val = 0;
781 
782 	if (crtc_state->has_sel_update) {
783 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
784 	} else {
785 		if (intel_dp->psr.link_standby)
786 			val |= DP_PSR_MAIN_LINK_ACTIVE;
787 
788 		if (DISPLAY_VER(display) >= 8)
789 			val |= DP_PSR_CRC_VERIFICATION;
790 	}
791 
792 	if (crtc_state->req_psr2_sdp_prior_scanline)
793 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
794 
795 	if (crtc_state->enable_psr2_su_region_et)
796 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
797 
798 	if (intel_dp->psr.entry_setup_frames > 0)
799 		val |= DP_PSR_FRAME_CAPTURE;
800 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
801 
802 	val |= DP_PSR_ENABLE;
803 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
804 }
805 
806 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
807 				  const struct intel_crtc_state *crtc_state)
808 {
809 	intel_alpm_enable_sink(intel_dp, crtc_state);
810 
811 	crtc_state->has_panel_replay ?
812 		_panel_replay_enable_sink(intel_dp, crtc_state) :
813 		_psr_enable_sink(intel_dp, crtc_state);
814 
815 	if (intel_dp_is_edp(intel_dp))
816 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
817 }
818 
819 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
820 {
821 	if (CAN_PANEL_REPLAY(intel_dp))
822 		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
823 				   DP_PANEL_REPLAY_ENABLE);
824 }
825 
826 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
827 {
828 	struct intel_display *display = to_intel_display(intel_dp);
829 	struct intel_connector *connector = intel_dp->attached_connector;
830 	u32 val = 0;
831 
832 	if (DISPLAY_VER(display) >= 11)
833 		val |= EDP_PSR_TP4_TIME_0us;
834 
835 	if (display->params.psr_safest_params) {
836 		val |= EDP_PSR_TP1_TIME_2500us;
837 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
838 		goto check_tp3_sel;
839 	}
840 
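	/*
	 * Pick the smallest TP1/TP2_TP3 bucket that covers the VBT wakeup
	 * time, e.g. a tp1 wakeup time of 250 us selects EDP_PSR_TP1_TIME_500us.
	 */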
841 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
842 		val |= EDP_PSR_TP1_TIME_0us;
843 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
844 		val |= EDP_PSR_TP1_TIME_100us;
845 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
846 		val |= EDP_PSR_TP1_TIME_500us;
847 	else
848 		val |= EDP_PSR_TP1_TIME_2500us;
849 
850 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
851 		val |= EDP_PSR_TP2_TP3_TIME_0us;
852 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
853 		val |= EDP_PSR_TP2_TP3_TIME_100us;
854 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
855 		val |= EDP_PSR_TP2_TP3_TIME_500us;
856 	else
857 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
858 
859 	/*
860 	 * WA 0479: hsw,bdw
861 	 * "Do not skip both TP1 and TP2/TP3"
862 	 */
863 	if (DISPLAY_VER(display) < 9 &&
864 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
865 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
866 		val |= EDP_PSR_TP2_TP3_TIME_100us;
867 
868 check_tp3_sel:
869 	if (intel_dp_source_supports_tps3(display) &&
870 	    drm_dp_tps3_supported(intel_dp->dpcd))
871 		val |= EDP_PSR_TP_TP1_TP3;
872 	else
873 		val |= EDP_PSR_TP_TP1_TP2;
874 
875 	return val;
876 }
877 
878 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
879 {
880 	struct intel_display *display = to_intel_display(intel_dp);
881 	struct intel_connector *connector = intel_dp->attached_connector;
882 	int idle_frames;
883 
884 	/* Let's use 6 as the minimum to cover all known cases including the
885 	 * off-by-one issue that HW has in some cases.
886 	 */
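	/*
	 * Worked example (hypothetical numbers): VBT idle_frames = 2 and
	 * sink_sync_latency = 8 give max(6, 2) = 6 and then max(6, 8 + 1) = 9
	 * idle frames, which still fits the 4-bit register field (0xf).
	 */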
887 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
888 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
889 
890 	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
891 		idle_frames = 0xf;
892 
893 	return idle_frames;
894 }
895 
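/*
 * Helper for the Wa_16025596647 paths below: DC5/DC6 entry is treated as
 * blocked when the current DC state is neither the up-to-DC5 nor the
 * up-to-DC6 target, when other non-PSR pipes are still active, or when
 * vblank interrupts are enabled for the PSR pipe.
 */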
896 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
897 {
898 	struct intel_display *display = to_intel_display(intel_dp);
899 	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
900 	struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
901 
902 	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
903 		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
904 		intel_dp->psr.active_non_psr_pipes ||
905 		READ_ONCE(vblank->enabled);
906 }
907 
908 static void hsw_activate_psr1(struct intel_dp *intel_dp)
909 {
910 	struct intel_display *display = to_intel_display(intel_dp);
911 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
912 	u32 max_sleep_time = 0x1f;
913 	u32 val = EDP_PSR_ENABLE;
914 
915 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
916 
917 	if (DISPLAY_VER(display) < 20)
918 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
919 
920 	if (display->platform.haswell)
921 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
922 
923 	if (intel_dp->psr.link_standby)
924 		val |= EDP_PSR_LINK_STANDBY;
925 
926 	val |= intel_psr1_get_tp_time(intel_dp);
927 
928 	if (DISPLAY_VER(display) >= 8)
929 		val |= EDP_PSR_CRC_ENABLE;
930 
931 	if (DISPLAY_VER(display) >= 20)
932 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
933 
934 	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
935 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
936 
937 	/* Wa_16025596647 */
938 	if ((DISPLAY_VER(display) == 20 ||
939 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
940 	    is_dc5_dc6_blocked(intel_dp))
941 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
942 								       intel_dp->psr.pipe,
943 								       true);
944 }
945 
946 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
947 {
948 	struct intel_display *display = to_intel_display(intel_dp);
949 	struct intel_connector *connector = intel_dp->attached_connector;
950 	u32 val = 0;
951 
952 	if (display->params.psr_safest_params)
953 		return EDP_PSR2_TP2_TIME_2500us;
954 
955 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
956 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
957 		val |= EDP_PSR2_TP2_TIME_50us;
958 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
959 		val |= EDP_PSR2_TP2_TIME_100us;
960 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
961 		val |= EDP_PSR2_TP2_TIME_500us;
962 	else
963 		val |= EDP_PSR2_TP2_TIME_2500us;
964 
965 	return val;
966 }
967 
968 static int psr2_block_count_lines(struct intel_dp *intel_dp)
969 {
970 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
971 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
972 }
973 
974 static int psr2_block_count(struct intel_dp *intel_dp)
975 {
976 	return psr2_block_count_lines(intel_dp) / 4;
977 }
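
/*
 * Example: io/fast wake lines below 9 give 8 block lines, i.e. a block
 * count of 2 (TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 in hsw_activate_psr2());
 * otherwise 12 lines give a block count of 3.
 */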
978 
979 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
980 {
981 	u8 frames_before_su_entry;
982 
983 	frames_before_su_entry = max_t(u8,
984 				       intel_dp->psr.sink_sync_latency + 1,
985 				       2);
986 
987 	/* Entry setup frames must be at least 1 less than frames before SU entry */
988 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
989 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
990 
991 	return frames_before_su_entry;
992 }
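
/*
 * Worked example (hypothetical numbers) for frames_before_su_entry():
 * sink_sync_latency = 3 gives max(3 + 1, 2) = 4 frames; if
 * entry_setup_frames is also 4, the result is bumped to 4 + 1 = 5 so that
 * entry setup stays at least one frame shorter than the SU entry delay.
 */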
993 
994 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
995 {
996 	struct intel_display *display = to_intel_display(intel_dp);
997 	struct intel_psr *psr = &intel_dp->psr;
998 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
999 
1000 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
1001 		u32 val = psr->su_region_et_enabled ?
1002 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
1003 
1004 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1005 			val |= EDP_PSR2_SU_SDP_SCANLINE;
1006 
1007 		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
1008 			       val);
1009 	}
1010 
1011 	intel_de_rmw(display,
1012 		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1013 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1014 
1015 	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1016 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1017 }
1018 
1019 static void hsw_activate_psr2(struct intel_dp *intel_dp)
1020 {
1021 	struct intel_display *display = to_intel_display(intel_dp);
1022 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1023 	u32 val = EDP_PSR2_ENABLE;
1024 	u32 psr_val = 0;
1025 	u8 idle_frames;
1026 
1027 	/* Wa_16025596647 */
1028 	if ((DISPLAY_VER(display) == 20 ||
1029 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1030 	    is_dc5_dc6_blocked(intel_dp))
1031 		idle_frames = 0;
1032 	else
1033 		idle_frames = psr_compute_idle_frames(intel_dp);
1034 	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
1035 
1036 	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
1037 		val |= EDP_SU_TRACK_ENABLE;
1038 
1039 	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1040 		val |= EDP_Y_COORDINATE_ENABLE;
1041 
1042 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1043 
1044 	val |= intel_psr2_get_tp_time(intel_dp);
1045 
1046 	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1047 		if (psr2_block_count(intel_dp) > 2)
1048 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1049 		else
1050 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1051 	}
1052 
1053 	/* Wa_22012278275:adl-p */
1054 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1055 		static const u8 map[] = {
1056 			2, /* 5 lines */
1057 			1, /* 6 lines */
1058 			0, /* 7 lines */
1059 			3, /* 8 lines */
1060 			6, /* 9 lines */
1061 			5, /* 10 lines */
1062 			4, /* 11 lines */
1063 			7, /* 12 lines */
1064 		};
1065 		/*
1066 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1067 		 * comments below for more information
1068 		 */
1069 		int tmp;
1070 
1071 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1072 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1073 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1074 
1075 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1076 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1077 	} else if (DISPLAY_VER(display) >= 20) {
1078 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1079 	} else if (DISPLAY_VER(display) >= 12) {
1080 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1081 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1082 	} else if (DISPLAY_VER(display) >= 9) {
1083 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1084 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1085 	}
1086 
1087 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1088 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1089 
1090 	if (DISPLAY_VER(display) >= 20)
1091 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1092 
1093 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1094 		u32 tmp;
1095 
1096 		tmp = intel_de_read(display,
1097 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1098 		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1099 	} else if (HAS_PSR2_SEL_FETCH(display)) {
1100 		intel_de_write(display,
1101 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1102 	}
1103 
1104 	if (intel_dp->psr.su_region_et_enabled)
1105 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1106 
1107 	/*
1108 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1109 	 * recommends keeping this bit unset while PSR2 is enabled.
1110 	 */
1111 	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1112 
1113 	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1114 }
1115 
1116 static bool
1117 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1118 {
1119 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1120 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1121 	else if (DISPLAY_VER(display) >= 12)
1122 		return cpu_transcoder == TRANSCODER_A;
1123 	else if (DISPLAY_VER(display) >= 9)
1124 		return cpu_transcoder == TRANSCODER_EDP;
1125 	else
1126 		return false;
1127 }
1128 
1129 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1130 {
1131 	if (!crtc_state->hw.active)
1132 		return 0;
1133 
1134 	return DIV_ROUND_UP(1000 * 1000,
1135 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1136 }
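
/*
 * Example: a 60 Hz mode gives DIV_ROUND_UP(1000 * 1000, 60) = 16667 us
 * per frame.
 */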
1137 
1138 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1139 				     u32 idle_frames)
1140 {
1141 	struct intel_display *display = to_intel_display(intel_dp);
1142 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1143 
1144 	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1145 		     EDP_PSR2_IDLE_FRAMES_MASK,
1146 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1147 }
1148 
1149 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1150 {
1151 	struct intel_display *display = to_intel_display(intel_dp);
1152 
1153 	psr2_program_idle_frames(intel_dp, 0);
1154 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1155 }
1156 
1157 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1158 {
1159 	struct intel_display *display = to_intel_display(intel_dp);
1160 
1161 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1162 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1163 }
1164 
1165 static void tgl_dc3co_disable_work(struct work_struct *work)
1166 {
1167 	struct intel_dp *intel_dp =
1168 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1169 
1170 	mutex_lock(&intel_dp->psr.lock);
1171 	/* If delayed work is pending, it is not idle */
1172 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1173 		goto unlock;
1174 
1175 	tgl_psr2_disable_dc3co(intel_dp);
1176 unlock:
1177 	mutex_unlock(&intel_dp->psr.lock);
1178 }
1179 
1180 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1181 {
1182 	if (!intel_dp->psr.dc3co_exitline)
1183 		return;
1184 
1185 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1186 	/* Before PSR2 exit disallow dc3co */
1187 	tgl_psr2_disable_dc3co(intel_dp);
1188 }
1189 
1190 static bool
1191 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1192 			      struct intel_crtc_state *crtc_state)
1193 {
1194 	struct intel_display *display = to_intel_display(intel_dp);
1195 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1196 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1197 	enum port port = dig_port->base.port;
1198 
1199 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1200 		return pipe <= PIPE_B && port <= PORT_B;
1201 	else
1202 		return pipe == PIPE_A && port == PORT_A;
1203 }
1204 
1205 static void
1206 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1207 				  struct intel_crtc_state *crtc_state)
1208 {
1209 	struct intel_display *display = to_intel_display(intel_dp);
1210 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1211 	struct i915_power_domains *power_domains = &display->power.domains;
1212 	u32 exit_scanlines;
1213 
1214 	/*
1215 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1216 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1217 	 * is applied. B.Specs:49196
1218 	 */
1219 	return;
1220 
1221 	/*
1222 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1223 	 * TODO: when the issue is addressed, this restriction should be removed.
1224 	 */
1225 	if (crtc_state->enable_psr2_sel_fetch)
1226 		return;
1227 
1228 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1229 		return;
1230 
1231 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1232 		return;
1233 
1234 	/* Wa_16011303918:adl-p */
1235 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1236 		return;
1237 
1238 	/*
1239 	 * DC3CO Exit time 200us B.Spec 49196
1240 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1241 	 */
1242 	exit_scanlines =
1243 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1244 
1245 	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1246 		return;
1247 
1248 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1249 }
1250 
1251 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1252 					      struct intel_crtc_state *crtc_state)
1253 {
1254 	struct intel_display *display = to_intel_display(intel_dp);
1255 
1256 	if (!display->params.enable_psr2_sel_fetch &&
1257 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1258 		drm_dbg_kms(display->drm,
1259 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1260 		return false;
1261 	}
1262 
1263 	if (crtc_state->uapi.async_flip) {
1264 		drm_dbg_kms(display->drm,
1265 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1266 		return false;
1267 	}
1268 
1269 	return crtc_state->enable_psr2_sel_fetch = true;
1270 }
1271 
1272 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1273 				   struct intel_crtc_state *crtc_state)
1274 {
1275 	struct intel_display *display = to_intel_display(intel_dp);
1276 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1277 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1278 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1279 	u16 y_granularity = 0;
1280 
1281 	/* PSR2 HW only sends full lines so we only need to validate the width */
1282 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1283 		return false;
1284 
1285 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1286 		return false;
1287 
1288 	/* HW tracking is only aligned to 4 lines */
1289 	if (!crtc_state->enable_psr2_sel_fetch)
1290 		return intel_dp->psr.su_y_granularity == 4;
1291 
1292 	/*
1293 	 * adl_p and mtl platforms have 1 line granularity.
1294 	 * For other platforms with SW tracking we can adjust the y coordinates
1295 	 * to match the sink requirement if it is a multiple of 4.
1296 	 */
1297 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1298 		y_granularity = intel_dp->psr.su_y_granularity;
1299 	else if (intel_dp->psr.su_y_granularity <= 2)
1300 		y_granularity = 4;
1301 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1302 		y_granularity = intel_dp->psr.su_y_granularity;
1303 
1304 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1305 		return false;
1306 
1307 	if (crtc_state->dsc.compression_enable &&
1308 	    vdsc_cfg->slice_height % y_granularity)
1309 		return false;
1310 
1311 	crtc_state->su_y_granularity = y_granularity;
1312 	return true;
1313 }
1314 
1315 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1316 							struct intel_crtc_state *crtc_state)
1317 {
1318 	struct intel_display *display = to_intel_display(intel_dp);
1319 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1320 	u32 hblank_total, hblank_ns, req_ns;
1321 
1322 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1323 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1324 
1325 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1326 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
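	/*
	 * Worked example (hypothetical numbers): a 160 pixel hblank at a
	 * 300 MHz pixel clock (crtc_clock = 300000 kHz) gives hblank_ns = 533,
	 * and 4 lanes at a 2.7 GHz link rate (port_clock = 270000, i.e. a
	 * 270 MHz symbol clock) give req_ns = (60 / 4 + 11) * 1000 / 270 = 96,
	 * so the > 100 ns margin check below passes.
	 */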
1327 
1328 	if ((hblank_ns - req_ns) > 100)
1329 		return true;
1330 
1331 	/* Not supported <13 / Wa_22012279113:adl-p */
1332 	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1333 		return false;
1334 
1335 	crtc_state->req_psr2_sdp_prior_scanline = true;
1336 	return true;
1337 }
1338 
1339 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1340 					const struct drm_display_mode *adjusted_mode)
1341 {
1342 	struct intel_display *display = to_intel_display(intel_dp);
1343 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1344 	int entry_setup_frames = 0;
1345 
1346 	if (psr_setup_time < 0) {
1347 		drm_dbg_kms(display->drm,
1348 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1349 			    intel_dp->psr_dpcd[1]);
1350 		return -ETIME;
1351 	}
1352 
1353 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1354 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1355 		if (DISPLAY_VER(display) >= 20) {
1356 			/* setup entry frames can be up to 3 frames */
1357 			entry_setup_frames = 1;
1358 			drm_dbg_kms(display->drm,
1359 				    "PSR setup entry frames %d\n",
1360 				    entry_setup_frames);
1361 		} else {
1362 			drm_dbg_kms(display->drm,
1363 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1364 				    psr_setup_time);
1365 			return -ETIME;
1366 		}
1367 	}
1368 
1369 	return entry_setup_frames;
1370 }
1371 
1372 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1373 				       const struct intel_crtc_state *crtc_state,
1374 				       bool aux_less)
1375 {
1376 	struct intel_display *display = to_intel_display(intel_dp);
1377 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1378 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1379 	int wake_lines;
1380 
1381 	if (aux_less)
1382 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1383 	else
1384 		wake_lines = DISPLAY_VER(display) < 20 ?
1385 			psr2_block_count_lines(intel_dp) :
1386 			intel_dp->alpm_parameters.io_wake_lines;
1387 
1388 	if (crtc_state->req_psr2_sdp_prior_scanline)
1389 		vblank -= 1;
1390 
1391 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1392 	if (vblank < wake_lines)
1393 		return false;
1394 
1395 	return true;
1396 }
1397 
1398 static bool alpm_config_valid(struct intel_dp *intel_dp,
1399 			      const struct intel_crtc_state *crtc_state,
1400 			      bool aux_less)
1401 {
1402 	struct intel_display *display = to_intel_display(intel_dp);
1403 
1404 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1405 		drm_dbg_kms(display->drm,
1406 			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1407 		return false;
1408 	}
1409 
1410 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1411 		drm_dbg_kms(display->drm,
1412 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1413 		return false;
1414 	}
1415 
1416 	return true;
1417 }
1418 
1419 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1420 				    struct intel_crtc_state *crtc_state)
1421 {
1422 	struct intel_display *display = to_intel_display(intel_dp);
1423 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1424 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1425 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1426 
1427 	if (!intel_dp->psr.sink_psr2_support)
1428 		return false;
1429 
1430 	/* JSL and EHL only support eDP 1.3 */
1431 	if (display->platform.jasperlake || display->platform.elkhartlake) {
1432 		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1433 		return false;
1434 	}
1435 
1436 	/* Wa_16011181250 */
1437 	if (display->platform.rocketlake || display->platform.alderlake_s ||
1438 	    display->platform.dg2) {
1439 		drm_dbg_kms(display->drm,
1440 			    "PSR2 is defeatured for this platform\n");
1441 		return false;
1442 	}
1443 
1444 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1445 		drm_dbg_kms(display->drm,
1446 			    "PSR2 not completely functional in this stepping\n");
1447 		return false;
1448 	}
1449 
1450 	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1451 		drm_dbg_kms(display->drm,
1452 			    "PSR2 not supported in transcoder %s\n",
1453 			    transcoder_name(crtc_state->cpu_transcoder));
1454 		return false;
1455 	}
1456 
1457 	/*
1458 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1459 	 * resolution requires DSC to be enabled, priority is given to DSC
1460 	 * over PSR2.
1461 	 */
1462 	if (crtc_state->dsc.compression_enable &&
1463 	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
1464 		drm_dbg_kms(display->drm,
1465 			    "PSR2 cannot be enabled since DSC is enabled\n");
1466 		return false;
1467 	}
1468 
1469 	if (DISPLAY_VER(display) >= 20) {
1470 		psr_max_h = crtc_hdisplay;
1471 		psr_max_v = crtc_vdisplay;
1472 		max_bpp = crtc_state->pipe_bpp;
1473 	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1474 		psr_max_h = 5120;
1475 		psr_max_v = 3200;
1476 		max_bpp = 30;
1477 	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1478 		psr_max_h = 4096;
1479 		psr_max_v = 2304;
1480 		max_bpp = 24;
1481 	} else if (DISPLAY_VER(display) == 9) {
1482 		psr_max_h = 3640;
1483 		psr_max_v = 2304;
1484 		max_bpp = 24;
1485 	}
1486 
1487 	if (crtc_state->pipe_bpp > max_bpp) {
1488 		drm_dbg_kms(display->drm,
1489 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1490 			    crtc_state->pipe_bpp, max_bpp);
1491 		return false;
1492 	}
1493 
1494 	/* Wa_16011303918:adl-p */
1495 	if (crtc_state->vrr.enable &&
1496 	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1497 		drm_dbg_kms(display->drm,
1498 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1499 		return false;
1500 	}
1501 
1502 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1503 		return false;
1504 
1505 	if (!crtc_state->enable_psr2_sel_fetch &&
1506 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1507 		drm_dbg_kms(display->drm,
1508 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1509 			    crtc_hdisplay, crtc_vdisplay,
1510 			    psr_max_h, psr_max_v);
1511 		return false;
1512 	}
1513 
1514 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1515 
1516 	return true;
1517 }
1518 
1519 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1520 					  struct intel_crtc_state *crtc_state)
1521 {
1522 	struct intel_display *display = to_intel_display(intel_dp);
1523 
1524 	if (HAS_PSR2_SEL_FETCH(display) &&
1525 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1526 	    !HAS_PSR_HW_TRACKING(display)) {
1527 		drm_dbg_kms(display->drm,
1528 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1529 		goto unsupported;
1530 	}
1531 
1532 	if (!psr2_global_enabled(intel_dp)) {
1533 		drm_dbg_kms(display->drm,
1534 			    "Selective update disabled by flag\n");
1535 		goto unsupported;
1536 	}
1537 
1538 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1539 		goto unsupported;
1540 
1541 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1542 		drm_dbg_kms(display->drm,
1543 			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1544 		goto unsupported;
1545 	}
1546 
1547 	if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1548 					     !intel_dp->psr.sink_panel_replay_su_support))
1549 		goto unsupported;
1550 
1551 	if (crtc_state->crc_enabled) {
1552 		drm_dbg_kms(display->drm,
1553 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1554 		goto unsupported;
1555 	}
1556 
1557 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1558 		drm_dbg_kms(display->drm,
1559 			    "Selective update not enabled, SU granularity not compatible\n");
1560 		goto unsupported;
1561 	}
1562 
1563 	crtc_state->enable_psr2_su_region_et =
1564 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1565 
1566 	return true;
1567 
1568 unsupported:
1569 	crtc_state->enable_psr2_sel_fetch = false;
1570 	return false;
1571 }
1572 
1573 static bool _psr_compute_config(struct intel_dp *intel_dp,
1574 				struct intel_crtc_state *crtc_state)
1575 {
1576 	struct intel_display *display = to_intel_display(intel_dp);
1577 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1578 	int entry_setup_frames;
1579 
1580 	if (!CAN_PSR(intel_dp))
1581 		return false;
1582 
1583 	/*
1584 	 * Currently PSR doesn't work reliably with VRR enabled.
1585 	 */
1586 	if (crtc_state->vrr.enable)
1587 		return false;
1588 
1589 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1590 
1591 	if (entry_setup_frames >= 0) {
1592 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1593 	} else {
1594 		drm_dbg_kms(display->drm,
1595 			    "PSR condition failed: PSR setup timing not met\n");
1596 		return false;
1597 	}
1598 
1599 	return true;
1600 }
1601 
1602 static bool
1603 _panel_replay_compute_config(struct intel_dp *intel_dp,
1604 			     const struct intel_crtc_state *crtc_state,
1605 			     const struct drm_connector_state *conn_state)
1606 {
1607 	struct intel_display *display = to_intel_display(intel_dp);
1608 	struct intel_connector *connector =
1609 		to_intel_connector(conn_state->connector);
1610 	struct intel_hdcp *hdcp = &connector->hdcp;
1611 
1612 	if (!CAN_PANEL_REPLAY(intel_dp))
1613 		return false;
1614 
1615 	if (!panel_replay_global_enabled(intel_dp)) {
1616 		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1617 		return false;
1618 	}
1619 
1620 	if (crtc_state->crc_enabled) {
1621 		drm_dbg_kms(display->drm,
1622 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1623 		return false;
1624 	}
1625 
1626 	if (!intel_dp_is_edp(intel_dp))
1627 		return true;
1628 
1629 	/* Remaining checks are for eDP only */
1630 
1631 	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1632 	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1633 		return false;
1634 
1635 	/* 128b/132b Panel Replay is not supported on eDP */
1636 	if (intel_dp_is_uhbr(crtc_state)) {
1637 		drm_dbg_kms(display->drm,
1638 			    "Panel Replay is not supported with 128b/132b\n");
1639 		return false;
1640 	}
1641 
1642 	/* HW will not allow Panel Replay on eDP when HDCP enabled */
1643 	if (conn_state->content_protection ==
1644 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1645 	    (conn_state->content_protection ==
1646 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1647 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1648 		drm_dbg_kms(display->drm,
1649 			    "Panel Replay is not supported with HDCP\n");
1650 		return false;
1651 	}
1652 
1653 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1654 		return false;
1655 
1656 	return true;
1657 }
1658 
1659 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1660 					   struct intel_crtc_state *crtc_state)
1661 {
1662 	struct intel_display *display = to_intel_display(intel_dp);
1663 
1664 	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1665 		!crtc_state->has_sel_update);
1666 }
1667 
1668 void intel_psr_compute_config(struct intel_dp *intel_dp,
1669 			      struct intel_crtc_state *crtc_state,
1670 			      struct drm_connector_state *conn_state)
1671 {
1672 	struct intel_display *display = to_intel_display(intel_dp);
1673 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1674 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1675 	struct intel_crtc *crtc;
1676 	u8 active_pipes = 0;
1677 
1678 	if (!psr_global_enabled(intel_dp)) {
1679 		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1680 		return;
1681 	}
1682 
1683 	if (intel_dp->psr.sink_not_reliable) {
1684 		drm_dbg_kms(display->drm,
1685 			    "PSR sink implementation is not reliable\n");
1686 		return;
1687 	}
1688 
1689 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1690 		drm_dbg_kms(display->drm,
1691 			    "PSR condition failed: Interlaced mode enabled\n");
1692 		return;
1693 	}
1694 
1695 	/*
1696 	 * FIXME figure out what is wrong with PSR+joiner and
1697 	 * fix it. Presumably something related to the fact that
1698 	 * PSR is a transcoder level feature.
1699 	 */
1700 	if (crtc_state->joiner_pipes) {
1701 		drm_dbg_kms(display->drm,
1702 			    "PSR disabled due to joiner\n");
1703 		return;
1704 	}
1705 
1706 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1707 								    crtc_state,
1708 								    conn_state);
1709 
1710 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1711 		_psr_compute_config(intel_dp, crtc_state);
1712 
1713 	if (!crtc_state->has_psr)
1714 		return;
1715 
1716 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1717 
1718 	/* Wa_18037818876 */
1719 	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
1720 		crtc_state->has_psr = false;
1721 		drm_dbg_kms(display->drm,
1722 			    "PSR disabled to workaround PSR FSM hang issue\n");
1723 	}
1724 
1725 	/* Rest is for Wa_16025596647 */
1726 	if (DISPLAY_VER(display) != 20 &&
1727 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1728 		return;
1729 
1730 	/* Not needed by Panel Replay */
1731 	if (crtc_state->has_panel_replay)
1732 		return;
1733 
1734 	/* We ignore possible secondary PSR/Panel Replay capable eDP */
1735 	for_each_intel_crtc(display->drm, crtc)
1736 		active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1737 
1738 	active_pipes = intel_calc_active_pipes(state, active_pipes);
1739 
1740 	crtc_state->active_non_psr_pipes = active_pipes &
1741 		~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1742 }
1743 
1744 void intel_psr_get_config(struct intel_encoder *encoder,
1745 			  struct intel_crtc_state *pipe_config)
1746 {
1747 	struct intel_display *display = to_intel_display(encoder);
1748 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1749 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1750 	struct intel_dp *intel_dp;
1751 	u32 val;
1752 
1753 	if (!dig_port)
1754 		return;
1755 
1756 	intel_dp = &dig_port->dp;
1757 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1758 		return;
1759 
1760 	mutex_lock(&intel_dp->psr.lock);
1761 	if (!intel_dp->psr.enabled)
1762 		goto unlock;
1763 
1764 	if (intel_dp->psr.panel_replay_enabled) {
1765 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1766 	} else {
1767 		/*
1768 		 * Not possible to read the EDP_PSR/PSR2_CTL registers as PSR is
1769 		 * dynamically enabled/disabled by frontbuffer tracking and others.
1770 		 */
1771 		pipe_config->has_psr = true;
1772 	}
1773 
1774 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1775 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1776 
1777 	if (!intel_dp->psr.sel_update_enabled)
1778 		goto unlock;
1779 
1780 	if (HAS_PSR2_SEL_FETCH(display)) {
1781 		val = intel_de_read(display,
1782 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1783 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1784 			pipe_config->enable_psr2_sel_fetch = true;
1785 	}
1786 
1787 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1788 
1789 	if (DISPLAY_VER(display) >= 12) {
1790 		val = intel_de_read(display,
1791 				    TRANS_EXITLINE(display, cpu_transcoder));
1792 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1793 	}
1794 unlock:
1795 	mutex_unlock(&intel_dp->psr.lock);
1796 }
1797 
1798 static void intel_psr_activate(struct intel_dp *intel_dp)
1799 {
1800 	struct intel_display *display = to_intel_display(intel_dp);
1801 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1802 
1803 	drm_WARN_ON(display->drm,
1804 		    transcoder_has_psr2(display, cpu_transcoder) &&
1805 		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1806 
1807 	drm_WARN_ON(display->drm,
1808 		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1809 
1810 	drm_WARN_ON(display->drm, intel_dp->psr.active);
1811 
1812 	lockdep_assert_held(&intel_dp->psr.lock);
1813 
1814 	/* PSR1, PSR2 and Panel Replay are mutually exclusive. */
1815 	if (intel_dp->psr.panel_replay_enabled)
1816 		dg2_activate_panel_replay(intel_dp);
1817 	else if (intel_dp->psr.sel_update_enabled)
1818 		hsw_activate_psr2(intel_dp);
1819 	else
1820 		hsw_activate_psr1(intel_dp);
1821 
1822 	intel_dp->psr.active = true;
1823 }
1824 
1825 /*
1826  * Wa_16013835468
1827  * Wa_14015648006
1828  */
1829 static void wm_optimization_wa(struct intel_dp *intel_dp,
1830 			       const struct intel_crtc_state *crtc_state)
1831 {
1832 	struct intel_display *display = to_intel_display(intel_dp);
1833 	enum pipe pipe = intel_dp->psr.pipe;
1834 	bool activate = false;
1835 
1836 	/* Wa_14015648006 */
1837 	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1838 		activate = true;
1839 
1840 	/* Wa_16013835468 */
1841 	if (DISPLAY_VER(display) == 12 &&
1842 	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1843 	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1844 		activate = true;
1845 
1846 	if (activate)
1847 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1848 			     0, LATENCY_REPORTING_REMOVED(pipe));
1849 	else
1850 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1851 			     LATENCY_REPORTING_REMOVED(pipe), 0);
1852 }
1853 
1854 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1855 				    const struct intel_crtc_state *crtc_state)
1856 {
1857 	struct intel_display *display = to_intel_display(intel_dp);
1858 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1859 	u32 mask = 0;
1860 
1861 	/*
1862 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1863 	 * SKL+ use hardcoded values for PSR AUX transactions.
1864 	 */
1865 	if (DISPLAY_VER(display) < 9)
1866 		hsw_psr_setup_aux(intel_dp);
1867 
1868 	/*
1869 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1870 	 * mask LPSP to avoid a dependency on other drivers that might block
1871 	 * runtime_pm, besides preventing other HW tracking issues, now that we
1872 	 * can rely on frontbuffer tracking.
1873 	 *
1874 	 * From bspec prior to LunarLake:
1875 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1876 	 * panel replay mode.
1877 	 *
1878 	 * From bspec beyond LunarLake:
1879 	 * Panel Replay on DP: No bits are applicable
1880 	 * Panel Replay on eDP: All bits are applicable
1881 	 */
1882 	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1883 		mask = EDP_PSR_DEBUG_MASK_HPD;
1884 
1885 	if (intel_dp_is_edp(intel_dp)) {
1886 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1887 
1888 		/*
1889 		 * For some unknown reason on HSW non-ULT (or at least on
1890 		 * Dell Latitude E6540) external displays start to flicker
1891 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1892 		 * higher than should be possible with an external display.
1893 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1894 		 * when external displays are active.
1895 		 */
1896 		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
1897 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1898 
1899 		if (DISPLAY_VER(display) < 20)
1900 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1901 
1902 		/*
1903 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1904 		 * registers in order to keep the CURSURFLIVE tricks working :(
1905 		 */
1906 		if (IS_DISPLAY_VER(display, 9, 10))
1907 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1908 
1909 		/* allow PSR with sprite enabled */
1910 		if (display->platform.haswell)
1911 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1912 	}
1913 
1914 	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1915 
1916 	psr_irq_control(intel_dp);
1917 
1918 	/*
1919 	 * TODO: if future platforms support DC3CO in more than one
1920 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1921 	 */
1922 	if (intel_dp->psr.dc3co_exitline)
1923 		intel_de_rmw(display,
1924 			     TRANS_EXITLINE(display, cpu_transcoder),
1925 			     EXITLINE_MASK,
1926 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1927 
1928 	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1929 		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1930 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1931 			     IGNORE_PSR2_HW_TRACKING : 0);
1932 
1933 	/*
1934 	 * Wa_16013835468
1935 	 * Wa_14015648006
1936 	 */
1937 	wm_optimization_wa(intel_dp, crtc_state);
1938 
1939 	if (intel_dp->psr.sel_update_enabled) {
1940 		if (DISPLAY_VER(display) == 9)
1941 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
1942 				     PSR2_VSC_ENABLE_PROG_HEADER |
1943 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1944 
1945 		/*
1946 		 * Wa_16014451276:adlp,mtl[a0,b0]
1947 		 * All supported adlp panels have 1-based X granularity; this may
1948 		 * cause issues if unsupported panels are used.
1949 		 */
1950 		if (!intel_dp->psr.panel_replay_enabled &&
1951 		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1952 		     display->platform.alderlake_p))
1953 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
1954 				     0, ADLP_1_BASED_X_GRANULARITY);
1955 
1956 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1957 		if (!intel_dp->psr.panel_replay_enabled &&
1958 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1959 			intel_de_rmw(display,
1960 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1961 				     0,
1962 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1963 		else if (display->platform.alderlake_p)
1964 			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1965 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1966 	}
1967 
1968 	/* Wa_16025596647 */
1969 	if ((DISPLAY_VER(display) == 20 ||
1970 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1971 	    !intel_dp->psr.panel_replay_enabled)
1972 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
1973 
1974 	intel_alpm_configure(intel_dp, crtc_state);
1975 }
1976 
1977 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1978 {
1979 	struct intel_display *display = to_intel_display(intel_dp);
1980 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1981 	u32 val;
1982 
1983 	if (intel_dp->psr.panel_replay_enabled)
1984 		goto no_err;
1985 
1986 	/*
1987 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1988 	 * will still keep the error set even after the reset done in the
1989 	 * irq_preinstall and irq_uninstall hooks.
1990 	 * Enabling PSR in this situation causes the screen to freeze the
1991 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1992 	 * to avoid any rendering problems.
1993 	 */
1994 	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1995 	val &= psr_irq_psr_error_bit_get(intel_dp);
1996 	if (val) {
1997 		intel_dp->psr.sink_not_reliable = true;
1998 		drm_dbg_kms(display->drm,
1999 			    "PSR interruption error set, not enabling PSR\n");
2000 		return false;
2001 	}
2002 
2003 no_err:
2004 	return true;
2005 }
2006 
2007 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
2008 				    const struct intel_crtc_state *crtc_state)
2009 {
2010 	struct intel_display *display = to_intel_display(intel_dp);
2011 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2012 	u32 val;
2013 
2014 	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
2015 
2016 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
2017 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
2018 	intel_dp->psr.busy_frontbuffer_bits = 0;
2019 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
2020 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
2021 	/* DC5/DC6 requires at least 6 idle frames */
2022 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
2023 	intel_dp->psr.dc3co_exit_delay = val;
2024 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
2025 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
2026 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
2027 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2028 	intel_dp->psr.req_psr2_sdp_prior_scanline =
2029 		crtc_state->req_psr2_sdp_prior_scanline;
2030 	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
2031 
2032 	if (!psr_interrupt_error_check(intel_dp))
2033 		return;
2034 
2035 	if (intel_dp->psr.panel_replay_enabled)
2036 		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2037 	else
2038 		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2039 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2040 
2041 	/*
2042 	 * Enabling the sink here handles only PSR; the Panel Replay enable
2043 	 * bit has already been written at this point. Sink ALPM is enabled
2044 	 * here for both PSR and Panel Replay, see
2045 	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2046 	 *  - Selective Update
2047 	 *  - Region Early Transport
2048 	 *  - Selective Update Region Scanline Capture
2049 	 *  - VSC_SDP_CRC
2050 	 *  - HPD on different Errors
2051 	 *  - CRC verification
2052 	 * are written for PSR and Panel Replay here.
2053 	 */
2054 	intel_psr_enable_sink(intel_dp, crtc_state);
2055 
2056 	if (intel_dp_is_edp(intel_dp))
2057 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2058 
2059 	intel_psr_enable_source(intel_dp, crtc_state);
2060 	intel_dp->psr.enabled = true;
2061 	intel_dp->psr.pause_counter = 0;
2062 
2063 	/*
2064 	 * Link_ok is sticky and set here on PSR enable. We can assume link
2065 	 * training is complete as we never continue to PSR enable with
2066 	 * untrained link. Link_ok is kept as set until first short pulse
2067 	 * interrupt. This is targeted at working around panels that report a
2068 	 * bad link after PSR is enabled.
2069 	 */
2070 	intel_dp->psr.link_ok = true;
2071 
2072 	intel_psr_activate(intel_dp);
2073 }
2074 
2075 static void intel_psr_exit(struct intel_dp *intel_dp)
2076 {
2077 	struct intel_display *display = to_intel_display(intel_dp);
2078 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2079 	u32 val;
2080 
2081 	if (!intel_dp->psr.active) {
2082 		if (transcoder_has_psr2(display, cpu_transcoder)) {
2083 			val = intel_de_read(display,
2084 					    EDP_PSR2_CTL(display, cpu_transcoder));
2085 			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2086 		}
2087 
2088 		val = intel_de_read(display,
2089 				    psr_ctl_reg(display, cpu_transcoder));
2090 		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2091 
2092 		return;
2093 	}
2094 
2095 	if (intel_dp->psr.panel_replay_enabled) {
2096 		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2097 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2098 	} else if (intel_dp->psr.sel_update_enabled) {
2099 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2100 
2101 		val = intel_de_rmw(display,
2102 				   EDP_PSR2_CTL(display, cpu_transcoder),
2103 				   EDP_PSR2_ENABLE, 0);
2104 
2105 		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2106 	} else {
2107 		if (DISPLAY_VER(display) == 20 ||
2108 		    IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
2109 			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
2110 								       intel_dp->psr.pipe,
2111 								       false);
2112 
2113 		val = intel_de_rmw(display,
2114 				   psr_ctl_reg(display, cpu_transcoder),
2115 				   EDP_PSR_ENABLE, 0);
2116 
2117 		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2118 	}
2119 	intel_dp->psr.active = false;
2120 }
2121 
2122 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2123 {
2124 	struct intel_display *display = to_intel_display(intel_dp);
2125 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2126 	i915_reg_t psr_status;
2127 	u32 psr_status_mask;
2128 
2129 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2130 					  intel_dp->psr.panel_replay_enabled)) {
2131 		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2132 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2133 	} else {
2134 		psr_status = psr_status_reg(display, cpu_transcoder);
2135 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2136 	}
2137 
2138 	/* Wait till PSR is idle */
2139 	if (intel_de_wait_for_clear(display, psr_status,
2140 				    psr_status_mask, 2000))
2141 		drm_err(display->drm, "Timed out waiting for PSR idle state\n");
2142 }
2143 
2144 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2145 {
2146 	struct intel_display *display = to_intel_display(intel_dp);
2147 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2148 
2149 	lockdep_assert_held(&intel_dp->psr.lock);
2150 
2151 	if (!intel_dp->psr.enabled)
2152 		return;
2153 
2154 	if (intel_dp->psr.panel_replay_enabled)
2155 		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2156 	else
2157 		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2158 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2159 
2160 	intel_psr_exit(intel_dp);
2161 	intel_psr_wait_exit_locked(intel_dp);
2162 
2163 	/*
2164 	 * Wa_16013835468
2165 	 * Wa_14015648006
2166 	 */
2167 	if (DISPLAY_VER(display) >= 11)
2168 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2169 			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2170 
2171 	if (intel_dp->psr.sel_update_enabled) {
2172 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2173 		if (!intel_dp->psr.panel_replay_enabled &&
2174 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2175 			intel_de_rmw(display,
2176 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2177 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2178 		else if (display->platform.alderlake_p)
2179 			intel_de_rmw(display, CLKGATE_DIS_MISC,
2180 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2181 	}
2182 
2183 	if (intel_dp_is_edp(intel_dp))
2184 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2185 
2186 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
2187 		intel_alpm_disable(intel_dp);
2188 
2189 	/* Disable PSR on Sink */
2190 	if (!intel_dp->psr.panel_replay_enabled) {
2191 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2192 
2193 		if (intel_dp->psr.sel_update_enabled)
2194 			drm_dp_dpcd_writeb(&intel_dp->aux,
2195 					   DP_RECEIVER_ALPM_CONFIG, 0);
2196 	}
2197 
2198 	/* Wa_16025596647 */
2199 	if ((DISPLAY_VER(display) == 20 ||
2200 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2201 	    !intel_dp->psr.panel_replay_enabled)
2202 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
2203 
2204 	intel_dp->psr.enabled = false;
2205 	intel_dp->psr.panel_replay_enabled = false;
2206 	intel_dp->psr.sel_update_enabled = false;
2207 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2208 	intel_dp->psr.su_region_et_enabled = false;
2209 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2210 	intel_dp->psr.active_non_psr_pipes = 0;
2211 }
2212 
2213 /**
2214  * intel_psr_disable - Disable PSR
2215  * @intel_dp: Intel DP
2216  * @old_crtc_state: old CRTC state
2217  *
2218  * This function needs to be called before disabling pipe.
2219  */
2220 void intel_psr_disable(struct intel_dp *intel_dp,
2221 		       const struct intel_crtc_state *old_crtc_state)
2222 {
2223 	struct intel_display *display = to_intel_display(intel_dp);
2224 
2225 	if (!old_crtc_state->has_psr)
2226 		return;
2227 
2228 	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2229 			!CAN_PANEL_REPLAY(intel_dp)))
2230 		return;
2231 
2232 	mutex_lock(&intel_dp->psr.lock);
2233 
2234 	intel_psr_disable_locked(intel_dp);
2235 
2236 	intel_dp->psr.link_ok = false;
2237 
2238 	mutex_unlock(&intel_dp->psr.lock);
2239 	cancel_work_sync(&intel_dp->psr.work);
2240 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2241 }
2242 
2243 /**
2244  * intel_psr_pause - Pause PSR
2245  * @intel_dp: Intel DP
2246  *
2247  * This function needs to be called after enabling PSR.
2248  */
2249 void intel_psr_pause(struct intel_dp *intel_dp)
2250 {
2251 	struct intel_psr *psr = &intel_dp->psr;
2252 
2253 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2254 		return;
2255 
2256 	mutex_lock(&psr->lock);
2257 
2258 	if (!psr->enabled) {
2259 		mutex_unlock(&psr->lock);
2260 		return;
2261 	}
2262 
2263 	if (intel_dp->psr.pause_counter++ == 0) {
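		/* First pause: deactivate PSR and wait for the HW to go idle */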
2264 		intel_psr_exit(intel_dp);
2265 		intel_psr_wait_exit_locked(intel_dp);
2266 	}
2267 
2268 	mutex_unlock(&psr->lock);
2269 
2270 	cancel_work_sync(&psr->work);
2271 	cancel_delayed_work_sync(&psr->dc3co_work);
2272 }
2273 
2274 /**
2275  * intel_psr_resume - Resume PSR
2276  * @intel_dp: Intel DP
2277  *
2278  * This function needs to be called after pausing PSR.
2279  */
2280 void intel_psr_resume(struct intel_dp *intel_dp)
2281 {
2282 	struct intel_display *display = to_intel_display(intel_dp);
2283 	struct intel_psr *psr = &intel_dp->psr;
2284 
2285 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2286 		return;
2287 
2288 	mutex_lock(&psr->lock);
2289 
2290 	if (!psr->enabled)
2291 		goto out;
2292 
2293 	if (!psr->pause_counter) {
2294 		drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2295 		goto out;
2296 	}
2297 
2298 	if (--intel_dp->psr.pause_counter == 0)
2299 		intel_psr_activate(intel_dp);
2300 
2301 out:
2302 	mutex_unlock(&psr->lock);
2303 }
2304 
2305 /**
2306  * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
2307  * notification.
2308  * @crtc_state: CRTC state
2309  *
2310  * We need to block DC6 entry for Panel Replay as enabling VBI doesn't
2311  * prevent it. Panel Replay switches the main link off on DC entry, which
2312  * means vblank interrupts are not fired and is a problem if user-space is
2313  * polling for vblank events. Also Wa_16025596647 needs to know when vblank
2314  * is enabled/disabled.
2315  */
2316 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2317 {
2318 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2319 	struct intel_display *display = to_intel_display(crtc_state);
2320 	struct intel_encoder *encoder;
2321 
2322 	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2323 		struct intel_dp *intel_dp;
2324 
2325 		if (!intel_encoder_is_dp(encoder))
2326 			continue;
2327 
2328 		intel_dp = enc_to_intel_dp(encoder);
2329 
2330 		if (!intel_dp_is_edp(intel_dp))
2331 			continue;
2332 
2333 		if (CAN_PANEL_REPLAY(intel_dp))
2334 			return true;
2335 
2336 		if ((DISPLAY_VER(display) == 20 ||
2337 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2338 		    CAN_PSR(intel_dp))
2339 			return true;
2340 	}
2341 
2342 	return false;
2343 }
2344 
2345 /**
2346  * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2347  * @dsb: DSB context
2348  * @state: the atomic state
2349  * @crtc: the CRTC
2350  *
2351  * Generate PSR "Frame Change" event.
2352  */
2353 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2354 					  struct intel_atomic_state *state,
2355 					  struct intel_crtc *crtc)
2356 {
2357 	const struct intel_crtc_state *crtc_state =
2358 		intel_pre_commit_crtc_state(state, crtc);
2359 	struct intel_display *display = to_intel_display(crtc);
2360 
2361 	if (crtc_state->has_psr)
2362 		intel_de_write_dsb(display, dsb,
2363 				   CURSURFLIVE(display, crtc->pipe), 0);
2364 }
2365 
2366 /**
2367  * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
2368  * @crtc_state: the crtc state
2369  *
2370  * Return minimum vblank delay needed by PSR.
2371  */
2372 int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
2373 {
2374 	struct intel_display *display = to_intel_display(crtc_state);
2375 
2376 	if (!crtc_state->has_psr)
2377 		return 0;
2378 
2379 	/* Wa_14015401596 */
2380 	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
2381 		return 1;
2382 
2383 	/* Rest is for SRD_STATUS needed on LunarLake and onwards */
2384 	if (DISPLAY_VER(display) < 20)
2385 		return 0;
2386 
2387 	/*
2388 	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
2389 	 *
2390 	 * To deterministically capture the transition of the state machine
2391 	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
2392 	 * one line after the non-delayed V. Blank.
2393 	 *
2394 	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
2395 	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
2396 	 * - TRANS_VTOTAL[ Vertical Active ])
2397 	 *
2398 	 * SRD_STATUS is used only by PSR1 on PantherLake.
2399 	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
2400 	 */
2401 
2402 	if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
2403 					   crtc_state->has_sel_update))
2404 		return 0;
2405 	else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
2406 					       intel_crtc_has_type(crtc_state,
2407 								   INTEL_OUTPUT_EDP)))
2408 		return 0;
2409 	else
2410 		return 1;
2411 }
2412 
2413 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2414 {
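	/* ADL-P and display 14+ do not need an explicit manual tracking enable bit */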
2415 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2416 		PSR2_MAN_TRK_CTL_ENABLE;
2417 }
2418 
2419 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2420 {
2421 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2422 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2423 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2424 }
2425 
2426 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2427 {
2428 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2429 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2430 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2431 }
2432 
2433 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2434 {
2435 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2436 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2437 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2438 }
2439 
2440 static void intel_psr_force_update(struct intel_dp *intel_dp)
2441 {
2442 	struct intel_display *display = to_intel_display(intel_dp);
2443 
2444 	/*
2445 	 * Display WA #0884: skl+
2446 	 * This documented WA for bxt can be safely applied
2447 	 * broadly so we can force HW tracking to exit PSR
2448 	 * instead of disabling and re-enabling.
2449 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2450 	 * but it makes more sense to write to the currently active
2451 	 * pipe.
2452 	 *
2453 	 * This workaround does not exist for platforms with display 10 or
2454 	 * newer, but testing proved that it works up to display 13; for
2455 	 * anything newer, testing will be needed.
2456 	 */
2457 	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2458 }
2459 
2460 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
2461 					  const struct intel_crtc_state *crtc_state)
2462 {
2463 	struct intel_display *display = to_intel_display(crtc_state);
2464 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2465 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2466 	struct intel_encoder *encoder;
2467 
2468 	if (!crtc_state->enable_psr2_sel_fetch)
2469 		return;
2470 
2471 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2472 					     crtc_state->uapi.encoder_mask) {
2473 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2474 
2475 		if (!dsb)
2476 			lockdep_assert_held(&intel_dp->psr.lock);
2477 
2478 		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
2479 			return;
2480 		break;
2481 	}
2482 
2483 	intel_de_write_dsb(display, dsb,
2484 			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2485 			   crtc_state->psr2_man_track_ctl);
2486 
2487 	if (!crtc_state->enable_psr2_su_region_et)
2488 		return;
2489 
2490 	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2491 			   crtc_state->pipe_srcsz_early_tpt);
2492 }
2493 
2494 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2495 				  bool full_update)
2496 {
2497 	struct intel_display *display = to_intel_display(crtc_state);
2498 	u32 val = man_trk_ctl_enable_bit_get(display);
2499 
2500 	/* SF partial frame enable has to be set even on full update */
2501 	val |= man_trk_ctl_partial_frame_bit_get(display);
2502 
2503 	if (full_update) {
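		/* Full update: continuous full frame fetch, no SU region needed */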
2504 		val |= man_trk_ctl_continuos_full_frame(display);
2505 		goto exit;
2506 	}
2507 
2508 	if (crtc_state->psr2_su_area.y1 == -1)
2509 		goto exit;
2510 
2511 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
2512 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2513 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2514 	} else {
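		/*
		 * Older hardware (pre-ADL-P) programs the SU region in blocks
		 * of 4 lines, hence the alignment check and the divide by 4
		 * below.
		 */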
2515 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2516 			    crtc_state->psr2_su_area.y1 % 4 ||
2517 			    crtc_state->psr2_su_area.y2 % 4);
2518 
2519 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2520 			crtc_state->psr2_su_area.y1 / 4 + 1);
2521 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2522 			crtc_state->psr2_su_area.y2 / 4 + 1);
2523 	}
2524 exit:
2525 	crtc_state->psr2_man_track_ctl = val;
2526 }
2527 
2528 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2529 					  bool full_update)
2530 {
2531 	int width, height;
2532 
2533 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2534 		return 0;
2535 
2536 	width = drm_rect_width(&crtc_state->psr2_su_area);
2537 	height = drm_rect_height(&crtc_state->psr2_su_area);
2538 
2539 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2540 }
2541 
2542 static void clip_area_update(struct drm_rect *overlap_damage_area,
2543 			     struct drm_rect *damage_area,
2544 			     struct drm_rect *pipe_src)
2545 {
2546 	if (!drm_rect_intersect(damage_area, pipe_src))
2547 		return;
2548 
2549 	if (overlap_damage_area->y1 == -1) {
2550 		overlap_damage_area->y1 = damage_area->y1;
2551 		overlap_damage_area->y2 = damage_area->y2;
2552 		return;
2553 	}
2554 
2555 	if (damage_area->y1 < overlap_damage_area->y1)
2556 		overlap_damage_area->y1 = damage_area->y1;
2557 
2558 	if (damage_area->y2 > overlap_damage_area->y2)
2559 		overlap_damage_area->y2 = damage_area->y2;
2560 }
2561 
2562 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2563 {
2564 	struct intel_display *display = to_intel_display(crtc_state);
2565 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2566 	u16 y_alignment;
2567 
2568 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2569 	if (crtc_state->dsc.compression_enable &&
2570 	    (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2571 		y_alignment = vdsc_cfg->slice_height;
2572 	else
2573 		y_alignment = crtc_state->su_y_granularity;
2574 
2575 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
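	/* Round y2 up to the next y_alignment boundary */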
2576 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2577 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2578 						y_alignment) + 1) * y_alignment;
2579 }
2580 
2581 /*
2582  * When early transport is in use we need to extend the SU area to fully
2583  * cover the cursor whenever the cursor overlaps the SU area.
2584  */
2585 static void
2586 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2587 				  struct intel_crtc *crtc,
2588 				  bool *cursor_in_su_area)
2589 {
2590 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2591 	struct intel_plane_state *new_plane_state;
2592 	struct intel_plane *plane;
2593 	int i;
2594 
2595 	if (!crtc_state->enable_psr2_su_region_et)
2596 		return;
2597 
2598 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2599 		struct drm_rect inter;
2600 
2601 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2602 			continue;
2603 
2604 		if (plane->id != PLANE_CURSOR)
2605 			continue;
2606 
2607 		if (!new_plane_state->uapi.visible)
2608 			continue;
2609 
2610 		inter = crtc_state->psr2_su_area;
2611 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2612 			continue;
2613 
2614 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2615 				 &crtc_state->pipe_src);
2616 		*cursor_in_su_area = true;
2617 	}
2618 }
2619 
2620 /*
2621  * TODO: Not clear how to handle planes with negative position. Planes are
2622  * also not updated if they have a negative X position, so for now do a
2623  * full update in these cases.
2624  *
2625  * Plane scaling and rotation are not supported by selective fetch and both
2626  * properties can change without a modeset, so they need to be checked at every
2627  * atomic commit.
2628  */
2629 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2630 {
2631 	if (plane_state->uapi.dst.y1 < 0 ||
2632 	    plane_state->uapi.dst.x1 < 0 ||
2633 	    plane_state->scaler_id >= 0 ||
2634 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2635 		return false;
2636 
2637 	return true;
2638 }
2639 
2640 /*
2641  * Check for pipe properties that are not supported by selective fetch.
2642  *
2643  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2644  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2645  * enabled and going to the full update path.
2646  */
2647 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2648 {
2649 	if (crtc_state->scaler_state.scaler_id >= 0)
2650 		return false;
2651 
2652 	return true;
2653 }
2654 
2655 /* Wa_14019834836 */
2656 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2657 {
2658 	struct intel_display *display = to_intel_display(crtc_state);
2659 	struct intel_encoder *encoder;
2660 	int hactive_limit;
2661 
2662 	if (crtc_state->psr2_su_area.y1 != 0 ||
2663 	    crtc_state->psr2_su_area.y2 != 0)
2664 		return;
2665 
2666 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2667 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2668 	else
2669 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2670 
2671 	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2672 		return;
2673 
2674 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2675 					     crtc_state->uapi.encoder_mask) {
2676 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2677 
2678 		if (!intel_dp_is_edp(intel_dp) &&
2679 		    intel_dp->psr.panel_replay_enabled &&
2680 		    intel_dp->psr.sel_update_enabled) {
2681 			crtc_state->psr2_su_area.y2++;
2682 			return;
2683 		}
2684 	}
2685 }
2686 
2687 static void
2688 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2689 {
2690 	struct intel_display *display = to_intel_display(crtc_state);
2691 
2692 	/* Wa_14014971492 */
2693 	if (!crtc_state->has_panel_replay &&
2694 	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2695 	      display->platform.alderlake_p || display->platform.tigerlake)) &&
2696 	    crtc_state->splitter.enable)
2697 		crtc_state->psr2_su_area.y1 = 0;
2698 
2699 	/* Wa_14019834836 */
2700 	if (DISPLAY_VER(display) == 30)
2701 		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2702 }
2703 
2704 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2705 				struct intel_crtc *crtc)
2706 {
2707 	struct intel_display *display = to_intel_display(state);
2708 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2709 	struct intel_plane_state *new_plane_state, *old_plane_state;
2710 	struct intel_plane *plane;
2711 	bool full_update = false, cursor_in_su_area = false;
2712 	int i, ret;
2713 
2714 	if (!crtc_state->enable_psr2_sel_fetch)
2715 		return 0;
2716 
2717 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2718 		full_update = true;
2719 		goto skip_sel_fetch_set_loop;
2720 	}
2721 
2722 	crtc_state->psr2_su_area.x1 = 0;
2723 	crtc_state->psr2_su_area.y1 = -1;
2724 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2725 	crtc_state->psr2_su_area.y2 = -1;
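	/* y1/y2 == -1 means no damaged area has been accumulated yet */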
2726 
2727 	/*
2728 	 * Calculate the minimal selective fetch area of each plane and
2729 	 * accumulate the pipe damaged area.
2730 	 * In the next loop the plane selective fetch areas will actually be
2731 	 * set using the whole pipe damaged area.
2732 	 */
2733 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2734 					     new_plane_state, i) {
2735 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2736 						      .x2 = INT_MAX };
2737 
2738 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2739 			continue;
2740 
2741 		if (!new_plane_state->uapi.visible &&
2742 		    !old_plane_state->uapi.visible)
2743 			continue;
2744 
2745 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2746 			full_update = true;
2747 			break;
2748 		}
2749 
2750 		/*
2751 		 * If the visibility changed or the plane moved, mark the whole
2752 		 * plane area as damaged as it needs a complete redraw in both
2753 		 * the old and new position.
2754 		 */
2755 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2756 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2757 				     &old_plane_state->uapi.dst)) {
2758 			if (old_plane_state->uapi.visible) {
2759 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2760 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2761 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2762 						 &crtc_state->pipe_src);
2763 			}
2764 
2765 			if (new_plane_state->uapi.visible) {
2766 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2767 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2768 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2769 						 &crtc_state->pipe_src);
2770 			}
2771 			continue;
2772 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2773 			/* If alpha changed mark the whole plane area as damaged */
2774 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2775 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2776 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2777 					 &crtc_state->pipe_src);
2778 			continue;
2779 		}
2780 
2781 		src = drm_plane_state_src(&new_plane_state->uapi);
2782 		drm_rect_fp_to_int(&src, &src);
2783 
2784 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2785 						     &new_plane_state->uapi, &damaged_area))
2786 			continue;
2787 
2788 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2789 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2790 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2791 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2792 
2793 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2794 	}
2795 
2796 	/*
2797 	 * TODO: For now we are just using full update in case
2798 	 * selective fetch area calculation fails. To optimize this we
2799 	 * should identify cases where this happens and fix the area
2800 	 * calculation for those.
2801 	 */
2802 	if (crtc_state->psr2_su_area.y1 == -1) {
2803 		drm_info_once(display->drm,
2804 			      "Selective fetch area calculation failed in pipe %c\n",
2805 			      pipe_name(crtc->pipe));
2806 		full_update = true;
2807 	}
2808 
2809 	if (full_update)
2810 		goto skip_sel_fetch_set_loop;
2811 
2812 	intel_psr_apply_su_area_workarounds(crtc_state);
2813 
2814 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2815 	if (ret)
2816 		return ret;
2817 
2818 	/*
2819 	 * Adjust the SU area to fully cover the cursor as necessary (early
2820 	 * transport). This needs to be done after
2821 	 * drm_atomic_add_affected_planes to ensure a visible cursor is added
2822 	 * to the affected planes even when the cursor itself is not updated.
2823 	 */
2824 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2825 
2826 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2827 
2828 	/*
2829 	 * Now that we have the pipe damaged area, check if it intersects with
2830 	 * each plane; if it does, set the plane selective fetch area.
2831 	 */
2832 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2833 					     new_plane_state, i) {
2834 		struct drm_rect *sel_fetch_area, inter;
2835 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2836 
2837 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2838 		    !new_plane_state->uapi.visible)
2839 			continue;
2840 
2841 		inter = crtc_state->psr2_su_area;
2842 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2843 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2844 			sel_fetch_area->y1 = -1;
2845 			sel_fetch_area->y2 = -1;
2846 			/*
2847 			 * if plane sel fetch was previously enabled ->
2848 			 * disable it
2849 			 */
2850 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2851 				crtc_state->update_planes |= BIT(plane->id);
2852 
2853 			continue;
2854 		}
2855 
2856 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2857 			full_update = true;
2858 			break;
2859 		}
2860 
2861 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
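		/* Translate the SU area from pipe to plane-relative coordinates */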
2862 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2863 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2864 		crtc_state->update_planes |= BIT(plane->id);
2865 
2866 		/*
2867 		 * Sel_fetch_area is calculated for UV plane. Use
2868 		 * same area for Y plane as well.
2869 		 */
2870 		if (linked) {
2871 			struct intel_plane_state *linked_new_plane_state;
2872 			struct drm_rect *linked_sel_fetch_area;
2873 
2874 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2875 			if (IS_ERR(linked_new_plane_state))
2876 				return PTR_ERR(linked_new_plane_state);
2877 
2878 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2879 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2880 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2881 			crtc_state->update_planes |= BIT(linked->id);
2882 		}
2883 	}
2884 
2885 skip_sel_fetch_set_loop:
2886 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2887 	crtc_state->pipe_srcsz_early_tpt =
2888 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2889 	return 0;
2890 }
2891 
2892 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2893 				struct intel_crtc *crtc)
2894 {
2895 	struct intel_display *display = to_intel_display(state);
2896 	const struct intel_crtc_state *old_crtc_state =
2897 		intel_atomic_get_old_crtc_state(state, crtc);
2898 	const struct intel_crtc_state *new_crtc_state =
2899 		intel_atomic_get_new_crtc_state(state, crtc);
2900 	struct intel_encoder *encoder;
2901 
2902 	if (!HAS_PSR(display))
2903 		return;
2904 
2905 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2906 					     old_crtc_state->uapi.encoder_mask) {
2907 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2908 		struct intel_psr *psr = &intel_dp->psr;
2909 
2910 		mutex_lock(&psr->lock);
2911 
2912 		if (psr->enabled) {
2913 			/*
2914 			 * Reasons to disable:
2915 			 * - PSR disabled in new state
2916 			 * - All planes will go inactive
2917 			 * - Changing between PSR versions
2918 			 * - Region Early Transport changing
2919 			 * - Display WA #1136: skl, bxt
2920 			 */
2921 			if (intel_crtc_needs_modeset(new_crtc_state) ||
2922 			    !new_crtc_state->has_psr ||
2923 			    !new_crtc_state->active_planes ||
2924 			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
2925 			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
2926 			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
2927 			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
2928 				intel_psr_disable_locked(intel_dp);
2929 			else if (new_crtc_state->wm_level_disabled)
2930 				/* Wa_14015648006 */
2931 				wm_optimization_wa(intel_dp, new_crtc_state);
2932 		}
2933 
2934 		mutex_unlock(&psr->lock);
2935 	}
2936 }
2937 
2938 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2939 				 struct intel_crtc *crtc)
2940 {
2941 	struct intel_display *display = to_intel_display(state);
2942 	const struct intel_crtc_state *crtc_state =
2943 		intel_atomic_get_new_crtc_state(state, crtc);
2944 	struct intel_encoder *encoder;
2945 
2946 	if (!crtc_state->has_psr)
2947 		return;
2948 
2949 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2950 					     crtc_state->uapi.encoder_mask) {
2951 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2952 		struct intel_psr *psr = &intel_dp->psr;
2953 		bool keep_disabled = false;
2954 
2955 		mutex_lock(&psr->lock);
2956 
2957 		drm_WARN_ON(display->drm,
2958 			    psr->enabled && !crtc_state->active_planes);
2959 
2960 		keep_disabled |= psr->sink_not_reliable;
2961 		keep_disabled |= !crtc_state->active_planes;
2962 
2963 		/* Display WA #1136: skl, bxt */
2964 		keep_disabled |= DISPLAY_VER(display) < 11 &&
2965 			crtc_state->wm_level_disabled;
2966 
2967 		if (!psr->enabled && !keep_disabled)
2968 			intel_psr_enable_locked(intel_dp, crtc_state);
2969 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2970 			/* Wa_14015648006 */
2971 			wm_optimization_wa(intel_dp, crtc_state);
2972 
2973 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2974 		if (crtc_state->crc_enabled && psr->enabled)
2975 			intel_psr_force_update(intel_dp);
2976 
2977 		/*
2978 		 * Clear possible busy bits in case we have
2979 		 * invalidate -> flip -> flush sequence.
2980 		 */
2981 		intel_dp->psr.busy_frontbuffer_bits = 0;
2982 
2983 		mutex_unlock(&psr->lock);
2984 	}
2985 }
2986 
2987 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2988 {
2989 	struct intel_display *display = to_intel_display(intel_dp);
2990 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2991 
2992 	/*
2993 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2994 	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2995 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2996 	 */
2997 	return intel_de_wait_for_clear(display,
2998 				       EDP_PSR2_STATUS(display, cpu_transcoder),
2999 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
3000 }
3001 
3002 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
3003 {
3004 	struct intel_display *display = to_intel_display(intel_dp);
3005 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3006 
3007 	/*
3008 	 * From bspec: Panel Self Refresh (BDW+)
3009 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3010 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3011 	 * defensive enough to cover everything.
3012 	 */
3013 	return intel_de_wait_for_clear(display,
3014 				       psr_status_reg(display, cpu_transcoder),
3015 				       EDP_PSR_STATUS_STATE_MASK, 50);
3016 }
3017 
3018 /**
3019  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
3020  * @new_crtc_state: new CRTC state
3021  *
3022  * This function is expected to be called from pipe_update_start() where it is
3023  * not expected to race with PSR enable or disable.
3024  */
3025 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
3026 {
3027 	struct intel_display *display = to_intel_display(new_crtc_state);
3028 	struct intel_encoder *encoder;
3029 
3030 	if (!new_crtc_state->has_psr)
3031 		return;
3032 
3033 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3034 					     new_crtc_state->uapi.encoder_mask) {
3035 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3036 		int ret;
3037 
3038 		lockdep_assert_held(&intel_dp->psr.lock);
3039 
3040 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3041 			continue;
3042 
3043 		if (intel_dp->psr.sel_update_enabled)
3044 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
3045 		else
3046 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
3047 
3048 		if (ret)
3049 			drm_err(display->drm,
3050 				"PSR wait timed out, atomic update may fail\n");
3051 	}
3052 }
3053 
3054 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
3055 {
3056 	struct intel_display *display = to_intel_display(intel_dp);
3057 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3058 	i915_reg_t reg;
3059 	u32 mask;
3060 	int err;
3061 
3062 	if (!intel_dp->psr.enabled)
3063 		return false;
3064 
3065 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3066 					  intel_dp->psr.panel_replay_enabled)) {
3067 		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
3068 		mask = EDP_PSR2_STATUS_STATE_MASK;
3069 	} else {
3070 		reg = psr_status_reg(display, cpu_transcoder);
3071 		mask = EDP_PSR_STATUS_STATE_MASK;
3072 	}
3073 
3074 	mutex_unlock(&intel_dp->psr.lock);
3075 
3076 	err = intel_de_wait_for_clear(display, reg, mask, 50);
3077 	if (err)
3078 		drm_err(display->drm,
3079 			"Timed out waiting for PSR Idle for re-enable\n");
3080 
3081 	/* After the unlocked wait, verify that PSR is still wanted! */
3082 	mutex_lock(&intel_dp->psr.lock);
3083 	return err == 0 && intel_dp->psr.enabled;
3084 }
3085 
3086 static int intel_psr_fastset_force(struct intel_display *display)
3087 {
3088 	struct drm_connector_list_iter conn_iter;
3089 	struct drm_modeset_acquire_ctx ctx;
3090 	struct drm_atomic_state *state;
3091 	struct drm_connector *conn;
3092 	int err = 0;
3093 
3094 	state = drm_atomic_state_alloc(display->drm);
3095 	if (!state)
3096 		return -ENOMEM;
3097 
3098 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3099 
3100 	state->acquire_ctx = &ctx;
3101 	to_intel_atomic_state(state)->internal = true;
3102 
3103 retry:
3104 	drm_connector_list_iter_begin(display->drm, &conn_iter);
3105 	drm_for_each_connector_iter(conn, &conn_iter) {
3106 		struct drm_connector_state *conn_state;
3107 		struct drm_crtc_state *crtc_state;
3108 
3109 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3110 			continue;
3111 
3112 		conn_state = drm_atomic_get_connector_state(state, conn);
3113 		if (IS_ERR(conn_state)) {
3114 			err = PTR_ERR(conn_state);
3115 			break;
3116 		}
3117 
3118 		if (!conn_state->crtc)
3119 			continue;
3120 
3121 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3122 		if (IS_ERR(crtc_state)) {
3123 			err = PTR_ERR(crtc_state);
3124 			break;
3125 		}
3126 
3127 		/* Mark mode as changed to trigger a pipe->update() */
3128 		crtc_state->mode_changed = true;
3129 	}
3130 	drm_connector_list_iter_end(&conn_iter);
3131 
3132 	if (err == 0)
3133 		err = drm_atomic_commit(state);
3134 
3135 	if (err == -EDEADLK) {
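		/* Drop contended locks and retry the whole connector walk */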
3136 		drm_atomic_state_clear(state);
3137 		err = drm_modeset_backoff(&ctx);
3138 		if (!err)
3139 			goto retry;
3140 	}
3141 
3142 	drm_modeset_drop_locks(&ctx);
3143 	drm_modeset_acquire_fini(&ctx);
3144 	drm_atomic_state_put(state);
3145 
3146 	return err;
3147 }
3148 
3149 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3150 {
3151 	struct intel_display *display = to_intel_display(intel_dp);
3152 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3153 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3154 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3155 	u32 old_mode, old_disable_bits;
3156 	int ret;
3157 
3158 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3159 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3160 		    I915_PSR_DEBUG_MODE_MASK) ||
3161 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3162 		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3163 		return -EINVAL;
3164 	}
3165 
3166 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3167 	if (ret)
3168 		return ret;
3169 
3170 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3171 	old_disable_bits = intel_dp->psr.debug &
3172 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3173 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3174 
3175 	intel_dp->psr.debug = val;
3176 
3177 	/*
3178 	 * Do it right away if it's already enabled, otherwise it will be done
3179 	 * when enabling the source.
3180 	 */
3181 	if (intel_dp->psr.enabled)
3182 		psr_irq_control(intel_dp);
3183 
3184 	mutex_unlock(&intel_dp->psr.lock);
3185 
3186 	if (old_mode != mode || old_disable_bits != disable_bits)
3187 		ret = intel_psr_fastset_force(display);
3188 
3189 	return ret;
3190 }
3191 
3192 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3193 {
3194 	struct intel_psr *psr = &intel_dp->psr;
3195 
3196 	intel_psr_disable_locked(intel_dp);
3197 	psr->sink_not_reliable = true;
3198 	/* let's make sure that the sink is awake */
3199 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3200 }
3201 
3202 static void intel_psr_work(struct work_struct *work)
3203 {
3204 	struct intel_dp *intel_dp =
3205 		container_of(work, typeof(*intel_dp), psr.work);
3206 
3207 	mutex_lock(&intel_dp->psr.lock);
3208 
3209 	if (!intel_dp->psr.enabled)
3210 		goto unlock;
3211 
3212 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
3213 		intel_psr_handle_irq(intel_dp);
3214 
3215 	/*
3216 	 * We have to make sure PSR is ready for re-enable,
3217 	 * otherwise it stays disabled until the next full enable/disable cycle.
3218 	 * PSR might take some time to get fully disabled
3219 	 * and be ready for re-enable.
3220 	 */
3221 	if (!__psr_wait_for_idle_locked(intel_dp))
3222 		goto unlock;
3223 
3224 	/*
3225 	 * The delayed work can race with an invalidate hence we need to
3226 	 * recheck. Since psr_flush first clears this and then reschedules we
3227 	 * won't ever miss a flush when bailing out here.
3228 	 */
3229 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3230 		goto unlock;
3231 
3232 	intel_psr_activate(intel_dp);
3233 unlock:
3234 	mutex_unlock(&intel_dp->psr.lock);
3235 }
3236 
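/*
 * Program a full frame update while selective fetch is enabled: LNL+ has a
 * dedicated SFF control register, older platforms use the PSR2 manual
 * tracking control register instead.
 */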
3237 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3238 {
3239 	struct intel_display *display = to_intel_display(intel_dp);
3240 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3241 
3242 	if (!intel_dp->psr.psr2_sel_fetch_enabled)
3243 		return;
3244 
3245 	if (DISPLAY_VER(display) >= 20)
3246 		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3247 			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3248 	else
3249 		intel_de_write(display,
3250 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3251 			       man_trk_ctl_enable_bit_get(display) |
3252 			       man_trk_ctl_partial_frame_bit_get(display) |
3253 			       man_trk_ctl_single_full_frame_bit_get(display) |
3254 			       man_trk_ctl_continuos_full_frame(display));
3255 }
3256 
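/*
 * Invalidate handling: with selective fetch, flag CFF as enabled and program
 * a full frame update before forcing an update; without selective fetch,
 * simply exit PSR.
 */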
3257 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3258 {
3259 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3260 		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3261 			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3262 			intel_psr_configure_full_frame_update(intel_dp);
3263 		}
3264 
3265 		intel_psr_force_update(intel_dp);
3266 	} else {
3267 		intel_psr_exit(intel_dp);
3268 	}
3269 }
3270 
3271 /**
3272  * intel_psr_invalidate - Invalidate PSR
3273  * @display: display device
3274  * @frontbuffer_bits: frontbuffer plane tracking bits
3275  * @origin: which operation caused the invalidate
3276  *
3277  * Since the hardware frontbuffer tracking has gaps we need to integrate
3278  * with the software frontbuffer tracking. This function gets called every
3279  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3280  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3281  *
3282  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3283  */
3284 void intel_psr_invalidate(struct intel_display *display,
3285 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3286 {
3287 	struct intel_encoder *encoder;
3288 
3289 	if (origin == ORIGIN_FLIP)
3290 		return;
3291 
3292 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3293 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3294 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3295 
3296 		mutex_lock(&intel_dp->psr.lock);
3297 		if (!intel_dp->psr.enabled) {
3298 			mutex_unlock(&intel_dp->psr.lock);
3299 			continue;
3300 		}
3301 
3302 		pipe_frontbuffer_bits &=
3303 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3304 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3305 
3306 		if (pipe_frontbuffer_bits)
3307 			_psr_invalidate_handle(intel_dp);
3308 
3309 		mutex_unlock(&intel_dp->psr.lock);
3310 	}
3311 }
3312 /*
3313  * Once we completely rely on PSR2 S/W tracking, intel_psr_flush() will
3314  * invalidate and flush the PSR for ORIGIN_FLIP events as well, therefore
3315  * tgl_dc3co_flush_locked() will need to be changed accordingly in the
3316  * future.
3317  */
3318 static void
3319 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3320 		       enum fb_op_origin origin)
3321 {
3322 	struct intel_display *display = to_intel_display(intel_dp);
3323 	struct drm_i915_private *i915 = to_i915(display->drm);
3324 
3325 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3326 	    !intel_dp->psr.active)
3327 		return;
3328 
3329 	/*
3330 	 * Every frontbuffer flush/flip event modifies the delay of the delayed
3331 	 * work; when the delayed work finally runs, the display has been idle.
3332 	 */
3333 	if (!(frontbuffer_bits &
3334 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3335 		return;
3336 
3337 	tgl_psr2_enable_dc3co(intel_dp);
3338 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3339 			 intel_dp->psr.dc3co_exit_delay);
3340 }
3341 
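/*
 * Flush handling: drop the CFF flag once no frontbuffer bits are busy,
 * reprogram the full frame update, force an update and, without selective
 * fetch, queue the PSR work to re-activate PSR once the HW is idle.
 */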
3342 static void _psr_flush_handle(struct intel_dp *intel_dp)
3343 {
3344 	struct intel_display *display = to_intel_display(intel_dp);
3345 	struct drm_i915_private *dev_priv = to_i915(display->drm);
3346 
3347 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3348 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3349 			/* can we turn CFF off? */
3350 			if (intel_dp->psr.busy_frontbuffer_bits == 0)
3351 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3352 		}
3353 
3354 		/*
3355 		 * Still keep the CFF bit enabled as we don't have a proper SU
3356 		 * configuration in case an update is sent for any reason after
3357 		 * the SFF bit gets cleared by the HW on the next vblank.
3358 		 *
3359 		 * NOTE: Setting the CFF bit is not needed from LunarLake onwards
3360 		 * as we have a dedicated register for the SFF bit and are not
3361 		 * overwriting the existing SU configuration.
3362 		 */
3363 		intel_psr_configure_full_frame_update(intel_dp);
3364 	}
3365 
3366 	intel_psr_force_update(intel_dp);
3367 
3368 	if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
3369 	    !intel_dp->psr.busy_frontbuffer_bits)
3370 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3371 }
3372 
3373 /**
3374  * intel_psr_flush - Flush PSR
3375  * @display: display device
3376  * @frontbuffer_bits: frontbuffer plane tracking bits
3377  * @origin: which operation caused the flush
3378  *
3379  * Since the hardware frontbuffer tracking has gaps we need to integrate
3380  * with the software frontbuffer tracking. This function gets called every
3381  * time frontbuffer rendering has completed and flushed out to memory. PSR
3382  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3383  *
3384  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3385  */
3386 void intel_psr_flush(struct intel_display *display,
3387 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3388 {
3389 	struct intel_encoder *encoder;
3390 
3391 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3392 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3393 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3394 
3395 		mutex_lock(&intel_dp->psr.lock);
3396 		if (!intel_dp->psr.enabled) {
3397 			mutex_unlock(&intel_dp->psr.lock);
3398 			continue;
3399 		}
3400 
3401 		pipe_frontbuffer_bits &=
3402 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3403 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3404 
3405 		/*
3406 		 * If the PSR is paused by an explicit intel_psr_paused() call,
3407 		 * we have to ensure that the PSR is not activated until
3408 		 * intel_psr_resume() is called.
3409 		 */
3410 		if (intel_dp->psr.pause_counter)
3411 			goto unlock;
3412 
3413 		if (origin == ORIGIN_FLIP ||
3414 		    (origin == ORIGIN_CURSOR_UPDATE &&
3415 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3416 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3417 			goto unlock;
3418 		}
3419 
3420 		if (pipe_frontbuffer_bits == 0)
3421 			goto unlock;
3422 
3423 		/* By definition flush = invalidate + flush */
3424 		_psr_flush_handle(intel_dp);
3425 unlock:
3426 		mutex_unlock(&intel_dp->psr.lock);
3427 	}
3428 }
3429 
3430 /**
3431  * intel_psr_init - Init basic PSR work and mutex.
3432  * @intel_dp: Intel DP
3433  *
3434  * This function is called after the initializing connector.
3435  * This function is called after initializing the connector
3436  * (connector initialization handles the connector capabilities) and it
3437  * initializes basic PSR state for each DP encoder.
3438 void intel_psr_init(struct intel_dp *intel_dp)
3439 {
3440 	struct intel_display *display = to_intel_display(intel_dp);
3441 	struct intel_connector *connector = intel_dp->attached_connector;
3442 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3443 
3444 	if (!(HAS_PSR(display) || HAS_DP20(display)))
3445 		return;
3446 
3447 	/*
3448 	 * The HSW spec explicitly says PSR is tied to port A.
3449 	 * BDW+ platforms have an instance of the PSR registers per transcoder,
3450 	 * but on BDW, GEN9 and GEN11 the HW team has only validated the eDP
3451 	 * transcoder.
3452 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3453 	 * so let's keep it hardcoded to PORT_A there.
3454 	 * GEN12 supports an instance of the PSR registers per transcoder.
3455 	 */
3456 	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3457 		drm_dbg_kms(display->drm,
3458 			    "PSR condition failed: Port not supported\n");
3459 		return;
3460 	}
3461 
3462 	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3463 	    DISPLAY_VER(display) >= 20)
3464 		intel_dp->psr.source_panel_replay_support = true;
3465 
3466 	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3467 		intel_dp->psr.source_support = true;
3468 
3469 	/* Set link_standby vs. link_off defaults */
3470 	if (DISPLAY_VER(display) < 12)
3471 		/* For platforms before TGL let's respect the VBT again */
3472 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3473 
3474 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3475 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3476 	mutex_init(&intel_dp->psr.lock);
3477 }
3478 
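/*
 * Read the sink status and error status DPCD registers, using the Panel
 * Replay variants when Panel Replay is enabled. The returned status is
 * masked with DP_PSR_SINK_STATE_MASK.
 */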
3479 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3480 					   u8 *status, u8 *error_status)
3481 {
3482 	struct drm_dp_aux *aux = &intel_dp->aux;
3483 	int ret;
3484 	unsigned int offset;
3485 
3486 	offset = intel_dp->psr.panel_replay_enabled ?
3487 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3488 
3489 	ret = drm_dp_dpcd_readb(aux, offset, status);
3490 	if (ret != 1)
3491 		return ret;
3492 
3493 	offset = intel_dp->psr.panel_replay_enabled ?
3494 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3495 
3496 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3497 	if (ret != 1)
3498 		return ret;
3499 
3500 	*status = *status & DP_PSR_SINK_STATE_MASK;
3501 
3502 	return 0;
3503 }
3504 
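/*
 * ALPM is only used with selective update: if the sink reports an ALPM
 * error, disable PSR and mark the sink as not reliable.
 */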
3505 static void psr_alpm_check(struct intel_dp *intel_dp)
3506 {
3507 	struct intel_psr *psr = &intel_dp->psr;
3508 
3509 	if (!psr->sel_update_enabled)
3510 		return;
3511 
3512 	if (intel_alpm_get_error(intel_dp)) {
3513 		intel_psr_disable_locked(intel_dp);
3514 		psr->sink_not_reliable = true;
3515 	}
3516 }
3517 
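/*
 * Check DP_PSR_ESI for a sink PSR capability change and, if one is reported,
 * disable PSR and mark the sink as not reliable before clearing the bit.
 */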
3518 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3519 {
3520 	struct intel_display *display = to_intel_display(intel_dp);
3521 	struct intel_psr *psr = &intel_dp->psr;
3522 	u8 val;
3523 	int r;
3524 
3525 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3526 	if (r != 1) {
3527 		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3528 		return;
3529 	}
3530 
3531 	if (val & DP_PSR_CAPS_CHANGE) {
3532 		intel_psr_disable_locked(intel_dp);
3533 		psr->sink_not_reliable = true;
3534 		drm_dbg_kms(display->drm,
3535 			    "Sink PSR capability changed, disabling PSR\n");
3536 
3537 		/* Clearing it */
3538 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3539 	}
3540 }
3541 
3542 /*
3543  * The following bits are common between PSR and Panel Replay:
3544  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3545  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3546  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3547  * so this function relies on the PSR definitions only.
3548  */
3549 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3550 {
3551 	struct intel_display *display = to_intel_display(intel_dp);
3552 	struct intel_psr *psr = &intel_dp->psr;
3553 	u8 status, error_status;
3554 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3555 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3556 			  DP_PSR_LINK_CRC_ERROR;
3557 
3558 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3559 		return;
3560 
3561 	mutex_lock(&psr->lock);
3562 
3563 	psr->link_ok = false;
3564 
3565 	if (!psr->enabled)
3566 		goto exit;
3567 
3568 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3569 		drm_err(display->drm,
3570 			"Error reading PSR status or error status\n");
3571 		goto exit;
3572 	}
3573 
3574 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3575 	    (error_status & errors)) {
3576 		intel_psr_disable_locked(intel_dp);
3577 		psr->sink_not_reliable = true;
3578 	}
3579 
3580 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3581 	    !error_status)
3582 		drm_dbg_kms(display->drm,
3583 			    "PSR sink internal error, disabling PSR\n");
3584 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3585 		drm_dbg_kms(display->drm,
3586 			    "PSR RFB storage error, disabling PSR\n");
3587 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3588 		drm_dbg_kms(display->drm,
3589 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3590 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3591 		drm_dbg_kms(display->drm,
3592 			    "PSR Link CRC error, disabling PSR\n");
3593 
3594 	if (error_status & ~errors)
3595 		drm_err(display->drm,
3596 			"PSR_ERROR_STATUS unhandled errors %x\n",
3597 			error_status & ~errors);
3598 	/* clear the error status register */
3599 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3600 
3601 	if (!psr->panel_replay_enabled) {
3602 		psr_alpm_check(intel_dp);
3603 		psr_capability_changed_check(intel_dp);
3604 	}
3605 
3606 exit:
3607 	mutex_unlock(&psr->lock);
3608 }
3609 
3610 bool intel_psr_enabled(struct intel_dp *intel_dp)
3611 {
3612 	bool ret;
3613 
3614 	if (!CAN_PSR(intel_dp))
3615 		return false;
3616 
3617 	mutex_lock(&intel_dp->psr.lock);
3618 	ret = intel_dp->psr.enabled;
3619 	mutex_unlock(&intel_dp->psr.lock);
3620 
3621 	return ret;
3622 }
3623 
3624 /**
3625  * intel_psr_link_ok - return psr->link_ok
3626  * @intel_dp: struct intel_dp
3627  *
3628  * We are seeing unexpected link re-trainings with some panels. This is caused
3629  * by the panel reporting a bad link status after PSR is enabled. Code checking
3630  * the link status can call this to decide when to ignore a bad link status
3631  * reported by the panel, i.e. if the panel reports a bad link but
3632  * intel_psr_link_ok() says the link is ok, the caller should rely on the latter.
3633  *
3634  * Return value of link_ok
3635  */
3636 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3637 {
3638 	bool ret;
3639 
3640 	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3641 	    !intel_dp_is_edp(intel_dp))
3642 		return false;
3643 
3644 	mutex_lock(&intel_dp->psr.lock);
3645 	ret = intel_dp->psr.link_ok;
3646 	mutex_unlock(&intel_dp->psr.lock);
3647 
3648 	return ret;
3649 }
3650 
3651 /**
3652  * intel_psr_lock - grab PSR lock
3653  * @crtc_state: the crtc state
3654  *
3655  * This is initially meant to be used around the CRTC update, when
3656  * vblank sensitive registers are updated and we need to grab the lock
3657  * before that to avoid vblank evasion.
3658  */
3659 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3660 {
3661 	struct intel_display *display = to_intel_display(crtc_state);
3662 	struct intel_encoder *encoder;
3663 
3664 	if (!crtc_state->has_psr)
3665 		return;
3666 
3667 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3668 					     crtc_state->uapi.encoder_mask) {
3669 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3670 
3671 		mutex_lock(&intel_dp->psr.lock);
3672 		break;
3673 	}
3674 }
3675 
3676 /**
3677  * intel_psr_unlock - release PSR lock
3678  * @crtc_state: the crtc state
3679  *
3680  * Release the PSR lock that was held during pipe update.
3681  */
3682 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3683 {
3684 	struct intel_display *display = to_intel_display(crtc_state);
3685 	struct intel_encoder *encoder;
3686 
3687 	if (!crtc_state->has_psr)
3688 		return;
3689 
3690 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3691 					     crtc_state->uapi.encoder_mask) {
3692 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3693 
3694 		mutex_unlock(&intel_dp->psr.lock);
3695 		break;
3696 	}
3697 }
3698 
3699 /* Wa_16025596647 */
3700 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
3701 {
3702 	struct intel_display *display = to_intel_display(intel_dp);
3703 	bool dc5_dc6_blocked;
3704 
3705 	if (!intel_dp->psr.active)
3706 		return;
3707 
3708 	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
3709 
3710 	if (intel_dp->psr.sel_update_enabled)
3711 		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
3712 					 psr_compute_idle_frames(intel_dp));
3713 	else
3714 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
3715 								       intel_dp->psr.pipe,
3716 								       dc5_dc6_blocked);
3717 }
3718 
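/*
 * Re-evaluate the Wa_16025596647 workaround for every enabled PSR encoder;
 * Panel Replay is not affected.
 */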
3719 static void psr_dc5_dc6_wa_work(struct work_struct *work)
3720 {
3721 	struct intel_display *display = container_of(work, typeof(*display),
3722 						     psr_dc5_dc6_wa_work);
3723 	struct intel_encoder *encoder;
3724 
3725 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3726 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3727 
3728 		mutex_lock(&intel_dp->psr.lock);
3729 
3730 		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
3731 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3732 
3733 		mutex_unlock(&intel_dp->psr.lock);
3734 	}
3735 }
3736 
3737 /**
3738  * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
3739  * @display: intel display struct
3740  *
3741  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
3742  * psr_dc5_dc6_wa_work used for applying/removing the workaround.
3743  */
3744 void intel_psr_notify_dc5_dc6(struct intel_display *display)
3745 {
3746 	if (DISPLAY_VER(display) != 20 &&
3747 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3748 		return;
3749 
3750 	schedule_work(&display->psr_dc5_dc6_wa_work);
3751 }
3752 
3753 /**
3754  * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
3755  * @display: intel display struct
3756  *
3757  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
3758  * psr_dc5_dc6_wa_work used for applying the workaround.
3759  */
3760 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
3761 {
3762 	if (DISPLAY_VER(display) != 20 &&
3763 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3764 		return;
3765 
3766 	INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
3767 }
3768 
3769 /**
3770  * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
3771  * @state: intel atomic state
3772  * @crtc: intel crtc
3773  * @enable: enable/disable
3774  *
3775  * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3776  * apply/remove the workaround when a pipe is getting enabled/disabled.
3777  */
3778 void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
3779 				  struct intel_crtc *crtc, bool enable)
3780 {
3781 	struct intel_display *display = to_intel_display(state);
3782 	struct intel_encoder *encoder;
3783 
3784 	if (DISPLAY_VER(display) != 20 &&
3785 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3786 		return;
3787 
3788 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3789 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3790 		u8 active_non_psr_pipes;
3791 
3792 		mutex_lock(&intel_dp->psr.lock);
3793 
3794 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3795 			goto unlock;
3796 
3797 		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
3798 
3799 		if (enable)
3800 			active_non_psr_pipes |= BIT(crtc->pipe);
3801 		else
3802 			active_non_psr_pipes &= ~BIT(crtc->pipe);
3803 
3804 		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
3805 			goto unlock;
3806 
3807 		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
3808 		    (!enable && !intel_dp->psr.active_non_psr_pipes)) {
3809 			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3810 			goto unlock;
3811 		}
3812 
3813 		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3814 
3815 		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3816 unlock:
3817 		mutex_unlock(&intel_dp->psr.lock);
3818 	}
3819 }
3820 
3821 /**
3822  * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
3823  * @display: intel display struct
3824  * @enable: enable/disable
3825  *
3826  * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3827  * apply/remove the workaround when vblank is getting enabled/disabled.
3828  */
3829 void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
3830 					    bool enable)
3831 {
3832 	struct intel_encoder *encoder;
3833 
3834 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3835 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3836 
3837 		mutex_lock(&intel_dp->psr.lock);
3838 		if (intel_dp->psr.panel_replay_enabled) {
3839 			mutex_unlock(&intel_dp->psr.lock);
3840 			break;
3841 		}
3842 
3843 		if (intel_dp->psr.enabled)
3844 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3845 
3846 		mutex_unlock(&intel_dp->psr.lock);
3847 		return;
3848 	}
3849 
3850 	/*
3851 	 * NOTE: intel_display_power_set_target_dc_state is used
3852 	 * only by PSR code for DC3CO handling. The DC3CO target
3853 	 * state is currently disabled in the PSR code. If DC3CO
3854 	 * is taken into use we need to take that into account
3855 	 * here as well.
3856 	 */
3857 	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
3858 						DC_STATE_EN_UPTO_DC6);
3859 }
3860 
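/*
 * Decode the live source PSR/PSR2 status field from the hardware status
 * register into a human readable string for the debugfs output.
 */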
3861 static void
3862 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3863 {
3864 	struct intel_display *display = to_intel_display(intel_dp);
3865 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3866 	const char *status = "unknown";
3867 	u32 val, status_val;
3868 
3869 	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
3870 	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
3871 		static const char * const live_status[] = {
3872 			"IDLE",
3873 			"CAPTURE",
3874 			"CAPTURE_FS",
3875 			"SLEEP",
3876 			"BUFON_FW",
3877 			"ML_UP",
3878 			"SU_STANDBY",
3879 			"FAST_SLEEP",
3880 			"DEEP_SLEEP",
3881 			"BUF_ON",
3882 			"TG_ON"
3883 		};
3884 		val = intel_de_read(display,
3885 				    EDP_PSR2_STATUS(display, cpu_transcoder));
3886 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3887 		if (status_val < ARRAY_SIZE(live_status))
3888 			status = live_status[status_val];
3889 	} else {
3890 		static const char * const live_status[] = {
3891 			"IDLE",
3892 			"SRDONACK",
3893 			"SRDENT",
3894 			"BUFOFF",
3895 			"BUFON",
3896 			"AUXACK",
3897 			"SRDOFFACK",
3898 			"SRDENT_ON",
3899 		};
3900 		val = intel_de_read(display,
3901 				    psr_status_reg(display, cpu_transcoder));
3902 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3903 		if (status_val < ARRAY_SIZE(live_status))
3904 			status = live_status[status_val];
3905 	}
3906 
3907 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3908 }
3909 
3910 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3911 				      struct seq_file *m)
3912 {
3913 	struct intel_psr *psr = &intel_dp->psr;
3914 
3915 	seq_printf(m, "Sink support: PSR = %s",
3916 		   str_yes_no(psr->sink_support));
3917 
3918 	if (psr->sink_support)
3919 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3920 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3921 		seq_printf(m, " (Early Transport)");
3922 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3923 	seq_printf(m, ", Panel Replay Selective Update = %s",
3924 		   str_yes_no(psr->sink_panel_replay_su_support));
3925 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
3926 	    DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3927 		seq_printf(m, " (Early Transport)");
3928 	seq_printf(m, "\n");
3929 }
3930 
3931 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3932 				 struct seq_file *m)
3933 {
3934 	struct intel_psr *psr = &intel_dp->psr;
3935 	const char *status, *mode, *region_et;
3936 
3937 	if (psr->enabled)
3938 		status = " enabled";
3939 	else
3940 		status = "disabled";
3941 
3942 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3943 		mode = "Panel Replay Selective Update";
3944 	else if (psr->panel_replay_enabled)
3945 		mode = "Panel Replay";
3946 	else if (psr->sel_update_enabled)
3947 		mode = "PSR2";
3948 	else if (psr->enabled)
3949 		mode = "PSR1";
3950 	else
3951 		mode = "";
3952 
3953 	if (psr->su_region_et_enabled)
3954 		region_et = " (Early Transport)";
3955 	else
3956 		region_et = "";
3957 
3958 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3959 }
3960 
3961 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3962 {
3963 	struct intel_display *display = to_intel_display(intel_dp);
3964 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3965 	struct intel_psr *psr = &intel_dp->psr;
3966 	struct ref_tracker *wakeref;
3967 	bool enabled;
3968 	u32 val, psr2_ctl;
3969 
3970 	intel_psr_sink_capability(intel_dp, m);
3971 
3972 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3973 		return 0;
3974 
3975 	wakeref = intel_display_rpm_get(display);
3976 	mutex_lock(&psr->lock);
3977 
3978 	intel_psr_print_mode(intel_dp, m);
3979 
3980 	if (!psr->enabled) {
3981 		seq_printf(m, "PSR sink not reliable: %s\n",
3982 			   str_yes_no(psr->sink_not_reliable));
3983 
3984 		goto unlock;
3985 	}
3986 
3987 	if (psr->panel_replay_enabled) {
3988 		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
3989 
3990 		if (intel_dp_is_edp(intel_dp))
3991 			psr2_ctl = intel_de_read(display,
3992 						 EDP_PSR2_CTL(display,
3993 							      cpu_transcoder));
3994 
3995 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3996 	} else if (psr->sel_update_enabled) {
3997 		val = intel_de_read(display,
3998 				    EDP_PSR2_CTL(display, cpu_transcoder));
3999 		enabled = val & EDP_PSR2_ENABLE;
4000 	} else {
4001 		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
4002 		enabled = val & EDP_PSR_ENABLE;
4003 	}
4004 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
4005 		   str_enabled_disabled(enabled), val);
4006 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
4007 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
4008 			   psr2_ctl);
4009 	psr_source_status(intel_dp, m);
4010 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
4011 		   psr->busy_frontbuffer_bits);
4012 
4013 	/*
4014 	 * The SKL+ perf counter is reset to 0 every time a DC state is entered
4015 	 */
4016 	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
4017 	seq_printf(m, "Performance counter: %u\n",
4018 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
4019 
4020 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
4021 		seq_printf(m, "Last attempted entry at: %lld\n",
4022 			   psr->last_entry_attempt);
4023 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
4024 	}
4025 
4026 	if (psr->sel_update_enabled) {
4027 		u32 su_frames_val[3];
4028 		int frame;
4029 
4030 		/*
4031 		 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
4032 		 * (it returns zeros only) and it has been removed on Xe2_LPD.
4033 		 */
4034 		if (DISPLAY_VER(display) < 13) {
4035 			/*
4036 			 * Read all 3 registers beforehand to minimize crossing a
4037 			 * frame boundary between register reads.
4038 			 */
4039 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
4040 				val = intel_de_read(display,
4041 						    PSR2_SU_STATUS(display, cpu_transcoder, frame));
4042 				su_frames_val[frame / 3] = val;
4043 			}
4044 
4045 			seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
4046 
4047 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
4048 				u32 su_blocks;
4049 
4050 				su_blocks = su_frames_val[frame / 3] &
4051 					PSR2_SU_STATUS_MASK(frame);
4052 				su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
4053 				seq_printf(m, "%d\t%d\n", frame, su_blocks);
4054 			}
4055 		}
4056 
4057 		seq_printf(m, "PSR2 selective fetch: %s\n",
4058 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
4059 	}
4060 
4061 unlock:
4062 	mutex_unlock(&psr->lock);
4063 	intel_display_rpm_put(display, wakeref);
4064 
4065 	return 0;
4066 }
4067 
4068 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4069 {
4070 	struct intel_display *display = m->private;
4071 	struct intel_dp *intel_dp = NULL;
4072 	struct intel_encoder *encoder;
4073 
4074 	if (!HAS_PSR(display))
4075 		return -ENODEV;
4076 
4077 	/* Find the first eDP which supports PSR */
4078 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4079 		intel_dp = enc_to_intel_dp(encoder);
4080 		break;
4081 	}
4082 
4083 	if (!intel_dp)
4084 		return -ENODEV;
4085 
4086 	return intel_psr_status(m, intel_dp);
4087 }
4088 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4089 
4090 static int
4091 i915_edp_psr_debug_set(void *data, u64 val)
4092 {
4093 	struct intel_display *display = data;
4094 	struct intel_encoder *encoder;
4095 	int ret = -ENODEV;
4096 
4097 	if (!HAS_PSR(display))
4098 		return ret;
4099 
4100 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4101 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4102 
4103 		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4104 
4105 		// TODO: split to each transcoder's PSR debug state
4106 		with_intel_display_rpm(display)
4107 			ret = intel_psr_debug_set(intel_dp, val);
4108 	}
4109 
4110 	return ret;
4111 }
4112 
4113 static int
4114 i915_edp_psr_debug_get(void *data, u64 *val)
4115 {
4116 	struct intel_display *display = data;
4117 	struct intel_encoder *encoder;
4118 
4119 	if (!HAS_PSR(display))
4120 		return -ENODEV;
4121 
4122 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4123 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4124 
4125 		// TODO: split to each transcoder's PSR debug state
4126 		*val = READ_ONCE(intel_dp->psr.debug);
4127 		return 0;
4128 	}
4129 
4130 	return -ENODEV;
4131 }
4132 
4133 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4134 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4135 			"%llu\n");
4136 
4137 void intel_psr_debugfs_register(struct intel_display *display)
4138 {
4139 	struct drm_minor *minor = display->drm->primary;
4140 
4141 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
4142 			    display, &i915_edp_psr_debug_fops);
4143 
4144 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
4145 			    display, &i915_edp_psr_status_fops);
4146 }
4147 
4148 static const char *psr_mode_str(struct intel_dp *intel_dp)
4149 {
4150 	if (intel_dp->psr.panel_replay_enabled)
4151 		return "PANEL-REPLAY";
4152 	else if (intel_dp->psr.enabled)
4153 		return "PSR";
4154 
4155 	return "unknown";
4156 }
4157 
4158 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4159 {
4160 	struct intel_connector *connector = m->private;
4161 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4162 	static const char * const sink_status[] = {
4163 		"inactive",
4164 		"transition to active, capture and display",
4165 		"active, display from RFB",
4166 		"active, capture and display on sink device timings",
4167 		"transition to inactive, capture and display, timing re-sync",
4168 		"reserved",
4169 		"reserved",
4170 		"sink internal error",
4171 	};
4172 	const char *str;
4173 	int ret;
4174 	u8 status, error_status;
4175 
4176 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4177 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4178 		return -ENODEV;
4179 	}
4180 
4181 	if (connector->base.status != connector_status_connected)
4182 		return -ENODEV;
4183 
4184 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4185 	if (ret)
4186 		return ret;
4187 
4188 	status &= DP_PSR_SINK_STATE_MASK;
4189 	if (status < ARRAY_SIZE(sink_status))
4190 		str = sink_status[status];
4191 	else
4192 		str = "unknown";
4193 
4194 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4195 
4196 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4197 
4198 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4199 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4200 			    DP_PSR_LINK_CRC_ERROR))
4201 		seq_puts(m, ":\n");
4202 	else
4203 		seq_puts(m, "\n");
4204 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4205 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4206 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4207 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4208 	if (error_status & DP_PSR_LINK_CRC_ERROR)
4209 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4210 
4211 	return ret;
4212 }
4213 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4214 
4215 static int i915_psr_status_show(struct seq_file *m, void *data)
4216 {
4217 	struct intel_connector *connector = m->private;
4218 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4219 
4220 	return intel_psr_status(m, intel_dp);
4221 }
4222 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4223 
4224 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4225 {
4226 	struct intel_display *display = to_intel_display(connector);
4227 	struct dentry *root = connector->base.debugfs_entry;
4228 
4229 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4230 	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4231 		return;
4232 
4233 	debugfs_create_file("i915_psr_sink_status", 0444, root,
4234 			    connector, &i915_psr_sink_status_fops);
4235 
4236 	if (HAS_PSR(display) || HAS_DP20(display))
4237 		debugfs_create_file("i915_psr_status", 0444, root,
4238 				    connector, &i915_psr_status_fops);
4239 }
4240 
4241 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4242 {
4243 	/*
4244 	 * eDP Panel Replay always uses ALPM.
4245 	 * PSR2 uses ALPM, but PSR1 doesn't.
4246 	 */
4247 	return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4248 					     crtc_state->has_panel_replay);
4249 }
4250 
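/* AUX-less ALPM is used only with eDP Panel Replay. */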
4251 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
4252 				   const struct intel_crtc_state *crtc_state)
4253 {
4254 	return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
4255 }
4256