xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_print.h>
30 #include <drm/drm_vblank.h>
31 
32 #include "i915_reg.h"
33 #include "intel_alpm.h"
34 #include "intel_atomic.h"
35 #include "intel_crtc.h"
36 #include "intel_cursor_regs.h"
37 #include "intel_ddi.h"
38 #include "intel_de.h"
39 #include "intel_display_irq.h"
40 #include "intel_display_regs.h"
41 #include "intel_display_rpm.h"
42 #include "intel_display_types.h"
43 #include "intel_display_utils.h"
44 #include "intel_dmc.h"
45 #include "intel_dp.h"
46 #include "intel_dp_aux.h"
47 #include "intel_dsb.h"
48 #include "intel_frontbuffer.h"
49 #include "intel_hdmi.h"
50 #include "intel_psr.h"
51 #include "intel_psr_regs.h"
52 #include "intel_snps_phy.h"
53 #include "intel_step.h"
54 #include "intel_vblank.h"
55 #include "intel_vdsc.h"
56 #include "intel_vrr.h"
57 #include "skl_universal_plane.h"
58 
59 /**
60  * DOC: Panel Self Refresh (PSR/SRD)
61  *
62  * Since Haswell Display controller supports Panel Self-Refresh on display
63  * panels witch have a remote frame buffer (RFB) implemented according to PSR
64  * spec in eDP1.3. PSR feature allows the display to go to lower standby states
65  * when system is idle but display is on as it eliminates display refresh
66  * request to DDR memory completely as long as the frame buffer for that
67  * display is unchanged.
68  *
69  * Panel Self Refresh must be supported by both Hardware (source) and
70  * Panel (sink).
71  *
72  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
73  * to power down the link and memory controller. For DSI panels the same idea
74  * is called "manual mode".
75  *
76  * The implementation uses the hardware-based PSR support which automatically
77  * enters/exits self-refresh mode. The hardware takes care of sending the
78  * required DP aux message and could even retrain the link (that part isn't
79  * enabled yet though). The hardware also keeps track of any frontbuffer
80  * changes to know when to exit self-refresh mode again. Unfortunately that
81  * part doesn't work too well, hence why the i915 PSR support uses the
82  * software frontbuffer tracking to make sure it doesn't miss a screen
83  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
84  * get called by the frontbuffer tracking code. Note that because of locking
85  * issues the self-refresh re-enable code is done from a work queue, which
86  * must be correctly synchronized/cancelled when shutting down the pipe.
87  *
88  * DC3CO (DC3 clock off)
89  *
90  * On top of PSR2, GEN12 adds an intermediate power saving state that turns
91  * the clock off automatically during the PSR2 idle state.
92  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
93  * entry/exit allows the HW to enter a low-power state even when page flipping
94  * periodically (for instance a 30fps video playback scenario).
95  *
96  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
97  * it), DC3CO is enabled, and tgl_dc3co_disable_work is scheduled to run 6
98  * frames later. If no other flip occurs and that work function executes, DC3CO
99  * is disabled and PSR2 is configured to enter deep sleep again, restarting the
100  * cycle on the next flip.
101  * Front buffer modifications do not trigger DC3CO activation on purpose, as
102  * that would bring a lot of complexity and most modern systems will only
103  * use page flips.
104  */
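
/*
 * Illustrative sketch (not part of the driver) of the frontbuffer tracking
 * integration described above, assuming the current intel_psr_invalidate()
 * and intel_psr_flush() signatures; the real call sites live in the
 * frontbuffer tracking code (intel_frontbuffer.c):
 *
 *	intel_psr_invalidate(display, frontbuffer_bits, ORIGIN_CPU);
 *		CPU writes are about to touch the frontbuffer, so force a
 *		PSR exit before they land.
 *	intel_psr_flush(display, frontbuffer_bits, ORIGIN_CPU);
 *		The writes are done; re-enabling self-refresh is deferred to
 *		intel_dp->psr.work because of the locking issues noted above.
 */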
105 
106 /*
107  * Description of PSR mask bits:
108  *
109  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
110  *
111  *  When unmasked (nearly) all display register writes (eg. even
112  *  SWF) trigger a PSR exit. Some registers are excluded from this
113  *  and they have a more specific mask (described below). On icl+
114  *  this bit no longer exists and is effectively always set.
115  *
116  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
117  *
118  *  When unmasked (nearly) all pipe/plane register writes
119  *  trigger a PSR exit. Some plane registers are excluded from this
120  *  and they have a more specific mask (described below).
121  *
122  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
123  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
124  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
125  *
126  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
127  *  SPR_SURF/CURBASE are not included in this and instead are
128  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
129  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
130  *
131  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
132  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
133  *
134  *  When unmasked PSR is blocked as long as the sprite
135  *  plane is enabled. skl+ with their universal planes no
136  *  longer have a mask bit like this, and no plane being
137  *  enabled blocks PSR.
138  *
139  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
140  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
141  *
142  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
143  *  this doesn't exist but CURPOS is included in the
144  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
145  *
146  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
147  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
148  *
149  *  When unmasked PSR is blocked as long as vblank and/or vsync
150  *  interrupt is unmasked in IMR *and* enabled in IER.
151  *
152  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
153  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
154  *
155  *  Selects whether PSR exit generates an extra vblank before
156  *  the first frame is transmitted. Also note the opposite polarity
157  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
158  *  unmasked==do not generate the extra vblank).
159  *
160  *  With DC states enabled the extra vblank happens after link training,
161  *  with DC states disabled it happens immediately upon the PSR exit trigger.
162  *  No idea as of now why there is a difference. HSW/BDW (which don't
163  *  even have DMC) always generate it after link training. Go figure.
164  *
165  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
166  *  and thus won't latch until the first vblank. So with DC states
167  *  enabled the register effectively uses the reset value during DC5
168  *  exit+PSR exit sequence, and thus the bit does nothing until
169  *  latched by the vblank that it was trying to prevent from being
170  *  generated in the first place. So we should probably call this
171  *  one a chicken/egg bit instead on skl+.
172  *
173  *  In standby mode (as opposed to link-off) this makes no difference
174  *  as the timing generator keeps running the whole time generating
175  *  normal periodic vblanks.
176  *
177  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
178  *  and doing so makes the behaviour match the skl+ reset value.
179  *
180  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
181  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
182  *
183  *  On BDW, without this bit no vblanks whatsoever are
184  *  generated after PSR exit. On HSW this has no apparent effect.
185  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
186  *
187  * The rest of the bits are more self-explanatory and/or
188  * irrelevant for normal operation.
189  *
190  * Description of the intel_crtc_state variables has_psr, has_panel_replay and
191  * has_sel_update:
192  *
193  *  has_psr (alone):					PSR1
194  *  has_psr + has_sel_update:				PSR2
195  *  has_psr + has_panel_replay:				Panel Replay
196  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
197  *
198  * Description of some intel_psr variables enabled, panel_replay_enabled and
199  * sel_update_enabled:
200  *
201  *  enabled (alone):						PSR1
202  *  enabled + sel_update_enabled:				PSR2
203  *  enabled + panel_replay_enabled:				Panel Replay
204  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
205  */
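
/*
 * Illustrative sketch (not used by the driver) of how the crtc state flags
 * described above combine into a single mode; the helper name is made up.
 */
static inline const char *
psr_mode_name_example(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->has_psr)
		return "PSR disabled";
	if (crtc_state->has_panel_replay)
		return crtc_state->has_sel_update ? "Panel Replay SU" : "Panel Replay";
	return crtc_state->has_sel_update ? "PSR2" : "PSR1";
}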
206 
207 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
208 			   (intel_dp)->psr.source_support)
209 
210 bool intel_encoder_can_psr(struct intel_encoder *encoder)
211 {
212 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
213 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
214 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
215 	else
216 		return false;
217 }
218 
219 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
220 				  const struct intel_crtc_state *crtc_state)
221 {
222 	/*
223 	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
224 	 * the output is enabled. For non-eDP outputs the main link is always
225 	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
226 	 * for eDP.
227 	 *
228 	 * TODO:
229 	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
230 	 *   the ALPM with main-link off mode is not enabled.
231 	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
232 	 *   main-link off mode is added for it and this mode gets enabled.
233 	 */
234 	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
235 	       intel_encoder_can_psr(encoder);
236 }
237 
238 static bool psr_global_enabled(struct intel_dp *intel_dp)
239 {
240 	struct intel_connector *connector = intel_dp->attached_connector;
241 
242 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
243 	case I915_PSR_DEBUG_DEFAULT:
244 		return intel_dp_is_edp(intel_dp) ?
245 			connector->panel.vbt.psr.enable : true;
246 	case I915_PSR_DEBUG_DISABLE:
247 		return false;
248 	default:
249 		return true;
250 	}
251 }
252 
253 static bool sel_update_global_enabled(struct intel_dp *intel_dp)
254 {
255 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
256 	case I915_PSR_DEBUG_DISABLE:
257 	case I915_PSR_DEBUG_FORCE_PSR1:
258 		return false;
259 	default:
260 		return true;
261 	}
262 }
263 
264 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
265 {
266 	struct intel_display *display = to_intel_display(intel_dp);
267 
268 	return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
269 		display->params.enable_panel_replay;
270 }
271 
272 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
273 {
274 	struct intel_display *display = to_intel_display(intel_dp);
275 
276 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
277 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
278 }
279 
280 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
281 {
282 	struct intel_display *display = to_intel_display(intel_dp);
283 
284 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
285 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
286 }
287 
288 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
289 {
290 	struct intel_display *display = to_intel_display(intel_dp);
291 
292 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
293 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
294 }
295 
296 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
297 {
298 	struct intel_display *display = to_intel_display(intel_dp);
299 
300 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
301 		EDP_PSR_MASK(intel_dp->psr.transcoder);
302 }
303 
304 static i915_reg_t psr_ctl_reg(struct intel_display *display,
305 			      enum transcoder cpu_transcoder)
306 {
307 	if (DISPLAY_VER(display) >= 8)
308 		return EDP_PSR_CTL(display, cpu_transcoder);
309 	else
310 		return HSW_SRD_CTL;
311 }
312 
313 static i915_reg_t psr_debug_reg(struct intel_display *display,
314 				enum transcoder cpu_transcoder)
315 {
316 	if (DISPLAY_VER(display) >= 8)
317 		return EDP_PSR_DEBUG(display, cpu_transcoder);
318 	else
319 		return HSW_SRD_DEBUG;
320 }
321 
322 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
323 				   enum transcoder cpu_transcoder)
324 {
325 	if (DISPLAY_VER(display) >= 8)
326 		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
327 	else
328 		return HSW_SRD_PERF_CNT;
329 }
330 
331 static i915_reg_t psr_status_reg(struct intel_display *display,
332 				 enum transcoder cpu_transcoder)
333 {
334 	if (DISPLAY_VER(display) >= 8)
335 		return EDP_PSR_STATUS(display, cpu_transcoder);
336 	else
337 		return HSW_SRD_STATUS;
338 }
339 
340 static i915_reg_t psr_imr_reg(struct intel_display *display,
341 			      enum transcoder cpu_transcoder)
342 {
343 	if (DISPLAY_VER(display) >= 12)
344 		return TRANS_PSR_IMR(display, cpu_transcoder);
345 	else
346 		return EDP_PSR_IMR;
347 }
348 
349 static i915_reg_t psr_iir_reg(struct intel_display *display,
350 			      enum transcoder cpu_transcoder)
351 {
352 	if (DISPLAY_VER(display) >= 12)
353 		return TRANS_PSR_IIR(display, cpu_transcoder);
354 	else
355 		return EDP_PSR_IIR;
356 }
357 
358 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
359 				  enum transcoder cpu_transcoder)
360 {
361 	if (DISPLAY_VER(display) >= 8)
362 		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
363 	else
364 		return HSW_SRD_AUX_CTL;
365 }
366 
367 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
368 				   enum transcoder cpu_transcoder, int i)
369 {
370 	if (DISPLAY_VER(display) >= 8)
371 		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
372 	else
373 		return HSW_SRD_AUX_DATA(i);
374 }
375 
376 static void psr_irq_control(struct intel_dp *intel_dp)
377 {
378 	struct intel_display *display = to_intel_display(intel_dp);
379 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
380 	u32 mask;
381 
382 	if (intel_dp->psr.panel_replay_enabled)
383 		return;
384 
385 	mask = psr_irq_psr_error_bit_get(intel_dp);
386 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
387 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
388 			psr_irq_pre_entry_bit_get(intel_dp);
389 
390 	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
391 		     psr_irq_mask_get(intel_dp), ~mask);
392 }
393 
394 static void psr_event_print(struct intel_display *display,
395 			    u32 val, bool sel_update_enabled)
396 {
397 	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
398 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
399 		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
400 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
401 		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
402 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
403 		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
404 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
405 		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
406 	if (val & PSR_EVENT_GRAPHICS_RESET)
407 		drm_dbg_kms(display->drm, "\tGraphics reset\n");
408 	if (val & PSR_EVENT_PCH_INTERRUPT)
409 		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
410 	if (val & PSR_EVENT_MEMORY_UP)
411 		drm_dbg_kms(display->drm, "\tMemory up\n");
412 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
413 		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
414 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
415 		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
416 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
417 		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
418 	if (val & PSR_EVENT_REGISTER_UPDATE)
419 		drm_dbg_kms(display->drm, "\tRegister updated\n");
420 	if (val & PSR_EVENT_HDCP_ENABLE)
421 		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
422 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
423 		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
424 	if (val & PSR_EVENT_VBI_ENABLE)
425 		drm_dbg_kms(display->drm, "\tVBI enabled\n");
426 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
427 		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
428 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
429 		drm_dbg_kms(display->drm, "\tPSR disabled\n");
430 }
431 
432 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
433 {
434 	struct intel_display *display = to_intel_display(intel_dp);
435 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
436 	ktime_t time_ns =  ktime_get();
437 
438 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
439 		intel_dp->psr.last_entry_attempt = time_ns;
440 		drm_dbg_kms(display->drm,
441 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
442 			    transcoder_name(cpu_transcoder));
443 	}
444 
445 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
446 		intel_dp->psr.last_exit = time_ns;
447 		drm_dbg_kms(display->drm,
448 			    "[transcoder %s] PSR exit completed\n",
449 			    transcoder_name(cpu_transcoder));
450 
451 		if (DISPLAY_VER(display) >= 9) {
452 			u32 val;
453 
454 			val = intel_de_rmw(display,
455 					   PSR_EVENT(display, cpu_transcoder),
456 					   0, 0);
457 
458 			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
459 		}
460 	}
461 
462 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
463 		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
464 			 transcoder_name(cpu_transcoder));
465 
466 		intel_dp->psr.irq_aux_error = true;
467 
468 		/*
469 		 * If this interrupt is not masked it will keep firing
470 		 * so fast that it prevents the scheduled work from
471 		 * running.
472 		 * Also, after a PSR error we don't want to arm PSR
473 		 * again, so we don't care about unmasking the interrupt
474 		 * or clearing irq_aux_error.
475 		 */
476 		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
477 			     0, psr_irq_psr_error_bit_get(intel_dp));
478 
479 		queue_work(display->wq.unordered, &intel_dp->psr.work);
480 	}
481 }
482 
483 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
484 {
485 	struct intel_display *display = to_intel_display(intel_dp);
486 	u8 val = 8; /* assume the worst if we can't read the value */
487 
488 	if (drm_dp_dpcd_readb(&intel_dp->aux,
489 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
490 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
491 	else
492 		drm_dbg_kms(display->drm,
493 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
494 	return val;
495 }
496 
497 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
498 {
499 	u8 su_capability = 0;
500 
501 	if (intel_dp->psr.sink_panel_replay_su_support) {
502 		if (drm_dp_dpcd_read_byte(&intel_dp->aux,
503 					  DP_PANEL_REPLAY_CAP_CAPABILITY,
504 					  &su_capability) < 0)
505 			return 0;
506 	} else {
507 		su_capability = intel_dp->psr_dpcd[1];
508 	}
509 
510 	return su_capability;
511 }
512 
513 static unsigned int
514 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
515 {
516 	return intel_dp->psr.sink_panel_replay_su_support ?
517 		DP_PANEL_REPLAY_CAP_X_GRANULARITY :
518 		DP_PSR2_SU_X_GRANULARITY;
519 }
520 
521 static unsigned int
522 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
523 {
524 	return intel_dp->psr.sink_panel_replay_su_support ?
525 		DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
526 		DP_PSR2_SU_Y_GRANULARITY;
527 }
528 
529 /*
530  * Note: bits related to granularity are the same in the panel replay and PSR
531  * registers. Rely on the PSR definitions for these "common" bits.
532  */
533 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
534 {
535 	struct intel_display *display = to_intel_display(intel_dp);
536 	ssize_t r;
537 	u16 w;
538 	u8 y;
539 
540 	/*
541 	 * TODO: Do we need to take into account a panel supporting both PSR and
542 	 * Panel Replay?
543 	 */
544 
545 	/*
546 	 * If the sink doesn't have specific granularity requirements, set the
547 	 * legacy ones.
548 	 */
549 	if (!(intel_dp_get_su_capability(intel_dp) &
550 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
551 		/* As PSR2 HW sends full lines, we do not care about x granularity */
552 		w = 4;
553 		y = 4;
554 		goto exit;
555 	}
556 
557 	r = drm_dp_dpcd_read(&intel_dp->aux,
558 			     intel_dp_get_su_x_granularity_offset(intel_dp),
559 			     &w, 2);
560 	if (r != 2)
561 		drm_dbg_kms(display->drm,
562 			    "Unable to read selective update x granularity\n");
563 	/*
564 	 * Spec says that if the value read is 0 the default granularity should
565 	 * be used instead.
566 	 */
567 	if (r != 2 || w == 0)
568 		w = 4;
569 
570 	r = drm_dp_dpcd_read(&intel_dp->aux,
571 			     intel_dp_get_su_y_granularity_offset(intel_dp),
572 			     &y, 1);
573 	if (r != 1) {
574 		drm_dbg_kms(display->drm,
575 			    "Unable to read selective update y granularity\n");
576 		y = 4;
577 	}
578 	if (y == 0)
579 		y = 1;
580 
581 exit:
582 	intel_dp->psr.su_w_granularity = w;
583 	intel_dp->psr.su_y_granularity = y;
584 }
585 
586 static enum intel_panel_replay_dsc_support
587 compute_pr_dsc_support(struct intel_dp *intel_dp)
588 {
589 	u8 pr_dsc_mode;
590 	u8 val;
591 
592 	val = intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
593 	pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);
594 
595 	switch (pr_dsc_mode) {
596 	case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
597 		return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
598 	case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
599 		return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
600 	default:
601 		MISSING_CASE(pr_dsc_mode);
602 		fallthrough;
603 	case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
604 	case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
605 		return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
606 	}
607 }
608 
609 static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
610 {
611 	switch (dsc_support) {
612 	case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
613 		return "not supported";
614 	case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
615 		return "full frame only";
616 	case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
617 		return "selective update";
618 	default:
619 		MISSING_CASE(dsc_support);
620 		return "n/a";
621 	}
622 }
623 
624 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
625 {
626 	struct intel_display *display = to_intel_display(intel_dp);
627 	int ret;
628 
629 	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
630 				    &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
631 	if (ret < 0)
632 		return;
633 
634 	if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
635 	      DP_PANEL_REPLAY_SUPPORT))
636 		return;
637 
638 	if (intel_dp_is_edp(intel_dp)) {
639 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
640 			drm_dbg_kms(display->drm,
641 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
642 			return;
643 		}
644 
645 		if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
646 		      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
647 			drm_dbg_kms(display->drm,
648 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
649 			return;
650 		}
651 	}
652 
653 	intel_dp->psr.sink_panel_replay_support = true;
654 
655 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
656 	    DP_PANEL_REPLAY_SU_SUPPORT)
657 		intel_dp->psr.sink_panel_replay_su_support = true;
658 
659 	intel_dp->psr.sink_panel_replay_dsc_support = compute_pr_dsc_support(intel_dp);
660 
661 	drm_dbg_kms(display->drm,
662 		    "Panel replay %sis supported by panel (in DSC mode: %s)\n",
663 		    intel_dp->psr.sink_panel_replay_su_support ?
664 		    "selective_update " : "",
665 		    panel_replay_dsc_support_str(intel_dp->psr.sink_panel_replay_dsc_support));
666 }
667 
668 static void _psr_init_dpcd(struct intel_dp *intel_dp)
669 {
670 	struct intel_display *display = to_intel_display(intel_dp);
671 	int ret;
672 
673 	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
674 				    sizeof(intel_dp->psr_dpcd));
675 	if (ret < 0)
676 		return;
677 
678 	if (!intel_dp->psr_dpcd[0])
679 		return;
680 
681 	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
682 		    intel_dp->psr_dpcd[0]);
683 
684 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
685 		drm_dbg_kms(display->drm,
686 			    "PSR support not currently available for this panel\n");
687 		return;
688 	}
689 
690 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
691 		drm_dbg_kms(display->drm,
692 			    "Panel lacks power state control, PSR cannot be enabled\n");
693 		return;
694 	}
695 
696 	intel_dp->psr.sink_support = true;
697 	intel_dp->psr.sink_sync_latency =
698 		intel_dp_get_sink_sync_latency(intel_dp);
699 
700 	if (DISPLAY_VER(display) >= 9 &&
701 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
702 		bool y_req = intel_dp->psr_dpcd[1] &
703 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
704 
705 		/*
706 		 * All panels that support PSR version 03h (PSR2 +
707 		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
708 		 * are only sure that it is going to be used when required by
709 		 * the panel. This way the panel is capable of doing selective
710 		 * updates without an AUX frame sync.
711 		 *
712 		 * To support panels with PSR version 02h, or version 03h
713 		 * without the Y-coordinate requirement, we would need to
714 		 * enable GTC first.
715 		 */
716 		intel_dp->psr.sink_psr2_support = y_req &&
717 			intel_alpm_aux_wake_supported(intel_dp);
718 		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
719 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
720 	}
721 }
722 
723 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
724 {
725 	_psr_init_dpcd(intel_dp);
726 
727 	_panel_replay_init_dpcd(intel_dp);
728 
729 	if (intel_dp->psr.sink_psr2_support ||
730 	    intel_dp->psr.sink_panel_replay_su_support)
731 		intel_dp_get_su_granularity(intel_dp);
732 }
733 
734 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
735 {
736 	struct intel_display *display = to_intel_display(intel_dp);
737 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
738 	u32 aux_clock_divider, aux_ctl;
739 	/* write DP_SET_POWER=D0 */
740 	static const u8 aux_msg[] = {
741 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
742 		[1] = (DP_SET_POWER >> 8) & 0xff,
743 		[2] = DP_SET_POWER & 0xff,
744 		[3] = 1 - 1,	/* length field: number of data bytes - 1 */
745 		[4] = DP_SET_POWER_D0,
746 	};
747 	int i;
748 
749 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
750 	for (i = 0; i < sizeof(aux_msg); i += 4)
751 		intel_de_write(display,
752 			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
753 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
754 
755 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
756 
757 	/* Start with bits set for DDI_AUX_CTL register */
758 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
759 					     aux_clock_divider);
760 
761 	/* Select only valid bits for SRD_AUX_CTL */
762 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
763 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
764 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
765 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
766 
767 	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
768 		       aux_ctl);
769 }
770 
771 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
772 {
773 	struct intel_display *display = to_intel_display(intel_dp);
774 
775 	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
776 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
777 		return false;
778 
779 	return panel_replay ?
780 		intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
781 		DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
782 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
783 }
784 
785 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
786 				      const struct intel_crtc_state *crtc_state)
787 {
788 	u8 val = DP_PANEL_REPLAY_ENABLE |
789 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
790 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
791 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
792 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
793 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
794 
795 	if (crtc_state->has_sel_update)
796 		val |= DP_PANEL_REPLAY_SU_ENABLE;
797 
798 	if (crtc_state->enable_psr2_su_region_et)
799 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
800 
801 	if (crtc_state->req_psr2_sdp_prior_scanline)
802 		panel_replay_config2 |=
803 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
804 
805 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
806 
807 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
808 			   panel_replay_config2);
809 }
810 
811 static void _psr_enable_sink(struct intel_dp *intel_dp,
812 			     const struct intel_crtc_state *crtc_state)
813 {
814 	struct intel_display *display = to_intel_display(intel_dp);
815 	u8 val = 0;
816 
817 	if (crtc_state->has_sel_update) {
818 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
819 	} else {
820 		if (intel_dp->psr.link_standby)
821 			val |= DP_PSR_MAIN_LINK_ACTIVE;
822 
823 		if (DISPLAY_VER(display) >= 8)
824 			val |= DP_PSR_CRC_VERIFICATION;
825 	}
826 
827 	if (crtc_state->req_psr2_sdp_prior_scanline)
828 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
829 
830 	if (crtc_state->enable_psr2_su_region_et)
831 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
832 
833 	if (intel_dp->psr.entry_setup_frames > 0)
834 		val |= DP_PSR_FRAME_CAPTURE;
835 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
836 
837 	val |= DP_PSR_ENABLE;
838 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
839 }
840 
841 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
842 				  const struct intel_crtc_state *crtc_state)
843 {
844 	intel_alpm_enable_sink(intel_dp, crtc_state);
845 
846 	crtc_state->has_panel_replay ?
847 		_panel_replay_enable_sink(intel_dp, crtc_state) :
848 		_psr_enable_sink(intel_dp, crtc_state);
849 
850 	if (intel_dp_is_edp(intel_dp))
851 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
852 }
853 
854 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
855 {
856 	if (CAN_PANEL_REPLAY(intel_dp))
857 		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
858 				   DP_PANEL_REPLAY_ENABLE);
859 }
860 
861 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
862 {
863 	struct intel_display *display = to_intel_display(intel_dp);
864 	struct intel_connector *connector = intel_dp->attached_connector;
865 	u32 val = 0;
866 
867 	if (DISPLAY_VER(display) >= 11)
868 		val |= EDP_PSR_TP4_TIME_0us;
869 
870 	if (display->params.psr_safest_params) {
871 		val |= EDP_PSR_TP1_TIME_2500us;
872 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
873 		goto check_tp3_sel;
874 	}
875 
876 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
877 		val |= EDP_PSR_TP1_TIME_0us;
878 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
879 		val |= EDP_PSR_TP1_TIME_100us;
880 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
881 		val |= EDP_PSR_TP1_TIME_500us;
882 	else
883 		val |= EDP_PSR_TP1_TIME_2500us;
884 
885 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
886 		val |= EDP_PSR_TP2_TP3_TIME_0us;
887 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
888 		val |= EDP_PSR_TP2_TP3_TIME_100us;
889 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
890 		val |= EDP_PSR_TP2_TP3_TIME_500us;
891 	else
892 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
893 
894 	/*
895 	 * WA 0479: hsw,bdw
896 	 * "Do not skip both TP1 and TP2/TP3"
897 	 */
898 	if (DISPLAY_VER(display) < 9 &&
899 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
900 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
901 		val |= EDP_PSR_TP2_TP3_TIME_100us;
902 
903 check_tp3_sel:
904 	if (intel_dp_source_supports_tps3(display) &&
905 	    drm_dp_tps3_supported(intel_dp->dpcd))
906 		val |= EDP_PSR_TP_TP1_TP3;
907 	else
908 		val |= EDP_PSR_TP_TP1_TP2;
909 
910 	return val;
911 }
912 
913 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
914 {
915 	struct intel_display *display = to_intel_display(intel_dp);
916 	struct intel_connector *connector = intel_dp->attached_connector;
917 	int idle_frames;
918 
919 	/* Let's use 6 as the minimum to cover all known cases including the
920 	 * off-by-one issue that HW has in some cases.
921 	 */
922 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
923 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
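	/* e.g. VBT idle_frames 2 with sink_sync_latency 8 gives max(6, 2) -> max(6, 9) = 9 */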
924 
925 	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
926 		idle_frames = 0xf;
927 
928 	return idle_frames;
929 }
930 
931 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
932 {
933 	struct intel_display *display = to_intel_display(intel_dp);
934 	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
935 	struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
936 
937 	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
938 		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
939 		intel_dp->psr.active_non_psr_pipes ||
940 		READ_ONCE(vblank->enabled);
941 }
942 
943 static void hsw_activate_psr1(struct intel_dp *intel_dp)
944 {
945 	struct intel_display *display = to_intel_display(intel_dp);
946 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
947 	u32 max_sleep_time = 0x1f;
948 	u32 val = EDP_PSR_ENABLE;
949 
950 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
951 
952 	if (DISPLAY_VER(display) < 20)
953 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
954 
955 	if (display->platform.haswell)
956 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
957 
958 	if (intel_dp->psr.link_standby)
959 		val |= EDP_PSR_LINK_STANDBY;
960 
961 	val |= intel_psr1_get_tp_time(intel_dp);
962 
963 	if (DISPLAY_VER(display) >= 8)
964 		val |= EDP_PSR_CRC_ENABLE;
965 
966 	if (DISPLAY_VER(display) >= 20)
967 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
968 
969 	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
970 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
971 
972 	/* Wa_16025596647 */
973 	if ((DISPLAY_VER(display) == 20 ||
974 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
975 	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
976 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
977 								       intel_dp->psr.pipe,
978 								       true);
979 }
980 
981 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
982 {
983 	struct intel_display *display = to_intel_display(intel_dp);
984 	struct intel_connector *connector = intel_dp->attached_connector;
985 	u32 val = 0;
986 
987 	if (display->params.psr_safest_params)
988 		return EDP_PSR2_TP2_TIME_2500us;
989 
990 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
991 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
992 		val |= EDP_PSR2_TP2_TIME_50us;
993 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
994 		val |= EDP_PSR2_TP2_TIME_100us;
995 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
996 		val |= EDP_PSR2_TP2_TIME_500us;
997 	else
998 		val |= EDP_PSR2_TP2_TIME_2500us;
999 
1000 	return val;
1001 }
1002 
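/*
 * Wake times of up to 8 lines (both io_wake_lines and fast_wake_lines below
 * 9) fit in a block count of 8 lines, i.e. 2 blocks of 4 lines; anything
 * longer needs 12 lines, i.e. 3 blocks. hsw_activate_psr2() programs
 * TGL_EDP_PSR2_BLOCK_COUNT_NUM_2/3 based on psr2_block_count().
 */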
1003 static int
1004 psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines)
1005 {
1006 	return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
1007 }
1008 
1009 static int psr2_block_count(struct intel_dp *intel_dp)
1010 {
1011 	return psr2_block_count_lines(intel_dp->psr.io_wake_lines,
1012 				      intel_dp->psr.fast_wake_lines) / 4;
1013 }
1014 
1015 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
1016 {
1017 	u8 frames_before_su_entry;
1018 
1019 	frames_before_su_entry = max_t(u8,
1020 				       intel_dp->psr.sink_sync_latency + 1,
1021 				       2);
1022 
1023 	/* Entry setup frames must be at least 1 less than frames before SU entry */
1024 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
1025 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
1026 
1027 	return frames_before_su_entry;
1028 }
1029 
1030 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
1031 {
1032 	struct intel_display *display = to_intel_display(intel_dp);
1033 	struct intel_psr *psr = &intel_dp->psr;
1034 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1035 
1036 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
1037 		u32 val = psr->su_region_et_enabled ?
1038 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
1039 
1040 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1041 			val |= EDP_PSR2_SU_SDP_SCANLINE;
1042 
1043 		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
1044 			       val);
1045 	}
1046 
1047 	intel_de_rmw(display,
1048 		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1049 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1050 
1051 	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1052 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1053 }
1054 
1055 static void hsw_activate_psr2(struct intel_dp *intel_dp)
1056 {
1057 	struct intel_display *display = to_intel_display(intel_dp);
1058 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1059 	u32 val = EDP_PSR2_ENABLE;
1060 	u32 psr_val = 0;
1061 	u8 idle_frames;
1062 
1063 	/* Wa_16025596647 */
1064 	if ((DISPLAY_VER(display) == 20 ||
1065 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1066 	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
1067 		idle_frames = 0;
1068 	else
1069 		idle_frames = psr_compute_idle_frames(intel_dp);
1070 	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
1071 
1072 	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
1073 		val |= EDP_SU_TRACK_ENABLE;
1074 
1075 	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1076 		val |= EDP_Y_COORDINATE_ENABLE;
1077 
1078 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1079 
1080 	val |= intel_psr2_get_tp_time(intel_dp);
1081 
1082 	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1083 		if (psr2_block_count(intel_dp) > 2)
1084 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1085 		else
1086 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1087 	}
1088 
1089 	/* Wa_22012278275:adl-p */
1090 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1091 		static const u8 map[] = {
1092 			2, /* 5 lines */
1093 			1, /* 6 lines */
1094 			0, /* 7 lines */
1095 			3, /* 8 lines */
1096 			6, /* 9 lines */
1097 			5, /* 10 lines */
1098 			4, /* 11 lines */
1099 			7, /* 12 lines */
1100 		};
1101 		/*
1102 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1103 		 * comments below for more information
1104 		 */
1105 		int tmp;
1106 
1107 		tmp = map[intel_dp->psr.io_wake_lines -
1108 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1109 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1110 
1111 		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1112 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1113 	} else if (DISPLAY_VER(display) >= 20) {
1114 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
1115 	} else if (DISPLAY_VER(display) >= 12) {
1116 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
1117 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
1118 	} else if (DISPLAY_VER(display) >= 9) {
1119 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
1120 		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
1121 	}
1122 
1123 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1124 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1125 
1126 	if (DISPLAY_VER(display) >= 20)
1127 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1128 
1129 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1130 		u32 tmp;
1131 
1132 		tmp = intel_de_read(display,
1133 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1134 		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1135 	} else if (HAS_PSR2_SEL_FETCH(display)) {
1136 		intel_de_write(display,
1137 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1138 	}
1139 
1140 	if (intel_dp->psr.su_region_et_enabled)
1141 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1142 
1143 	/*
1144 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1145 	 * recommends keeping this bit unset while PSR2 is enabled.
1146 	 */
1147 	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1148 
1149 	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1150 }
1151 
1152 static bool
1153 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1154 {
1155 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1156 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1157 	else if (DISPLAY_VER(display) >= 12)
1158 		return cpu_transcoder == TRANSCODER_A;
1159 	else if (DISPLAY_VER(display) >= 9)
1160 		return cpu_transcoder == TRANSCODER_EDP;
1161 	else
1162 		return false;
1163 }
1164 
1165 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1166 {
1167 	if (!crtc_state->hw.active)
1168 		return 0;
1169 
1170 	return DIV_ROUND_UP(1000 * 1000,
1171 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1172 }
1173 
1174 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1175 				     u32 idle_frames)
1176 {
1177 	struct intel_display *display = to_intel_display(intel_dp);
1178 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1179 
1180 	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1181 		     EDP_PSR2_IDLE_FRAMES_MASK,
1182 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1183 }
1184 
1185 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1186 {
1187 	struct intel_display *display = to_intel_display(intel_dp);
1188 
1189 	psr2_program_idle_frames(intel_dp, 0);
1190 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1191 }
1192 
1193 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1194 {
1195 	struct intel_display *display = to_intel_display(intel_dp);
1196 
1197 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1198 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1199 }
1200 
1201 static void tgl_dc3co_disable_work(struct work_struct *work)
1202 {
1203 	struct intel_dp *intel_dp =
1204 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1205 
1206 	mutex_lock(&intel_dp->psr.lock);
1207 	/* If delayed work is pending, it is not idle */
1208 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1209 		goto unlock;
1210 
1211 	tgl_psr2_disable_dc3co(intel_dp);
1212 unlock:
1213 	mutex_unlock(&intel_dp->psr.lock);
1214 }
1215 
1216 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1217 {
1218 	if (!intel_dp->psr.dc3co_exitline)
1219 		return;
1220 
1221 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1222 	/* Before PSR2 exit disallow dc3co */
1223 	tgl_psr2_disable_dc3co(intel_dp);
1224 }
1225 
1226 static bool
1227 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1228 			      struct intel_crtc_state *crtc_state)
1229 {
1230 	struct intel_display *display = to_intel_display(intel_dp);
1231 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1232 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1233 	enum port port = dig_port->base.port;
1234 
1235 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1236 		return pipe <= PIPE_B && port <= PORT_B;
1237 	else
1238 		return pipe == PIPE_A && port == PORT_A;
1239 }
1240 
1241 static void
1242 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1243 				  struct intel_crtc_state *crtc_state)
1244 {
1245 	struct intel_display *display = to_intel_display(intel_dp);
1246 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1247 	struct i915_power_domains *power_domains = &display->power.domains;
1248 	u32 exit_scanlines;
1249 
1250 	/*
1251 	 * FIXME: The DC3CO activation/deactivation sequence has changed, so keep
1252 	 * DC3CO disabled until the changed sequence is implemented here.
1253 	 * B.Specs:49196
1254 	 */
1255 	return;
1256 
1257 	/*
1258 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1259 	 * TODO: when the issue is addressed, this restriction should be removed.
1260 	 */
1261 	if (crtc_state->enable_psr2_sel_fetch)
1262 		return;
1263 
1264 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1265 		return;
1266 
1267 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1268 		return;
1269 
1270 	/* Wa_16011303918:adl-p */
1271 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1272 		return;
1273 
1274 	/*
1275 	 * DC3CO Exit time 200us B.Spec 49196
1276 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1277 	 */
1278 	exit_scanlines =
1279 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1280 
1281 	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1282 		return;
1283 
1284 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1285 }
1286 
1287 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1288 					      struct intel_crtc_state *crtc_state)
1289 {
1290 	struct intel_display *display = to_intel_display(intel_dp);
1291 
1292 	if (!display->params.enable_psr2_sel_fetch &&
1293 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1294 		drm_dbg_kms(display->drm,
1295 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1296 		return false;
1297 	}
1298 
1299 	if (crtc_state->uapi.async_flip) {
1300 		drm_dbg_kms(display->drm,
1301 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1302 		return false;
1303 	}
1304 
1305 	return crtc_state->enable_psr2_sel_fetch = true;
1306 }
1307 
1308 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1309 				   struct intel_crtc_state *crtc_state)
1310 {
1311 	struct intel_display *display = to_intel_display(intel_dp);
1312 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1313 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1314 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1315 	u16 y_granularity = 0;
1316 
1317 	/* PSR2 HW only sends full lines, so we only need to validate the width */
1318 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1319 		return false;
1320 
1321 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1322 		return false;
1323 
1324 	/* HW tracking is only aligned to 4 lines */
1325 	if (!crtc_state->enable_psr2_sel_fetch)
1326 		return intel_dp->psr.su_y_granularity == 4;
1327 
1328 	/*
1329 	 * adl-p and mtl+ platforms have 1 line granularity.
1330 	 * For other platforms with SW tracking we can adjust the y coordinates
1331 	 * to match the sink requirement if it is a multiple of 4.
1332 	 */
1333 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1334 		y_granularity = intel_dp->psr.su_y_granularity;
1335 	else if (intel_dp->psr.su_y_granularity <= 2)
1336 		y_granularity = 4;
1337 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1338 		y_granularity = intel_dp->psr.su_y_granularity;
1339 
1340 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1341 		return false;
1342 
1343 	if (crtc_state->dsc.compression_enable &&
1344 	    vdsc_cfg->slice_height % y_granularity)
1345 		return false;
1346 
1347 	crtc_state->su_y_granularity = y_granularity;
1348 	return true;
1349 }
1350 
1351 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1352 							struct intel_crtc_state *crtc_state)
1353 {
1354 	struct intel_display *display = to_intel_display(intel_dp);
1355 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1356 	u32 hblank_total, hblank_ns, req_ns;
1357 
1358 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1359 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1360 
1361 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1362 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1363 
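	/*
	 * Worked example with illustrative numbers: 4 lanes on a 2.7 GHz link
	 * (port_clock 270000, i.e. a 270 MHz symbol clock) gives
	 * req_ns = (15 + 11) * 1000 / 270 ~= 96 ns, so e.g. a 160 pixel wide
	 * hblank at a 300 MHz pixel clock (hblank_ns ~= 533 ns) passes the
	 * check below without needing the SDP prior scanline indication.
	 */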
1364 	if ((hblank_ns - req_ns) > 100)
1365 		return true;
1366 
1367 	/* Not supported <13 / Wa_22012279113:adl-p */
1368 	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1369 		return false;
1370 
1371 	crtc_state->req_psr2_sdp_prior_scanline = true;
1372 	return true;
1373 }
1374 
1375 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1376 					const struct drm_display_mode *adjusted_mode)
1377 {
1378 	struct intel_display *display = to_intel_display(intel_dp);
1379 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1380 	int entry_setup_frames = 0;
1381 
1382 	if (psr_setup_time < 0) {
1383 		drm_dbg_kms(display->drm,
1384 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1385 			    intel_dp->psr_dpcd[1]);
1386 		return -ETIME;
1387 	}
1388 
1389 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1390 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1391 		if (DISPLAY_VER(display) >= 20) {
1392 			/* setup entry frames can be up to 3 frames */
1393 			entry_setup_frames = 1;
1394 			drm_dbg_kms(display->drm,
1395 				    "PSR setup entry frames %d\n",
1396 				    entry_setup_frames);
1397 		} else {
1398 			drm_dbg_kms(display->drm,
1399 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1400 				    psr_setup_time);
1401 			return -ETIME;
1402 		}
1403 	}
1404 
1405 	return entry_setup_frames;
1406 }
1407 
1408 static
1409 int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
1410 				       bool needs_panel_replay,
1411 				       bool needs_sel_update)
1412 {
1413 	struct intel_display *display = to_intel_display(crtc_state);
1414 
1415 	if (!crtc_state->has_psr)
1416 		return 0;
1417 
1418 	/* Wa_14015401596 */
1419 	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
1420 		return 1;
1421 
1422 	/* The rest is for SRD_STATUS, needed on LunarLake and onwards */
1423 	if (DISPLAY_VER(display) < 20)
1424 		return 0;
1425 
1426 	/*
1427 	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
1428 	 *
1429 	 * To deterministically capture the transition of the state machine
1430 	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
1431 	 * one line after the non-delayed V. Blank.
1432 	 *
1433 	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
1434 	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
1435 	 * - TRANS_VTOTAL[ Vertical Active ])
1436 	 *
1437 	 * SRD_STATUS is used only by PSR1 on PantherLake.
1438 	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
1439 	 */
1440 
1441 	if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
1442 					   needs_sel_update))
1443 		return 0;
1444 	else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
1445 					       intel_crtc_has_type(crtc_state,
1446 								   INTEL_OUTPUT_EDP)))
1447 		return 0;
1448 	else
1449 		return 1;
1450 }
1451 
1452 static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
1453 					int vblank,
1454 					int wake_lines)
1455 {
1456 	if (crtc_state->req_psr2_sdp_prior_scanline)
1457 		vblank -= 1;
1458 
1459 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1460 	if (vblank < wake_lines)
1461 		return false;
1462 
1463 	return true;
1464 }
1465 
1466 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1467 				       const struct intel_crtc_state *crtc_state,
1468 				       bool aux_less,
1469 				       bool needs_panel_replay,
1470 				       bool needs_sel_update)
1471 {
1472 	struct intel_display *display = to_intel_display(intel_dp);
1473 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1474 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1475 	int wake_lines;
1476 	int scl = _intel_psr_min_set_context_latency(crtc_state,
1477 						     needs_panel_replay,
1478 						     needs_sel_update);
1479 	vblank -= scl;
1480 
1481 	if (aux_less)
1482 		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
1483 	else
1484 		wake_lines = DISPLAY_VER(display) < 20 ?
1485 			psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
1486 					       crtc_state->alpm_state.fast_wake_lines) :
1487 			crtc_state->alpm_state.io_wake_lines;
1488 
1489 	/*
1490 	 * Guardband has not been computed yet, so we conservatively check if the
1491 	 * full vblank duration is sufficient to accommodate wake line requirements
1492 	 * for PSR features like Panel Replay and Selective Update.
1493 	 *
1494 	 * Once the actual guardband is available, a more accurate validation is
1495 	 * performed in intel_psr_compute_config_late(), and PSR features are
1496 	 * disabled if wake lines exceed the available guardband.
1497 	 */
1498 	return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
1499 }
1500 
1501 static bool alpm_config_valid(struct intel_dp *intel_dp,
1502 			      struct intel_crtc_state *crtc_state,
1503 			      bool aux_less,
1504 			      bool needs_panel_replay,
1505 			      bool needs_sel_update)
1506 {
1507 	struct intel_display *display = to_intel_display(intel_dp);
1508 
1509 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1510 		drm_dbg_kms(display->drm,
1511 			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1512 		return false;
1513 	}
1514 
1515 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
1516 					needs_panel_replay, needs_sel_update)) {
1517 		drm_dbg_kms(display->drm,
1518 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1519 		return false;
1520 	}
1521 
1522 	return true;
1523 }
1524 
1525 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1526 				    struct intel_crtc_state *crtc_state)
1527 {
1528 	struct intel_display *display = to_intel_display(intel_dp);
1529 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1530 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1531 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1532 
1533 	if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
1534 		return false;
1535 
1536 	/* JSL and EHL only support eDP 1.3 */
1537 	if (display->platform.jasperlake || display->platform.elkhartlake) {
1538 		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1539 		return false;
1540 	}
1541 
1542 	/* Wa_16011181250 */
1543 	if (display->platform.rocketlake || display->platform.alderlake_s ||
1544 	    display->platform.dg2) {
1545 		drm_dbg_kms(display->drm,
1546 			    "PSR2 is defeatured for this platform\n");
1547 		return false;
1548 	}
1549 
1550 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1551 		drm_dbg_kms(display->drm,
1552 			    "PSR2 not completely functional in this stepping\n");
1553 		return false;
1554 	}
1555 
1556 	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1557 		drm_dbg_kms(display->drm,
1558 			    "PSR2 not supported in transcoder %s\n",
1559 			    transcoder_name(crtc_state->cpu_transcoder));
1560 		return false;
1561 	}
1562 
1563 	/*
1564 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1565 	 * resolution requires DSC to be enabled, priority is given to DSC
1566 	 * over PSR2.
1567 	 */
1568 	if (crtc_state->dsc.compression_enable &&
1569 	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
1570 		drm_dbg_kms(display->drm,
1571 			    "PSR2 cannot be enabled since DSC is enabled\n");
1572 		return false;
1573 	}
1574 
1575 	if (DISPLAY_VER(display) >= 20) {
1576 		psr_max_h = crtc_hdisplay;
1577 		psr_max_v = crtc_vdisplay;
1578 		max_bpp = crtc_state->pipe_bpp;
1579 	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1580 		psr_max_h = 5120;
1581 		psr_max_v = 3200;
1582 		max_bpp = 30;
1583 	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1584 		psr_max_h = 4096;
1585 		psr_max_v = 2304;
1586 		max_bpp = 24;
1587 	} else if (DISPLAY_VER(display) == 9) {
1588 		psr_max_h = 3640;
1589 		psr_max_v = 2304;
1590 		max_bpp = 24;
1591 	}
1592 
1593 	if (crtc_state->pipe_bpp > max_bpp) {
1594 		drm_dbg_kms(display->drm,
1595 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1596 			    crtc_state->pipe_bpp, max_bpp);
1597 		return false;
1598 	}
1599 
1600 	/* Wa_16011303918:adl-p */
1601 	if (crtc_state->vrr.enable &&
1602 	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1603 		drm_dbg_kms(display->drm,
1604 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1605 		return false;
1606 	}
1607 
1608 	if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
1609 		return false;
1610 
1611 	if (!crtc_state->enable_psr2_sel_fetch &&
1612 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1613 		drm_dbg_kms(display->drm,
1614 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1615 			    crtc_hdisplay, crtc_vdisplay,
1616 			    psr_max_h, psr_max_v);
1617 		return false;
1618 	}
1619 
1620 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1621 
1622 	return true;
1623 }
1624 
1625 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1626 					  struct intel_crtc_state *crtc_state)
1627 {
1628 	struct intel_display *display = to_intel_display(intel_dp);
1629 
1630 	if (HAS_PSR2_SEL_FETCH(display) &&
1631 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1632 	    !HAS_PSR_HW_TRACKING(display)) {
1633 		drm_dbg_kms(display->drm,
1634 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1635 		goto unsupported;
1636 	}
1637 
1638 	if (!sel_update_global_enabled(intel_dp)) {
1639 		drm_dbg_kms(display->drm,
1640 			    "Selective update disabled by flag\n");
1641 		goto unsupported;
1642 	}
1643 
1644 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1645 		goto unsupported;
1646 
1647 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1648 		drm_dbg_kms(display->drm,
1649 			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1650 		goto unsupported;
1651 	}
1652 
1653 	if (crtc_state->has_panel_replay) {
1654 		if (DISPLAY_VER(display) < 14)
1655 			goto unsupported;
1656 
1657 		if (!intel_dp->psr.sink_panel_replay_su_support)
1658 			goto unsupported;
1659 
1660 		if (intel_dsc_enabled_on_link(crtc_state) &&
1661 		    intel_dp->psr.sink_panel_replay_dsc_support !=
1662 		    INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
1663 			drm_dbg_kms(display->drm,
1664 				    "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
1665 			goto unsupported;
1666 		}
1667 	}
1668 
1669 	if (crtc_state->crc_enabled) {
1670 		drm_dbg_kms(display->drm,
1671 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1672 		goto unsupported;
1673 	}
1674 
1675 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1676 		drm_dbg_kms(display->drm,
1677 			    "Selective update not enabled, SU granularity not compatible\n");
1678 		goto unsupported;
1679 	}
1680 
1681 	crtc_state->enable_psr2_su_region_et =
1682 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1683 
1684 	return true;
1685 
1686 unsupported:
1687 	crtc_state->enable_psr2_sel_fetch = false;
1688 	return false;
1689 }
1690 
1691 static bool _psr_compute_config(struct intel_dp *intel_dp,
1692 				struct intel_crtc_state *crtc_state)
1693 {
1694 	struct intel_display *display = to_intel_display(intel_dp);
1695 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1696 	int entry_setup_frames;
1697 
1698 	if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
1699 		return false;
1700 
1701 	/*
1702 	 * Currently PSR doesn't work reliably with VRR enabled.
1703 	 */
1704 	if (crtc_state->vrr.enable)
1705 		return false;
1706 
1707 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1708 
1709 	if (entry_setup_frames >= 0) {
1710 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1711 	} else {
1712 		crtc_state->no_psr_reason = "PSR setup timing not met";
1713 		drm_dbg_kms(display->drm,
1714 			    "PSR condition failed: PSR setup timing not met\n");
1715 		return false;
1716 	}
1717 
1718 	return true;
1719 }
1720 
1721 static bool
1722 _panel_replay_compute_config(struct intel_dp *intel_dp,
1723 			     struct intel_crtc_state *crtc_state,
1724 			     const struct drm_connector_state *conn_state)
1725 {
1726 	struct intel_display *display = to_intel_display(intel_dp);
1727 	struct intel_connector *connector =
1728 		to_intel_connector(conn_state->connector);
1729 	struct intel_hdcp *hdcp = &connector->hdcp;
1730 
1731 	if (!CAN_PANEL_REPLAY(intel_dp))
1732 		return false;
1733 
1734 	if (!panel_replay_global_enabled(intel_dp)) {
1735 		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1736 		return false;
1737 	}
1738 
1739 	if (crtc_state->crc_enabled) {
1740 		drm_dbg_kms(display->drm,
1741 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1742 		return false;
1743 	}
1744 
1745 	if (intel_dsc_enabled_on_link(crtc_state) &&
1746 	    intel_dp->psr.sink_panel_replay_dsc_support ==
1747 	    INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
1748 		drm_dbg_kms(display->drm,
1749 			    "Panel Replay not enabled because it's not supported with DSC\n");
1750 		return false;
1751 	}
1752 
1753 	if (!intel_dp_is_edp(intel_dp))
1754 		return true;
1755 
1756 	/* Remaining checks are for eDP only */
1757 
1758 	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1759 	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1760 		return false;
1761 
1762 	/* 128b/132b Panel Replay is not supported on eDP */
1763 	if (intel_dp_is_uhbr(crtc_state)) {
1764 		drm_dbg_kms(display->drm,
1765 			    "Panel Replay is not supported with 128b/132b\n");
1766 		return false;
1767 	}
1768 
1769 	/* HW will not allow Panel Replay on eDP when HDCP is enabled */
1770 	if (conn_state->content_protection ==
1771 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1772 	    (conn_state->content_protection ==
1773 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1774 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1775 		drm_dbg_kms(display->drm,
1776 			    "Panel Replay is not supported with HDCP\n");
1777 		return false;
1778 	}
1779 
1780 	if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
1781 		return false;
1782 
1783 	return true;
1784 }
1785 
1786 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1787 					   struct intel_crtc_state *crtc_state)
1788 {
1789 	struct intel_display *display = to_intel_display(intel_dp);
1790 
1791 	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1792 		!crtc_state->has_sel_update);
1793 }
1794 
1795 static
1796 void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
1797 				 struct intel_crtc_state *crtc_state)
1798 {
1799 	struct intel_display *display = to_intel_display(intel_dp);
1800 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1801 	struct intel_crtc *crtc;
1802 	u8 active_pipes = 0;
1803 
1804 	/* Wa_16025596647 */
1805 	if (DISPLAY_VER(display) != 20 &&
1806 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1807 		return;
1808 
1809 	/* Not needed by Panel Replay */
1810 	if (crtc_state->has_panel_replay)
1811 		return;
1812 
1813 	/* We ignore possible secondary PSR/Panel Replay capable eDP */
1814 	for_each_intel_crtc(display->drm, crtc)
1815 		active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1816 
1817 	active_pipes = intel_calc_active_pipes(state, active_pipes);
1818 
1819 	crtc_state->active_non_psr_pipes = active_pipes &
1820 		~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1821 }
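
/*
 * For example (illustrative), with pipes A, B and C active and PSR being
 * enabled on pipe A, active_non_psr_pipes ends up as BIT(PIPE_B) |
 * BIT(PIPE_C), i.e. the set of active pipes other than the PSR pipe that
 * the Wa_16025596647 handling is interested in.
 */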
1822 
1823 void intel_psr_compute_config(struct intel_dp *intel_dp,
1824 			      struct intel_crtc_state *crtc_state,
1825 			      struct drm_connector_state *conn_state)
1826 {
1827 	struct intel_display *display = to_intel_display(intel_dp);
1828 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1829 
1830 	if (!psr_global_enabled(intel_dp)) {
1831 		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1832 		return;
1833 	}
1834 
1835 	if (intel_dp->psr.sink_not_reliable) {
1836 		drm_dbg_kms(display->drm,
1837 			    "PSR sink implementation is not reliable\n");
1838 		return;
1839 	}
1840 
1841 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1842 		drm_dbg_kms(display->drm,
1843 			    "PSR condition failed: Interlaced mode enabled\n");
1844 		return;
1845 	}
1846 
1847 	/*
1848 	 * FIXME figure out what is wrong with PSR+joiner and
1849 	 * fix it. Presumably something related to the fact that
1850 	 * PSR is a transcoder level feature.
1851 	 */
1852 	if (crtc_state->joiner_pipes) {
1853 		drm_dbg_kms(display->drm,
1854 			    "PSR disabled due to joiner\n");
1855 		return;
1856 	}
1857 
1858 	/* Only used for state verification. */
1859 	crtc_state->panel_replay_dsc_support = intel_dp->psr.sink_panel_replay_dsc_support;
1860 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1861 								    crtc_state,
1862 								    conn_state);
1863 
1864 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1865 		_psr_compute_config(intel_dp, crtc_state);
1866 
1867 	if (!crtc_state->has_psr)
1868 		return;
1869 
1870 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1871 }
1872 
1873 void intel_psr_get_config(struct intel_encoder *encoder,
1874 			  struct intel_crtc_state *pipe_config)
1875 {
1876 	struct intel_display *display = to_intel_display(encoder);
1877 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1878 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1879 	struct intel_dp *intel_dp;
1880 	u32 val;
1881 
1882 	if (!dig_port)
1883 		return;
1884 
1885 	intel_dp = &dig_port->dp;
1886 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1887 		return;
1888 
1889 	mutex_lock(&intel_dp->psr.lock);
1890 	if (!intel_dp->psr.enabled)
1891 		goto unlock;
1892 
1893 	if (intel_dp->psr.panel_replay_enabled) {
1894 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1895 	} else {
1896 		/*
1897 		 * Not possible to read the EDP_PSR/PSR2_CTL registers, as they
1898 		 * are toggled by frontbuffer tracking and other mechanisms.
1899 		 */
1900 		pipe_config->has_psr = true;
1901 	}
1902 
1903 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1904 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1905 
1906 	if (!intel_dp->psr.sel_update_enabled)
1907 		goto unlock;
1908 
1909 	if (HAS_PSR2_SEL_FETCH(display)) {
1910 		val = intel_de_read(display,
1911 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1912 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1913 			pipe_config->enable_psr2_sel_fetch = true;
1914 	}
1915 
1916 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1917 
1918 	if (DISPLAY_VER(display) >= 12) {
1919 		val = intel_de_read(display,
1920 				    TRANS_EXITLINE(display, cpu_transcoder));
1921 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1922 	}
1923 unlock:
1924 	mutex_unlock(&intel_dp->psr.lock);
1925 }
1926 
1927 static void intel_psr_activate(struct intel_dp *intel_dp)
1928 {
1929 	struct intel_display *display = to_intel_display(intel_dp);
1930 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1931 
1932 	drm_WARN_ON(display->drm,
1933 		    transcoder_has_psr2(display, cpu_transcoder) &&
1934 		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1935 
1936 	drm_WARN_ON(display->drm,
1937 		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1938 
1939 	drm_WARN_ON(display->drm, intel_dp->psr.active);
1940 
1941 	drm_WARN_ON(display->drm, !intel_dp->psr.enabled);
1942 
1943 	lockdep_assert_held(&intel_dp->psr.lock);
1944 
1945 	/* PSR1, PSR2 and Panel Replay are mutually exclusive. */
1946 	if (intel_dp->psr.panel_replay_enabled)
1947 		dg2_activate_panel_replay(intel_dp);
1948 	else if (intel_dp->psr.sel_update_enabled)
1949 		hsw_activate_psr2(intel_dp);
1950 	else
1951 		hsw_activate_psr1(intel_dp);
1952 
1953 	intel_dp->psr.active = true;
1954 	intel_dp->psr.no_psr_reason = NULL;
1955 }
1956 
1957 /*
1958  * Wa_16013835468
1959  * Wa_14015648006
1960  */
1961 static void wm_optimization_wa(struct intel_dp *intel_dp,
1962 			       const struct intel_crtc_state *crtc_state)
1963 {
1964 	struct intel_display *display = to_intel_display(intel_dp);
1965 	enum pipe pipe = intel_dp->psr.pipe;
1966 	bool activate = false;
1967 
1968 	/* Wa_14015648006 */
1969 	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1970 		activate = true;
1971 
1972 	/* Wa_16013835468 */
1973 	if (DISPLAY_VER(display) == 12 &&
1974 	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1975 	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1976 		activate = true;
1977 
1978 	if (activate)
1979 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1980 			     0, LATENCY_REPORTING_REMOVED(pipe));
1981 	else
1982 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1983 			     LATENCY_REPORTING_REMOVED(pipe), 0);
1984 }
1985 
1986 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1987 				    const struct intel_crtc_state *crtc_state)
1988 {
1989 	struct intel_display *display = to_intel_display(intel_dp);
1990 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1991 	u32 mask = 0;
1992 
1993 	/*
1994 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1995 	 * SKL+ use hardcoded values for PSR AUX transactions.
1996 	 */
1997 	if (DISPLAY_VER(display) < 9)
1998 		hsw_psr_setup_aux(intel_dp);
1999 
2000 	/*
2001 	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
2002 	 * mask LPSP to avoid a dependency on other drivers that might block
2003 	 * runtime_pm, besides preventing other HW tracking issues, now that
2004 	 * we can rely on frontbuffer tracking.
2005 	 *
2006 	 * From bspec, prior to LunarLake:
2007 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
2008 	 * panel replay mode.
2009 	 *
2010 	 * From bspec, beyond LunarLake:
2011 	 * Panel Replay on DP: No bits are applicable
2012 	 * Panel Replay on eDP: All bits are applicable
2013 	 */
2014 	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
2015 		mask = EDP_PSR_DEBUG_MASK_HPD;
2016 
2017 	if (intel_dp_is_edp(intel_dp)) {
2018 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
2019 
2020 		/*
2021 		 * For some unknown reason on HSW non-ULT (or at least on
2022 		 * Dell Latitude E6540) external displays start to flicker
2023 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
2024 		 * higher than should be possible with an external display.
2025 		 * As a workaround leave LPSP unmasked to prevent PSR entry
2026 		 * when external displays are active.
2027 		 */
2028 		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
2029 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
2030 
2031 		if (DISPLAY_VER(display) < 20)
2032 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
2033 
2034 		/*
2035 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
2036 		 * registers in order to keep the CURSURFLIVE tricks working :(
2037 		 */
2038 		if (IS_DISPLAY_VER(display, 9, 10))
2039 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
2040 
2041 		/* allow PSR with sprite enabled */
2042 		if (display->platform.haswell)
2043 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
2044 	}
2045 
2046 	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
2047 
2048 	psr_irq_control(intel_dp);
2049 
2050 	/*
2051 	 * TODO: if future platforms support DC3CO in more than one
2052 	 * transcoder, EXITLINE will need to be unset when disabling PSR
2053 	 */
2054 	if (intel_dp->psr.dc3co_exitline)
2055 		intel_de_rmw(display,
2056 			     TRANS_EXITLINE(display, cpu_transcoder),
2057 			     EXITLINE_MASK,
2058 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
2059 
2060 	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
2061 		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
2062 			     intel_dp->psr.psr2_sel_fetch_enabled ?
2063 			     IGNORE_PSR2_HW_TRACKING : 0);
2064 
2065 	/*
2066 	 * Wa_16013835468
2067 	 * Wa_14015648006
2068 	 */
2069 	wm_optimization_wa(intel_dp, crtc_state);
2070 
2071 	if (intel_dp->psr.sel_update_enabled) {
2072 		if (DISPLAY_VER(display) == 9)
2073 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
2074 				     PSR2_VSC_ENABLE_PROG_HEADER |
2075 				     PSR2_ADD_VERTICAL_LINE_COUNT);
2076 
2077 		/*
2078 		 * Wa_16014451276:adlp,mtl[a0,b0]
2079 		 * All supported adlp panels have 1-based X granularity; this may
2080 		 * cause issues if unsupported panels are used.
2081 		 */
2082 		if (!intel_dp->psr.panel_replay_enabled &&
2083 		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2084 		     display->platform.alderlake_p))
2085 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
2086 				     0, ADLP_1_BASED_X_GRANULARITY);
2087 
2088 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2089 		if (!intel_dp->psr.panel_replay_enabled &&
2090 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2091 			intel_de_rmw(display,
2092 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2093 				     0,
2094 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
2095 		else if (display->platform.alderlake_p)
2096 			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
2097 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
2098 	}
2099 
2100 	/* Wa_16025596647 */
2101 	if ((DISPLAY_VER(display) == 20 ||
2102 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2103 	    !intel_dp->psr.panel_replay_enabled)
2104 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
2105 
2106 	intel_alpm_configure(intel_dp, crtc_state);
2107 }
2108 
2109 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
2110 {
2111 	struct intel_display *display = to_intel_display(intel_dp);
2112 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2113 	u32 val;
2114 
2115 	if (intel_dp->psr.panel_replay_enabled)
2116 		goto no_err;
2117 
2118 	/*
2119 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
2120 	 * will still keep the error set even after the reset done in the
2121 	 * irq_preinstall and irq_uninstall hooks.
2122 	 * Enabling PSR in this situation causes the screen to freeze the
2123 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
2124 	 * to avoid any rendering problems.
2125 	 */
2126 	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
2127 	val &= psr_irq_psr_error_bit_get(intel_dp);
2128 	if (val) {
2129 		intel_dp->psr.sink_not_reliable = true;
2130 		drm_dbg_kms(display->drm,
2131 			    "PSR interruption error set, not enabling PSR\n");
2132 		return false;
2133 	}
2134 
2135 no_err:
2136 	return true;
2137 }
2138 
2139 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
2140 				    const struct intel_crtc_state *crtc_state)
2141 {
2142 	struct intel_display *display = to_intel_display(intel_dp);
2143 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2144 	u32 val;
2145 
2146 	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
2147 
2148 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
2149 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
2150 	intel_dp->psr.busy_frontbuffer_bits = 0;
2151 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
2152 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
2153 	/* DC5/DC6 requires at least 6 idle frames */
2154 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
2155 	intel_dp->psr.dc3co_exit_delay = val;
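	/*
	 * Illustrative arithmetic: at a 60 Hz refresh rate the frame time is
	 * roughly 16667 us, so the six idle frames above amount to a
	 * dc3co_exit_delay of about 100 ms (converted to jiffies).
	 */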
2156 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
2157 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
2158 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
2159 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2160 	intel_dp->psr.req_psr2_sdp_prior_scanline =
2161 		crtc_state->req_psr2_sdp_prior_scanline;
2162 	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
2163 	intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
2164 	intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
2165 	intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
2166 
2167 	if (!psr_interrupt_error_check(intel_dp))
2168 		return;
2169 
2170 	if (intel_dp->psr.panel_replay_enabled)
2171 		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2172 	else
2173 		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2174 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2175 
2176 	/*
2177 	 * Sink PSR/Panel Replay is enabled here only for PSR; the Panel Replay
2178 	 * enable bit has already been written at this point. Sink ALPM is
2179 	 * enabled here for both PSR and Panel Replay. See
2180 	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2181 	 *  - Selective Update
2182 	 *  - Region Early Transport
2183 	 *  - Selective Update Region Scanline Capture
2184 	 *  - VSC_SDP_CRC
2185 	 *  - HPD on different Errors
2186 	 *  - CRC verification
2187 	 * are written for PSR and Panel Replay here.
2188 	 */
2189 	intel_psr_enable_sink(intel_dp, crtc_state);
2190 
2191 	if (intel_dp_is_edp(intel_dp))
2192 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2193 
2194 	intel_psr_enable_source(intel_dp, crtc_state);
2195 	intel_dp->psr.enabled = true;
2196 	intel_dp->psr.pause_counter = 0;
2197 
2198 	/*
2199 	 * Link_ok is sticky and set here on PSR enable. We can assume link
2200 	 * training is complete as we never continue to PSR enable with an
2201 	 * untrained link. Link_ok is kept set until the first short pulse
2202 	 * interrupt. This is targeted at working around panels that report a
2203 	 * bad link after PSR is enabled.
2204 	 */
2205 	intel_dp->psr.link_ok = true;
2206 
2207 	intel_psr_activate(intel_dp);
2208 }
2209 
2210 static void intel_psr_exit(struct intel_dp *intel_dp)
2211 {
2212 	struct intel_display *display = to_intel_display(intel_dp);
2213 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2214 	u32 val;
2215 
2216 	if (!intel_dp->psr.active) {
2217 		if (transcoder_has_psr2(display, cpu_transcoder)) {
2218 			val = intel_de_read(display,
2219 					    EDP_PSR2_CTL(display, cpu_transcoder));
2220 			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2221 		}
2222 
2223 		val = intel_de_read(display,
2224 				    psr_ctl_reg(display, cpu_transcoder));
2225 		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2226 
2227 		return;
2228 	}
2229 
2230 	if (intel_dp->psr.panel_replay_enabled) {
2231 		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2232 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2233 	} else if (intel_dp->psr.sel_update_enabled) {
2234 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2235 
2236 		val = intel_de_rmw(display,
2237 				   EDP_PSR2_CTL(display, cpu_transcoder),
2238 				   EDP_PSR2_ENABLE, 0);
2239 
2240 		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2241 	} else {
2242 		if ((DISPLAY_VER(display) == 20 ||
2243 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2244 			intel_dp->psr.pkg_c_latency_used)
2245 			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
2246 								       intel_dp->psr.pipe,
2247 								       false);
2248 
2249 		val = intel_de_rmw(display,
2250 				   psr_ctl_reg(display, cpu_transcoder),
2251 				   EDP_PSR_ENABLE, 0);
2252 
2253 		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2254 	}
2255 	intel_dp->psr.active = false;
2256 }
2257 
2258 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2259 {
2260 	struct intel_display *display = to_intel_display(intel_dp);
2261 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2262 	i915_reg_t psr_status;
2263 	u32 psr_status_mask;
2264 
2265 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2266 					  intel_dp->psr.panel_replay_enabled)) {
2267 		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2268 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2269 	} else {
2270 		psr_status = psr_status_reg(display, cpu_transcoder);
2271 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2272 	}
2273 
2274 	/* Wait till PSR is idle */
2275 	if (intel_de_wait_for_clear(display, psr_status,
2276 				    psr_status_mask, 2000))
2277 		drm_err(display->drm, "Timed out waiting for PSR idle state\n");
2278 }
2279 
2280 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2281 {
2282 	struct intel_display *display = to_intel_display(intel_dp);
2283 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2284 
2285 	lockdep_assert_held(&intel_dp->psr.lock);
2286 
2287 	if (!intel_dp->psr.enabled)
2288 		return;
2289 
2290 	if (intel_dp->psr.panel_replay_enabled)
2291 		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2292 	else
2293 		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2294 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2295 
2296 	intel_psr_exit(intel_dp);
2297 	intel_psr_wait_exit_locked(intel_dp);
2298 
2299 	/*
2300 	 * Wa_16013835468
2301 	 * Wa_14015648006
2302 	 */
2303 	if (DISPLAY_VER(display) >= 11)
2304 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2305 			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2306 
2307 	if (intel_dp->psr.sel_update_enabled) {
2308 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2309 		if (!intel_dp->psr.panel_replay_enabled &&
2310 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2311 			intel_de_rmw(display,
2312 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2313 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2314 		else if (display->platform.alderlake_p)
2315 			intel_de_rmw(display, CLKGATE_DIS_MISC,
2316 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2317 	}
2318 
2319 	if (intel_dp_is_edp(intel_dp))
2320 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2321 
2322 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
2323 		intel_alpm_disable(intel_dp);
2324 
2325 	/* Disable PSR on Sink */
2326 	if (!intel_dp->psr.panel_replay_enabled) {
2327 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2328 
2329 		if (intel_dp->psr.sel_update_enabled)
2330 			drm_dp_dpcd_writeb(&intel_dp->aux,
2331 					   DP_RECEIVER_ALPM_CONFIG, 0);
2332 	}
2333 
2334 	/* Wa_16025596647 */
2335 	if ((DISPLAY_VER(display) == 20 ||
2336 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2337 	    !intel_dp->psr.panel_replay_enabled)
2338 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
2339 
2340 	intel_dp->psr.enabled = false;
2341 	intel_dp->psr.panel_replay_enabled = false;
2342 	intel_dp->psr.sel_update_enabled = false;
2343 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2344 	intel_dp->psr.su_region_et_enabled = false;
2345 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2346 	intel_dp->psr.active_non_psr_pipes = 0;
2347 	intel_dp->psr.pkg_c_latency_used = 0;
2348 }
2349 
2350 /**
2351  * intel_psr_disable - Disable PSR
2352  * @intel_dp: Intel DP
2353  * @old_crtc_state: old CRTC state
2354  *
2355  * This function needs to be called before disabling pipe.
2356  */
2357 void intel_psr_disable(struct intel_dp *intel_dp,
2358 		       const struct intel_crtc_state *old_crtc_state)
2359 {
2360 	struct intel_display *display = to_intel_display(intel_dp);
2361 
2362 	if (!old_crtc_state->has_psr)
2363 		return;
2364 
2365 	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2366 			!CAN_PANEL_REPLAY(intel_dp)))
2367 		return;
2368 
2369 	mutex_lock(&intel_dp->psr.lock);
2370 
2371 	intel_psr_disable_locked(intel_dp);
2372 
2373 	intel_dp->psr.link_ok = false;
2374 
2375 	mutex_unlock(&intel_dp->psr.lock);
2376 	cancel_work_sync(&intel_dp->psr.work);
2377 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2378 }
2379 
2380 /**
2381  * intel_psr_pause - Pause PSR
2382  * @intel_dp: Intel DP
2383  *
2384  * This function needs to be called after enabling PSR.
2385  */
2386 void intel_psr_pause(struct intel_dp *intel_dp)
2387 {
2388 	struct intel_psr *psr = &intel_dp->psr;
2389 
2390 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2391 		return;
2392 
2393 	mutex_lock(&psr->lock);
2394 
2395 	if (!psr->enabled) {
2396 		mutex_unlock(&psr->lock);
2397 		return;
2398 	}
2399 
2400 	if (intel_dp->psr.pause_counter++ == 0) {
2401 		intel_psr_exit(intel_dp);
2402 		intel_psr_wait_exit_locked(intel_dp);
2403 	}
2404 
2405 	mutex_unlock(&psr->lock);
2406 
2407 	cancel_work_sync(&psr->work);
2408 	cancel_delayed_work_sync(&psr->dc3co_work);
2409 }
2410 
2411 /**
2412  * intel_psr_resume - Resume PSR
2413  * @intel_dp: Intel DP
2414  *
2415  * This function needs to be called after pausing PSR.
2416  */
2417 void intel_psr_resume(struct intel_dp *intel_dp)
2418 {
2419 	struct intel_display *display = to_intel_display(intel_dp);
2420 	struct intel_psr *psr = &intel_dp->psr;
2421 
2422 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2423 		return;
2424 
2425 	mutex_lock(&psr->lock);
2426 
2427 	if (!psr->enabled)
2428 		goto out;
2429 
2430 	if (!psr->pause_counter) {
2431 		drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2432 		goto out;
2433 	}
2434 
2435 	if (--intel_dp->psr.pause_counter == 0)
2436 		intel_psr_activate(intel_dp);
2437 
2438 out:
2439 	mutex_unlock(&psr->lock);
2440 }
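
/*
 * Illustrative usage of the pause/resume pair above (hypothetical caller):
 *
 *	intel_psr_pause(intel_dp);
 *	... touch hardware that must not race with PSR entry/exit ...
 *	intel_psr_resume(intel_dp);
 *
 * The pause counter makes nested pause/resume sections safe; PSR is only
 * re-activated once the last resume brings the counter back to zero.
 */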
2441 
2442 /**
2443  * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
2444  * notification.
2445  * @crtc_state: CRTC state
2446  *
2447  * We need to block DC6 entry in case of Panel Replay, as enabling the VBI
2448  * doesn't prevent it in that case. Panel Replay switches the main link off on
2449  * DC entry. This means vblank interrupts are not fired, which is a problem if
2450  * user-space is polling for vblank events. Also Wa_16025596647 needs to know
2451  * when vblank is enabled/disabled.
2452  */
2453 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2454 {
2455 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2456 	struct intel_display *display = to_intel_display(crtc_state);
2457 	struct intel_encoder *encoder;
2458 
2459 	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2460 		struct intel_dp *intel_dp;
2461 
2462 		if (!intel_encoder_is_dp(encoder))
2463 			continue;
2464 
2465 		intel_dp = enc_to_intel_dp(encoder);
2466 
2467 		if (!intel_dp_is_edp(intel_dp))
2468 			continue;
2469 
2470 		if (CAN_PANEL_REPLAY(intel_dp))
2471 			return true;
2472 
2473 		if ((DISPLAY_VER(display) == 20 ||
2474 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2475 		    CAN_PSR(intel_dp))
2476 			return true;
2477 	}
2478 
2479 	return false;
2480 }
2481 
2482 /**
2483  * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2484  * @dsb: DSB context
2485  * @state: the atomic state
2486  * @crtc: the CRTC
2487  *
2488  * Generate PSR "Frame Change" event.
2489  */
2490 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2491 					  struct intel_atomic_state *state,
2492 					  struct intel_crtc *crtc)
2493 {
2494 	const struct intel_crtc_state *crtc_state =
2495 		intel_pre_commit_crtc_state(state, crtc);
2496 	struct intel_display *display = to_intel_display(crtc);
2497 
2498 	if (crtc_state->has_psr)
2499 		intel_de_write_dsb(display, dsb,
2500 				   CURSURFLIVE(display, crtc->pipe), 0);
2501 }
2502 
2503 /**
2504  * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
2505  * @crtc_state: the crtc state
2506  *
2507  * Return minimum SCL lines/delay needed by PSR.
2508  */
2509 int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
2510 {
2511 
2512 	return _intel_psr_min_set_context_latency(crtc_state,
2513 						  crtc_state->has_panel_replay,
2514 						  crtc_state->has_sel_update);
2515 }
2516 
2517 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2518 {
2519 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2520 		PSR2_MAN_TRK_CTL_ENABLE;
2521 }
2522 
2523 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2524 {
2525 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2526 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2527 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2528 }
2529 
2530 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2531 {
2532 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2533 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2534 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2535 }
2536 
2537 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2538 {
2539 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2540 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2541 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2542 }
2543 
2544 static void intel_psr_force_update(struct intel_dp *intel_dp)
2545 {
2546 	struct intel_display *display = to_intel_display(intel_dp);
2547 
2548 	/*
2549 	 * Display WA #0884: skl+
2550 	 * This documented WA for bxt can be safely applied
2551 	 * broadly so we can force HW tracking to exit PSR
2552 	 * instead of disabling and re-enabling.
2553 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2554 	 * but it makes more sense to write to the currently active
2555 	 * pipe.
2556 	 *
2557 	 * This workaround does not exist for platforms with display version 10
2558 	 * or newer, but testing proved that it works up to display version 13;
2559 	 * anything newer than that will need to be tested.
2560 	 */
2561 	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2562 }
2563 
2564 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
2565 					  const struct intel_crtc_state *crtc_state)
2566 {
2567 	struct intel_display *display = to_intel_display(crtc_state);
2568 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2569 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2570 	struct intel_encoder *encoder;
2571 
2572 	if (!crtc_state->enable_psr2_sel_fetch)
2573 		return;
2574 
2575 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2576 					     crtc_state->uapi.encoder_mask) {
2577 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2578 
2579 		if (!dsb)
2580 			lockdep_assert_held(&intel_dp->psr.lock);
2581 
2582 		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
2583 			return;
2584 		break;
2585 	}
2586 
2587 	intel_de_write_dsb(display, dsb,
2588 			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2589 			   crtc_state->psr2_man_track_ctl);
2590 
2591 	if (!crtc_state->enable_psr2_su_region_et)
2592 		return;
2593 
2594 	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2595 			   crtc_state->pipe_srcsz_early_tpt);
2596 }
2597 
2598 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2599 				  bool full_update)
2600 {
2601 	struct intel_display *display = to_intel_display(crtc_state);
2602 	u32 val = man_trk_ctl_enable_bit_get(display);
2603 
2604 	/* SF partial frame enable has to be set even on full update */
2605 	val |= man_trk_ctl_partial_frame_bit_get(display);
2606 
2607 	if (full_update) {
2608 		val |= man_trk_ctl_continuos_full_frame(display);
2609 		goto exit;
2610 	}
2611 
2612 	if (crtc_state->psr2_su_area.y1 == -1)
2613 		goto exit;
2614 
2615 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
2616 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2617 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2618 	} else {
2619 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2620 			    crtc_state->psr2_su_area.y1 % 4 ||
2621 			    crtc_state->psr2_su_area.y2 % 4);
2622 
2623 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2624 			crtc_state->psr2_su_area.y1 / 4 + 1);
2625 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2626 			crtc_state->psr2_su_area.y2 / 4 + 1);
2627 	}
2628 exit:
2629 	crtc_state->psr2_man_track_ctl = val;
2630 }
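
/*
 * Worked example (illustrative): for a SU area spanning lines y1 = 64 to
 * y2 = 128, ADLP and DISPLAY_VER >= 14 program start = 64 and end = 127
 * directly, while older platforms program the 4-line-block based values
 * 64 / 4 + 1 = 17 and 128 / 4 + 1 = 33 (with the WARN above enforcing the
 * 4-line alignment that makes this division exact).
 */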
2631 
2632 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2633 					  bool full_update)
2634 {
2635 	int width, height;
2636 
2637 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2638 		return 0;
2639 
2640 	width = drm_rect_width(&crtc_state->psr2_su_area);
2641 	height = drm_rect_height(&crtc_state->psr2_su_area);
2642 
2643 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2644 }
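
/*
 * For instance (illustrative numbers), a 1920x96 SU area is programmed as
 * PIPESRC_WIDTH(1919) | PIPESRC_HEIGHT(95): both fields use size-minus-one
 * encoding.
 */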
2645 
2646 static void clip_area_update(struct drm_rect *overlap_damage_area,
2647 			     struct drm_rect *damage_area,
2648 			     struct drm_rect *pipe_src)
2649 {
2650 	if (!drm_rect_intersect(damage_area, pipe_src))
2651 		return;
2652 
2653 	if (overlap_damage_area->y1 == -1) {
2654 		overlap_damage_area->y1 = damage_area->y1;
2655 		overlap_damage_area->y2 = damage_area->y2;
2656 		return;
2657 	}
2658 
2659 	if (damage_area->y1 < overlap_damage_area->y1)
2660 		overlap_damage_area->y1 = damage_area->y1;
2661 
2662 	if (damage_area->y2 > overlap_damage_area->y2)
2663 		overlap_damage_area->y2 = damage_area->y2;
2664 }
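
/*
 * Example (illustrative): with an existing overlap of y1 = 100..y2 = 200 and
 * a new damage rect spanning y1 = 180..y2 = 260 that intersects the pipe
 * source, the overlap grows to y1 = 100..y2 = 260. An overlap y1 of -1 means
 * "no area yet" and is simply replaced by the first damage rect.
 */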
2665 
2666 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2667 {
2668 	struct intel_display *display = to_intel_display(crtc_state);
2669 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2670 	u16 y_alignment;
2671 
2672 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2673 	if (crtc_state->dsc.compression_enable &&
2674 	    (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2675 		y_alignment = vdsc_cfg->slice_height;
2676 	else
2677 		y_alignment = crtc_state->su_y_granularity;
2678 
2679 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2680 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2681 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2682 						y_alignment) + 1) * y_alignment;
2683 }
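
/*
 * Example of the rounding above (illustrative): with y_alignment = 4 and a
 * SU area of y1 = 10, y2 = 21, y1 is rounded down to 8 and y2 is rounded up
 * to 24, so the fetched region always covers whole alignment blocks.
 */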
2684 
2685 /*
2686  * When early transport is in use we need to extend the SU area to cover
2687  * the cursor fully when the cursor is inside the SU area.
2688  */
2689 static void
2690 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2691 				  struct intel_crtc *crtc,
2692 				  bool *cursor_in_su_area)
2693 {
2694 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2695 	struct intel_plane_state *new_plane_state;
2696 	struct intel_plane *plane;
2697 	int i;
2698 
2699 	if (!crtc_state->enable_psr2_su_region_et)
2700 		return;
2701 
2702 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2703 		struct drm_rect inter;
2704 
2705 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2706 			continue;
2707 
2708 		if (plane->id != PLANE_CURSOR)
2709 			continue;
2710 
2711 		if (!new_plane_state->uapi.visible)
2712 			continue;
2713 
2714 		inter = crtc_state->psr2_su_area;
2715 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2716 			continue;
2717 
2718 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2719 				 &crtc_state->pipe_src);
2720 		*cursor_in_su_area = true;
2721 	}
2722 }
2723 
2724 /*
2725  * TODO: Not clear how to handle planes with negative position; also,
2726  * planes are not updated if they have a negative X position, so for now
2727  * we do a full update in these cases.
2728  *
2729  * Plane scaling and rotation are not supported by selective fetch and both
2730  * properties can change without a modeset, so they need to be checked at
2731  * every atomic commit.
2732  */
2733 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2734 {
2735 	if (plane_state->uapi.dst.y1 < 0 ||
2736 	    plane_state->uapi.dst.x1 < 0 ||
2737 	    plane_state->scaler_id >= 0 ||
2738 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2739 		return false;
2740 
2741 	return true;
2742 }
2743 
2744 /*
2745  * Check for pipe properties that are not supported by selective fetch.
2746  *
2747  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2748  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2749  * enabled and going to the full update path.
2750  */
2751 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2752 {
2753 	if (crtc_state->scaler_state.scaler_id >= 0)
2754 		return false;
2755 
2756 	return true;
2757 }
2758 
2759 /* Wa 14019834836 */
2760 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2761 {
2762 	struct intel_display *display = to_intel_display(crtc_state);
2763 	struct intel_encoder *encoder;
2764 	int hactive_limit;
2765 
2766 	if (crtc_state->psr2_su_area.y1 != 0 ||
2767 	    crtc_state->psr2_su_area.y2 != 0)
2768 		return;
2769 
2770 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2771 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2772 	else
2773 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2774 
2775 	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2776 		return;
2777 
2778 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2779 					     crtc_state->uapi.encoder_mask) {
2780 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2781 
2782 		if (!intel_dp_is_edp(intel_dp) &&
2783 		    intel_dp->psr.panel_replay_enabled &&
2784 		    intel_dp->psr.sel_update_enabled) {
2785 			crtc_state->psr2_su_area.y2++;
2786 			return;
2787 		}
2788 	}
2789 }
2790 
2791 static void
2792 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2793 {
2794 	struct intel_display *display = to_intel_display(crtc_state);
2795 
2796 	/* Wa_14014971492 */
2797 	if (!crtc_state->has_panel_replay &&
2798 	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2799 	      display->platform.alderlake_p || display->platform.tigerlake)) &&
2800 	    crtc_state->splitter.enable)
2801 		crtc_state->psr2_su_area.y1 = 0;
2802 
2803 	/* Wa 14019834836 */
2804 	if (DISPLAY_VER(display) == 30)
2805 		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2806 }
2807 
2808 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2809 				struct intel_crtc *crtc)
2810 {
2811 	struct intel_display *display = to_intel_display(state);
2812 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2813 	struct intel_plane_state *new_plane_state, *old_plane_state;
2814 	struct intel_plane *plane;
2815 	bool full_update = false, cursor_in_su_area = false;
2816 	int i, ret;
2817 
2818 	if (!crtc_state->enable_psr2_sel_fetch)
2819 		return 0;
2820 
2821 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2822 		full_update = true;
2823 		goto skip_sel_fetch_set_loop;
2824 	}
2825 
2826 	crtc_state->psr2_su_area.x1 = 0;
2827 	crtc_state->psr2_su_area.y1 = -1;
2828 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2829 	crtc_state->psr2_su_area.y2 = -1;
2830 
2831 	/*
2832 	 * Calculate the minimal selective fetch area of each plane and the
2833 	 * pipe damaged area.
2834 	 * In the next loop the plane selective fetch area will actually be set
2835 	 * using the whole pipe damaged area.
2836 	 */
2837 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2838 					     new_plane_state, i) {
2839 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2840 						      .x2 = INT_MAX };
2841 
2842 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2843 			continue;
2844 
2845 		if (!new_plane_state->uapi.visible &&
2846 		    !old_plane_state->uapi.visible)
2847 			continue;
2848 
2849 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2850 			full_update = true;
2851 			break;
2852 		}
2853 
2854 		/*
2855 		 * If the visibility changed or the plane moved, mark the whole
2856 		 * plane area as damaged as it needs to be completely redrawn in
2857 		 * both the new and old position.
2858 		 */
2859 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2860 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2861 				     &old_plane_state->uapi.dst)) {
2862 			if (old_plane_state->uapi.visible) {
2863 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2864 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2865 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2866 						 &crtc_state->pipe_src);
2867 			}
2868 
2869 			if (new_plane_state->uapi.visible) {
2870 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2871 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2872 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2873 						 &crtc_state->pipe_src);
2874 			}
2875 			continue;
2876 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2877 			/* If alpha changed mark the whole plane area as damaged */
2878 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2879 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2880 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2881 					 &crtc_state->pipe_src);
2882 			continue;
2883 		}
2884 
2885 		src = drm_plane_state_src(&new_plane_state->uapi);
2886 		drm_rect_fp_to_int(&src, &src);
2887 
2888 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2889 						     &new_plane_state->uapi, &damaged_area))
2890 			continue;
2891 
2892 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2893 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2894 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2895 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2896 
2897 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2898 	}
2899 
2900 	/*
2901 	 * TODO: For now we just use a full update in case the selective
2902 	 * fetch area calculation fails. To optimize this we should identify
2903 	 * the cases where this happens and fix the area calculation for
2904 	 * those.
2905 	 */
2906 	if (crtc_state->psr2_su_area.y1 == -1) {
2907 		drm_info_once(display->drm,
2908 			      "Selective fetch area calculation failed in pipe %c\n",
2909 			      pipe_name(crtc->pipe));
2910 		full_update = true;
2911 	}
2912 
2913 	if (full_update)
2914 		goto skip_sel_fetch_set_loop;
2915 
2916 	intel_psr_apply_su_area_workarounds(crtc_state);
2917 
2918 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2919 	if (ret)
2920 		return ret;
2921 
2922 	/*
2923 	 * Adjust the SU area to cover the cursor fully as necessary (early
2924 	 * transport). This needs to be done after
2925 	 * drm_atomic_add_affected_planes() to ensure a visible cursor is added
2926 	 * to the affected planes even when the cursor itself is not updated.
2927 	 */
2928 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2929 
2930 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2931 
2932 	/*
2933 	 * Now that we have the pipe damaged area, check if it intersects with
2934 	 * each plane; if it does, set the plane selective fetch area.
2935 	 */
2936 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2937 					     new_plane_state, i) {
2938 		struct drm_rect *sel_fetch_area, inter;
2939 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2940 
2941 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2942 		    !new_plane_state->uapi.visible)
2943 			continue;
2944 
2945 		inter = crtc_state->psr2_su_area;
2946 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2947 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2948 			sel_fetch_area->y1 = -1;
2949 			sel_fetch_area->y2 = -1;
2950 			/*
2951 			 * if plane sel fetch was previously enabled ->
2952 			 * disable it
2953 			 */
2954 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2955 				crtc_state->update_planes |= BIT(plane->id);
2956 
2957 			continue;
2958 		}
2959 
2960 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2961 			full_update = true;
2962 			break;
2963 		}
2964 
2965 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2966 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2967 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2968 		crtc_state->update_planes |= BIT(plane->id);
2969 
2970 		/*
2971 		 * Sel_fetch_area is calculated for the UV plane. Use the
2972 		 * same area for the Y plane as well.
2973 		 */
2974 		if (linked) {
2975 			struct intel_plane_state *linked_new_plane_state;
2976 			struct drm_rect *linked_sel_fetch_area;
2977 
2978 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2979 			if (IS_ERR(linked_new_plane_state))
2980 				return PTR_ERR(linked_new_plane_state);
2981 
2982 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2983 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2984 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2985 			crtc_state->update_planes |= BIT(linked->id);
2986 		}
2987 	}
2988 
2989 skip_sel_fetch_set_loop:
2990 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2991 	crtc_state->pipe_srcsz_early_tpt =
2992 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2993 	return 0;
2994 }
2995 
2996 void intel_psr2_panic_force_full_update(struct intel_display *display,
2997 					struct intel_crtc_state *crtc_state)
2998 {
2999 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3000 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3001 	u32 val = man_trk_ctl_enable_bit_get(display);
3002 
3003 	/* SF partial frame enable has to be set even on full update */
3004 	val |= man_trk_ctl_partial_frame_bit_get(display);
3005 	val |= man_trk_ctl_continuos_full_frame(display);
3006 
3007 	/* Directly write the register */
3008 	intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);
3009 
3010 	if (!crtc_state->enable_psr2_su_region_et)
3011 		return;
3012 
3013 	intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
3014 }
3015 
3016 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
3017 				struct intel_crtc *crtc)
3018 {
3019 	struct intel_display *display = to_intel_display(state);
3020 	const struct intel_crtc_state *old_crtc_state =
3021 		intel_atomic_get_old_crtc_state(state, crtc);
3022 	const struct intel_crtc_state *new_crtc_state =
3023 		intel_atomic_get_new_crtc_state(state, crtc);
3024 	struct intel_encoder *encoder;
3025 
3026 	if (!HAS_PSR(display))
3027 		return;
3028 
3029 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
3030 					     old_crtc_state->uapi.encoder_mask) {
3031 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3032 		struct intel_psr *psr = &intel_dp->psr;
3033 
3034 		mutex_lock(&psr->lock);
3035 
3036 		if (!new_crtc_state->has_psr)
3037 			psr->no_psr_reason = new_crtc_state->no_psr_reason;
3038 
3039 		if (psr->enabled) {
3040 			/*
3041 			 * Reasons to disable:
3042 			 * - PSR disabled in new state
3043 			 * - All planes will go inactive
3044 			 * - Changing between PSR versions
3045 			 * - Region Early Transport changing
3046 			 * - Display WA #1136: skl, bxt
3047 			 */
3048 			if (intel_crtc_needs_modeset(new_crtc_state) ||
3049 			    !new_crtc_state->has_psr ||
3050 			    !new_crtc_state->active_planes ||
3051 			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
3052 			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
3053 			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
3054 			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
3055 				intel_psr_disable_locked(intel_dp);
3056 			else if (new_crtc_state->wm_level_disabled)
3057 				/* Wa_14015648006 */
3058 				wm_optimization_wa(intel_dp, new_crtc_state);
3059 		}
3060 
3061 		mutex_unlock(&psr->lock);
3062 	}
3063 }
3064 
3065 static void
3066 verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
3067 {
3068 	struct intel_display *display = to_intel_display(crtc_state);
3069 
3070 	if (!crtc_state->has_panel_replay)
3071 		return;
3072 
3073 	drm_WARN_ON(display->drm,
3074 		    intel_dsc_enabled_on_link(crtc_state) &&
3075 		    crtc_state->panel_replay_dsc_support ==
3076 		    INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
3077 }
3078 
3079 void intel_psr_post_plane_update(struct intel_atomic_state *state,
3080 				 struct intel_crtc *crtc)
3081 {
3082 	struct intel_display *display = to_intel_display(state);
3083 	const struct intel_crtc_state *crtc_state =
3084 		intel_atomic_get_new_crtc_state(state, crtc);
3085 	struct intel_encoder *encoder;
3086 
3087 	if (!crtc_state->has_psr)
3088 		return;
3089 
3090 	verify_panel_replay_dsc_state(crtc_state);
3091 
3092 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
3093 					     crtc_state->uapi.encoder_mask) {
3094 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3095 		struct intel_psr *psr = &intel_dp->psr;
3096 		bool keep_disabled = false;
3097 
3098 		mutex_lock(&psr->lock);
3099 
3100 		drm_WARN_ON(display->drm,
3101 			    psr->enabled && !crtc_state->active_planes);
3102 
3103 		if (psr->sink_not_reliable)
3104 			keep_disabled = true;
3105 
3106 		if (!crtc_state->active_planes) {
3107 			psr->no_psr_reason = "All planes inactive";
3108 			keep_disabled = true;
3109 		}
3110 
3111 		/* Display WA #1136: skl, bxt */
3112 		if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
3113 			psr->no_psr_reason = "Workaround #1136 for skl, bxt";
3114 			keep_disabled = true;
3115 		}
3116 
3117 		if (!psr->enabled && !keep_disabled)
3118 			intel_psr_enable_locked(intel_dp, crtc_state);
3119 		else if (psr->enabled && !crtc_state->wm_level_disabled)
3120 			/* Wa_14015648006 */
3121 			wm_optimization_wa(intel_dp, crtc_state);
3122 
3123 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
3124 		if (crtc_state->crc_enabled && psr->enabled)
3125 			intel_psr_force_update(intel_dp);
3126 
3127 		/*
3128 		 * Clear possible busy bits in case we have
3129 		 * invalidate -> flip -> flush sequence.
3130 		 */
3131 		intel_dp->psr.busy_frontbuffer_bits = 0;
3132 
3133 		mutex_unlock(&psr->lock);
3134 	}
3135 }
3136 
3137 /*
3138  * From bspec: Panel Self Refresh (BDW+)
3139  * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3140  * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3141  * defensive enough to cover everything.
3142  */
3143 #define PSR_IDLE_TIMEOUT_MS 50
3144 
3145 static int
3146 _psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3147 				   struct intel_dsb *dsb)
3148 {
3149 	struct intel_display *display = to_intel_display(new_crtc_state);
3150 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3151 
3152 	/*
3153 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
3154 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
3155 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
3156 	 */
3157 	if (dsb) {
3158 		intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
3159 			       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
3160 			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3161 		return true;
3162 	}
3163 
3164 	return intel_de_wait_for_clear(display,
3165 				       EDP_PSR2_STATUS(display, cpu_transcoder),
3166 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
3167 				       PSR_IDLE_TIMEOUT_MS);
3168 }
3169 
3170 static int
3171 _psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3172 				   struct intel_dsb *dsb)
3173 {
3174 	struct intel_display *display = to_intel_display(new_crtc_state);
3175 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3176 
3177 	if (dsb) {
3178 		intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
3179 			       EDP_PSR_STATUS_STATE_MASK, 0, 200,
3180 			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3181 		return true;
3182 	}
3183 
3184 	return intel_de_wait_for_clear(display,
3185 				       psr_status_reg(display, cpu_transcoder),
3186 				       EDP_PSR_STATUS_STATE_MASK,
3187 				       PSR_IDLE_TIMEOUT_MS);
3188 }
3189 
3190 /**
3191  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
3192  * @new_crtc_state: new CRTC state
3193  *
3194  * This function is expected to be called from pipe_update_start() where it is
3195  * not expected to race with PSR enable or disable.
3196  */
3197 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
3198 {
3199 	struct intel_display *display = to_intel_display(new_crtc_state);
3200 	struct intel_encoder *encoder;
3201 
3202 	if (!new_crtc_state->has_psr)
3203 		return;
3204 
3205 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3206 					     new_crtc_state->uapi.encoder_mask) {
3207 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3208 		int ret;
3209 
3210 		lockdep_assert_held(&intel_dp->psr.lock);
3211 
3212 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3213 			continue;
3214 
3215 		if (intel_dp->psr.sel_update_enabled)
3216 			ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
3217 								 NULL);
3218 		else
3219 			ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
3220 								 NULL);
3221 
3222 		if (ret)
3223 			drm_err(display->drm,
3224 				"PSR wait timed out, atomic update may fail\n");
3225 	}
3226 }
3227 
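/*
 * DSB variant of intel_psr_wait_for_idle_locked(): the PSR idle poll is
 * emitted into the DSB instead of being executed on the CPU. Panel Replay is
 * skipped here as well.
 */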
3228 void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
3229 				 const struct intel_crtc_state *new_crtc_state)
3230 {
3231 	if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
3232 		return;
3233 
3234 	if (new_crtc_state->has_sel_update)
3235 		_psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
3236 	else
3237 		_psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
3238 }
3239 
3240 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
3241 {
3242 	struct intel_display *display = to_intel_display(intel_dp);
3243 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3244 	i915_reg_t reg;
3245 	u32 mask;
3246 	int err;
3247 
3248 	if (!intel_dp->psr.enabled)
3249 		return false;
3250 
3251 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3252 					  intel_dp->psr.panel_replay_enabled)) {
3253 		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
3254 		mask = EDP_PSR2_STATUS_STATE_MASK;
3255 	} else {
3256 		reg = psr_status_reg(display, cpu_transcoder);
3257 		mask = EDP_PSR_STATUS_STATE_MASK;
3258 	}
3259 
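	/*
	 * Drop the lock for the register wait; PSR state may change meanwhile,
	 * hence the recheck after re-acquiring the lock below.
	 */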
3260 	mutex_unlock(&intel_dp->psr.lock);
3261 
3262 	err = intel_de_wait_for_clear(display, reg, mask, 50);
3263 	if (err)
3264 		drm_err(display->drm,
3265 			"Timed out waiting for PSR Idle for re-enable\n");
3266 
3267 	/* After the unlocked wait, verify that PSR is still wanted! */
3268 	mutex_lock(&intel_dp->psr.lock);
3269 	return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
3270 }
3271 
3272 static int intel_psr_fastset_force(struct intel_display *display)
3273 {
3274 	struct drm_connector_list_iter conn_iter;
3275 	struct drm_modeset_acquire_ctx ctx;
3276 	struct drm_atomic_state *state;
3277 	struct drm_connector *conn;
3278 	int err = 0;
3279 
3280 	state = drm_atomic_state_alloc(display->drm);
3281 	if (!state)
3282 		return -ENOMEM;
3283 
3284 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3285 
3286 	state->acquire_ctx = &ctx;
3287 	to_intel_atomic_state(state)->internal = true;
3288 
3289 retry:
3290 	drm_connector_list_iter_begin(display->drm, &conn_iter);
3291 	drm_for_each_connector_iter(conn, &conn_iter) {
3292 		struct drm_connector_state *conn_state;
3293 		struct drm_crtc_state *crtc_state;
3294 
3295 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3296 			continue;
3297 
3298 		conn_state = drm_atomic_get_connector_state(state, conn);
3299 		if (IS_ERR(conn_state)) {
3300 			err = PTR_ERR(conn_state);
3301 			break;
3302 		}
3303 
3304 		if (!conn_state->crtc)
3305 			continue;
3306 
3307 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3308 		if (IS_ERR(crtc_state)) {
3309 			err = PTR_ERR(crtc_state);
3310 			break;
3311 		}
3312 
3313 		/* Mark mode as changed to trigger a pipe->update() */
3314 		crtc_state->mode_changed = true;
3315 	}
3316 	drm_connector_list_iter_end(&conn_iter);
3317 
3318 	if (err == 0)
3319 		err = drm_atomic_commit(state);
3320 
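	/*
	 * Standard modeset deadlock handling: clear the atomic state, back off
	 * on the acquire context and retry the whole commit.
	 */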
3321 	if (err == -EDEADLK) {
3322 		drm_atomic_state_clear(state);
3323 		err = drm_modeset_backoff(&ctx);
3324 		if (!err)
3325 			goto retry;
3326 	}
3327 
3328 	drm_modeset_drop_locks(&ctx);
3329 	drm_modeset_acquire_fini(&ctx);
3330 	drm_atomic_state_put(state);
3331 
3332 	return err;
3333 }
3334 
3335 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3336 {
3337 	struct intel_display *display = to_intel_display(intel_dp);
3338 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3339 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3340 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3341 	u32 old_mode, old_disable_bits;
3342 	int ret;
3343 
3344 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3345 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3346 		    I915_PSR_DEBUG_MODE_MASK) ||
3347 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3348 		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3349 		return -EINVAL;
3350 	}
3351 
3352 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3353 	if (ret)
3354 		return ret;
3355 
3356 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3357 	old_disable_bits = intel_dp->psr.debug &
3358 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3359 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3360 
3361 	intel_dp->psr.debug = val;
3362 
3363 	/*
3364 	 * Do it right away if it's already enabled, otherwise it will be done
3365 	 * when enabling the source.
3366 	 */
3367 	if (intel_dp->psr.enabled)
3368 		psr_irq_control(intel_dp);
3369 
3370 	mutex_unlock(&intel_dp->psr.lock);
3371 
3372 	if (old_mode != mode || old_disable_bits != disable_bits)
3373 		ret = intel_psr_fastset_force(display);
3374 
3375 	return ret;
3376 }
3377 
3378 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3379 {
3380 	struct intel_psr *psr = &intel_dp->psr;
3381 
3382 	intel_psr_disable_locked(intel_dp);
3383 	psr->sink_not_reliable = true;
3384 	/* let's make sure that the sink is awake */
3385 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3386 }
3387 
3388 static void intel_psr_work(struct work_struct *work)
3389 {
3390 	struct intel_dp *intel_dp =
3391 		container_of(work, typeof(*intel_dp), psr.work);
3392 
3393 	mutex_lock(&intel_dp->psr.lock);
3394 
3395 	if (!intel_dp->psr.enabled)
3396 		goto unlock;
3397 
3398 	if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
3399 		intel_psr_handle_irq(intel_dp);
3400 		goto unlock;
3401 	}
3402 
3403 	if (intel_dp->psr.pause_counter)
3404 		goto unlock;
3405 
3406 	/*
3407 	 * We have to make sure PSR is ready for re-enable,
3408 	 * otherwise it stays disabled until the next full enable/disable cycle.
3409 	 * PSR might take some time to get fully disabled
3410 	 * and be ready for re-enable.
3411 	 */
3412 	if (!__psr_wait_for_idle_locked(intel_dp))
3413 		goto unlock;
3414 
3415 	/*
3416 	 * The delayed work can race with an invalidate hence we need to
3417 	 * recheck. Since psr_flush first clears this and then reschedules we
3418 	 * won't ever miss a flush when bailing out here.
3419 	 */
3420 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3421 		goto unlock;
3422 
3423 	intel_psr_activate(intel_dp);
3424 unlock:
3425 	mutex_unlock(&intel_dp->psr.lock);
3426 }
3427 
3428 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3429 {
3430 	struct intel_display *display = to_intel_display(intel_dp);
3431 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3432 
3433 	if (!intel_dp->psr.psr2_sel_fetch_enabled)
3434 		return;
3435 
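	/*
	 * LNL+ has a dedicated single full frame (SFF) control register; older
	 * platforms request it through the SFF/CFF bits in PSR2_MAN_TRK_CTL.
	 */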
3436 	if (DISPLAY_VER(display) >= 20)
3437 		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3438 			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3439 	else
3440 		intel_de_write(display,
3441 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3442 			       man_trk_ctl_enable_bit_get(display) |
3443 			       man_trk_ctl_partial_frame_bit_get(display) |
3444 			       man_trk_ctl_single_full_frame_bit_get(display) |
3445 			       man_trk_ctl_continuos_full_frame(display));
3446 }
3447 
3448 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3449 {
3450 	struct intel_display *display = to_intel_display(intel_dp);
3451 
3452 	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3453 		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3454 			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3455 			intel_psr_configure_full_frame_update(intel_dp);
3456 		}
3457 
3458 		intel_psr_force_update(intel_dp);
3459 	} else {
3460 		intel_psr_exit(intel_dp);
3461 	}
3462 }
3463 
3464 /**
3465  * intel_psr_invalidate - Invalidate PSR
3466  * @display: display device
3467  * @frontbuffer_bits: frontbuffer plane tracking bits
3468  * @origin: which operation caused the invalidate
3469  *
3470  * Since the hardware frontbuffer tracking has gaps we need to integrate
3471  * with the software frontbuffer tracking. This function gets called every
3472  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3473  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3474  *
3475  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3476  */
3477 void intel_psr_invalidate(struct intel_display *display,
3478 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3479 {
3480 	struct intel_encoder *encoder;
3481 
3482 	if (origin == ORIGIN_FLIP)
3483 		return;
3484 
3485 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3486 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3487 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3488 
3489 		mutex_lock(&intel_dp->psr.lock);
3490 		if (!intel_dp->psr.enabled) {
3491 			mutex_unlock(&intel_dp->psr.lock);
3492 			continue;
3493 		}
3494 
3495 		pipe_frontbuffer_bits &=
3496 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3497 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3498 
3499 		if (pipe_frontbuffer_bits)
3500 			_psr_invalidate_handle(intel_dp);
3501 
3502 		mutex_unlock(&intel_dp->psr.lock);
3503 	}
3504 }
3505 /*
3506  * Once we completely rely on PSR2 S/W tracking in the future, intel_psr_flush()
3507  * will invalidate and flush the PSR for the ORIGIN_FLIP event as well, and
3508  * therefore tgl_dc3co_flush_locked() will need to be changed
3509  * accordingly.
3510  */
3511 static void
3512 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3513 		       enum fb_op_origin origin)
3514 {
3515 	struct intel_display *display = to_intel_display(intel_dp);
3516 
3517 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3518 	    !intel_dp->psr.active)
3519 		return;
3520 
3521 	/*
3522 	 * Every frontbuffer flush/flip event pushes back the delayed work; when
3523 	 * the delayed work finally runs, it means the display has been idle.
3524 	 */
3525 	if (!(frontbuffer_bits &
3526 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3527 		return;
3528 
3529 	tgl_psr2_enable_dc3co(intel_dp);
3530 	mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
3531 			 intel_dp->psr.dc3co_exit_delay);
3532 }
3533 
3534 static void _psr_flush_handle(struct intel_dp *intel_dp)
3535 {
3536 	struct intel_display *display = to_intel_display(intel_dp);
3537 
3538 	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3539 		/* Selective fetch prior to LNL */
3540 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3541 			/* can we turn CFF off? */
3542 			if (intel_dp->psr.busy_frontbuffer_bits == 0)
3543 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3544 		}
3545 
3546 		/*
3547 		 * Still keep the CFF bit enabled, as we don't have a proper SU
3548 		 * configuration in case an update is sent for any reason after
3549 		 * the SFF bit gets cleared by the HW on the next vblank.
3550 		 *
3551 		 * NOTE: Setting the CFF bit is not needed from LunarLake onwards, as
3552 		 * we have our own register for the SFF bit and we are not overwriting
3553 		 * the existing SU configuration.
3554 		 */
3555 		intel_psr_configure_full_frame_update(intel_dp);
3556 
3557 		intel_psr_force_update(intel_dp);
3558 	} else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
3559 		/*
3560 		 * PSR1 on all platforms
3561 		 * PSR2 HW tracking
3562 		 * Panel Replay Full frame update
3563 		 */
3564 		intel_psr_force_update(intel_dp);
3565 	} else {
3566 		/* Selective update LNL onwards */
3567 		intel_psr_exit(intel_dp);
3568 	}
3569 
3570 	if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3571 		queue_work(display->wq.unordered, &intel_dp->psr.work);
3572 }
3573 
3574 /**
3575  * intel_psr_flush - Flush PSR
3576  * @display: display device
3577  * @frontbuffer_bits: frontbuffer plane tracking bits
3578  * @origin: which operation caused the flush
3579  *
3580  * Since the hardware frontbuffer tracking has gaps we need to integrate
3581  * with the software frontbuffer tracking. This function gets called every
3582  * time frontbuffer rendering has completed and flushed out to memory. PSR
3583  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3584  *
3585  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3586  */
3587 void intel_psr_flush(struct intel_display *display,
3588 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3589 {
3590 	struct intel_encoder *encoder;
3591 
3592 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3593 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3594 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3595 
3596 		mutex_lock(&intel_dp->psr.lock);
3597 		if (!intel_dp->psr.enabled) {
3598 			mutex_unlock(&intel_dp->psr.lock);
3599 			continue;
3600 		}
3601 
3602 		pipe_frontbuffer_bits &=
3603 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3604 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3605 
3606 		/*
3607 		 * If the PSR is paused by an explicit intel_psr_paused() call,
3608 		 * we have to ensure that the PSR is not activated until
3609 		 * intel_psr_resume() is called.
3610 		 */
3611 		if (intel_dp->psr.pause_counter)
3612 			goto unlock;
3613 
3614 		if (origin == ORIGIN_FLIP ||
3615 		    (origin == ORIGIN_CURSOR_UPDATE &&
3616 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3617 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3618 			goto unlock;
3619 		}
3620 
3621 		if (pipe_frontbuffer_bits == 0)
3622 			goto unlock;
3623 
3624 		/* By definition flush = invalidate + flush */
3625 		_psr_flush_handle(intel_dp);
3626 unlock:
3627 		mutex_unlock(&intel_dp->psr.lock);
3628 	}
3629 }
3630 
3631 /**
3632  * intel_psr_init - Init basic PSR work and mutex.
3633  * @intel_dp: Intel DP
3634  *
3635  * This function is called after initializing the connector
3636  * (connector initialization handles the connector capabilities)
3637  * and initializes the basic PSR state for each DP encoder.
3638  */
3639 void intel_psr_init(struct intel_dp *intel_dp)
3640 {
3641 	struct intel_display *display = to_intel_display(intel_dp);
3642 	struct intel_connector *connector = intel_dp->attached_connector;
3643 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3644 
3645 	if (!(HAS_PSR(display) || HAS_DP20(display)))
3646 		return;
3647 
3648 	/*
3649 	 * HSW spec explicitly says PSR is tied to port A.
3650 	 * BDW+ platforms have an instance of the PSR registers per transcoder, but
3651 	 * BDW, GEN9 and GEN11 are not validated by the HW team on any transcoder
3652 	 * other than the eDP one.
3653 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3654 	 * so let's keep it hardcoded to PORT_A for those platforms.
3655 	 * GEN12, however, supports an instance of the PSR registers per transcoder.
3656 	 */
3657 	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3658 		drm_dbg_kms(display->drm,
3659 			    "PSR condition failed: Port not supported\n");
3660 		return;
3661 	}
3662 
3663 	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3664 	    DISPLAY_VER(display) >= 20)
3665 		intel_dp->psr.source_panel_replay_support = true;
3666 
3667 	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3668 		intel_dp->psr.source_support = true;
3669 
3670 	/* Set link_standby vs. link_off defaults */
3671 	if (DISPLAY_VER(display) < 12)
3672 		/* For platforms up to TGL, respect the VBT again */
3673 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3674 
3675 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3676 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3677 	mutex_init(&intel_dp->psr.lock);
3678 }
3679 
3680 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3681 					   u8 *status, u8 *error_status)
3682 {
3683 	struct drm_dp_aux *aux = &intel_dp->aux;
3684 	int ret;
3685 	unsigned int offset;
3686 
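	/* Panel Replay and PSR report status and errors via different DPCD registers. */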
3687 	offset = intel_dp->psr.panel_replay_enabled ?
3688 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3689 
3690 	ret = drm_dp_dpcd_readb(aux, offset, status);
3691 	if (ret != 1)
3692 		return ret;
3693 
3694 	offset = intel_dp->psr.panel_replay_enabled ?
3695 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3696 
3697 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3698 	if (ret != 1)
3699 		return ret;
3700 
3701 	*status = *status & DP_PSR_SINK_STATE_MASK;
3702 
3703 	return 0;
3704 }
3705 
3706 static void psr_alpm_check(struct intel_dp *intel_dp)
3707 {
3708 	struct intel_psr *psr = &intel_dp->psr;
3709 
3710 	if (!psr->sel_update_enabled)
3711 		return;
3712 
3713 	if (intel_alpm_get_error(intel_dp)) {
3714 		intel_psr_disable_locked(intel_dp);
3715 		psr->sink_not_reliable = true;
3716 	}
3717 }
3718 
3719 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3720 {
3721 	struct intel_display *display = to_intel_display(intel_dp);
3722 	struct intel_psr *psr = &intel_dp->psr;
3723 	u8 val;
3724 	int r;
3725 
3726 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3727 	if (r != 1) {
3728 		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3729 		return;
3730 	}
3731 
3732 	if (val & DP_PSR_CAPS_CHANGE) {
3733 		intel_psr_disable_locked(intel_dp);
3734 		psr->sink_not_reliable = true;
3735 		drm_dbg_kms(display->drm,
3736 			    "Sink PSR capability changed, disabling PSR\n");
3737 
3738 		/* Clearing it */
3739 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3740 	}
3741 }
3742 
3743 /*
3744  * For the common bits:
3745  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3746  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3747  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3748  * this function relies on the PSR definitions.
3749  */
3750 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3751 {
3752 	struct intel_display *display = to_intel_display(intel_dp);
3753 	struct intel_psr *psr = &intel_dp->psr;
3754 	u8 status, error_status;
3755 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3756 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3757 			  DP_PSR_LINK_CRC_ERROR;
3758 
3759 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3760 		return;
3761 
3762 	mutex_lock(&psr->lock);
3763 
3764 	psr->link_ok = false;
3765 
3766 	if (!psr->enabled)
3767 		goto exit;
3768 
3769 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3770 		drm_err(display->drm,
3771 			"Error reading PSR status or error status\n");
3772 		goto exit;
3773 	}
3774 
3775 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3776 	    (error_status & errors)) {
3777 		intel_psr_disable_locked(intel_dp);
3778 		psr->sink_not_reliable = true;
3779 	}
3780 
3781 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3782 	    !error_status)
3783 		drm_dbg_kms(display->drm,
3784 			    "PSR sink internal error, disabling PSR\n");
3785 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3786 		drm_dbg_kms(display->drm,
3787 			    "PSR RFB storage error, disabling PSR\n");
3788 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3789 		drm_dbg_kms(display->drm,
3790 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3791 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3792 		drm_dbg_kms(display->drm,
3793 			    "PSR Link CRC error, disabling PSR\n");
3794 
3795 	if (error_status & ~errors)
3796 		drm_err(display->drm,
3797 			"PSR_ERROR_STATUS unhandled errors %x\n",
3798 			error_status & ~errors);
3799 	/* clear status register */
3800 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3801 
3802 	if (!psr->panel_replay_enabled) {
3803 		psr_alpm_check(intel_dp);
3804 		psr_capability_changed_check(intel_dp);
3805 	}
3806 
3807 exit:
3808 	mutex_unlock(&psr->lock);
3809 }
3810 
3811 bool intel_psr_enabled(struct intel_dp *intel_dp)
3812 {
3813 	bool ret;
3814 
3815 	if (!CAN_PSR(intel_dp))
3816 		return false;
3817 
3818 	mutex_lock(&intel_dp->psr.lock);
3819 	ret = intel_dp->psr.enabled;
3820 	mutex_unlock(&intel_dp->psr.lock);
3821 
3822 	return ret;
3823 }
3824 
3825 /**
3826  * intel_psr_link_ok - return psr->link_ok
3827  * @intel_dp: struct intel_dp
3828  *
3829  * We are seeing unexpected link re-trainings with some panels. This is caused
3830  * by the panel reporting a bad link status after PSR is enabled. Code checking
3831  * the link status can call this to decide whether to ignore a bad link status
3832  * reported by the panel, i.e. if the panel reports a bad link while
3833  * intel_psr_link_ok() says the link is ok, the caller should rely on the latter.
3834  *
3835  * Return value of link_ok
3836  */
3837 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3838 {
3839 	bool ret;
3840 
3841 	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3842 	    !intel_dp_is_edp(intel_dp))
3843 		return false;
3844 
3845 	mutex_lock(&intel_dp->psr.lock);
3846 	ret = intel_dp->psr.link_ok;
3847 	mutex_unlock(&intel_dp->psr.lock);
3848 
3849 	return ret;
3850 }
3851 
3852 /**
3853  * intel_psr_lock - grab PSR lock
3854  * @crtc_state: the crtc state
3855  *
3856  * This is initially meant to be used around the CRTC update, when
3857  * vblank sensitive registers are updated and we need to grab the lock
3858  * beforehand to avoid vblank evasion.
3859  */
3860 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3861 {
3862 	struct intel_display *display = to_intel_display(crtc_state);
3863 	struct intel_encoder *encoder;
3864 
3865 	if (!crtc_state->has_psr)
3866 		return;
3867 
3868 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3869 					     crtc_state->uapi.encoder_mask) {
3870 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3871 
3872 		mutex_lock(&intel_dp->psr.lock);
3873 		break;
3874 	}
3875 }
3876 
3877 /**
3878  * intel_psr_unlock - release PSR lock
3879  * @crtc_state: the crtc state
3880  *
3881  * Release the PSR lock that was held during pipe update.
3882  */
3883 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3884 {
3885 	struct intel_display *display = to_intel_display(crtc_state);
3886 	struct intel_encoder *encoder;
3887 
3888 	if (!crtc_state->has_psr)
3889 		return;
3890 
3891 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3892 					     crtc_state->uapi.encoder_mask) {
3893 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3894 
3895 		mutex_unlock(&intel_dp->psr.lock);
3896 		break;
3897 	}
3898 }
3899 
3900 /* Wa_16025596647 */
3901 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
3902 {
3903 	struct intel_display *display = to_intel_display(intel_dp);
3904 	bool dc5_dc6_blocked;
3905 
3906 	if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
3907 		return;
3908 
3909 	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
3910 
3911 	if (intel_dp->psr.sel_update_enabled)
3912 		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
3913 					 psr_compute_idle_frames(intel_dp));
3914 	else
3915 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
3916 								       intel_dp->psr.pipe,
3917 								       dc5_dc6_blocked);
3918 }
3919 
3920 static void psr_dc5_dc6_wa_work(struct work_struct *work)
3921 {
3922 	struct intel_display *display = container_of(work, typeof(*display),
3923 						     psr_dc5_dc6_wa_work);
3924 	struct intel_encoder *encoder;
3925 
3926 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3927 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3928 
3929 		mutex_lock(&intel_dp->psr.lock);
3930 
3931 		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
3932 		    !intel_dp->psr.pkg_c_latency_used)
3933 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3934 
3935 		mutex_unlock(&intel_dp->psr.lock);
3936 	}
3937 }
3938 
3939 /**
3940  * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
3941  * @display: intel display struct
3942  *
3943  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
3944  * psr_dc5_dc6_wa_work used for applying/removing the workaround.
3945  */
3946 void intel_psr_notify_dc5_dc6(struct intel_display *display)
3947 {
3948 	if (DISPLAY_VER(display) != 20 &&
3949 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3950 		return;
3951 
3952 	schedule_work(&display->psr_dc5_dc6_wa_work);
3953 }
3954 
3955 /**
3956  * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
3957  * @display: intel display struct
3958  *
3959  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
3960  * psr_dc5_dc6_wa_work used for applying the workaround.
3961  */
3962 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
3963 {
3964 	if (DISPLAY_VER(display) != 20 &&
3965 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3966 		return;
3967 
3968 	INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
3969 }
3970 
3971 /**
3972  * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
3973  * @state: intel atomic state
3974  * @crtc: intel crtc
3975  * @enable: enable/disable
3976  *
3977  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply or
3978  * remove the workaround when a pipe is getting enabled/disabled
3979  */
3980 void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
3981 				  struct intel_crtc *crtc, bool enable)
3982 {
3983 	struct intel_display *display = to_intel_display(state);
3984 	struct intel_encoder *encoder;
3985 
3986 	if (DISPLAY_VER(display) != 20 &&
3987 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3988 		return;
3989 
3990 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3991 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3992 		u8 active_non_psr_pipes;
3993 
3994 		mutex_lock(&intel_dp->psr.lock);
3995 
3996 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3997 			goto unlock;
3998 
3999 		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
4000 
4001 		if (enable)
4002 			active_non_psr_pipes |= BIT(crtc->pipe);
4003 		else
4004 			active_non_psr_pipes &= ~BIT(crtc->pipe);
4005 
4006 		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
4007 			goto unlock;
4008 
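		/*
		 * Only re-apply the workaround when the first non-PSR pipe
		 * becomes active or the last one goes away, and only while
		 * package C latency is in use; otherwise just record the
		 * updated mask.
		 */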
4009 		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
4010 		    (!enable && !intel_dp->psr.active_non_psr_pipes) ||
4011 		    !intel_dp->psr.pkg_c_latency_used) {
4012 			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
4013 			goto unlock;
4014 		}
4015 
4016 		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
4017 
4018 		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
4019 unlock:
4020 		mutex_unlock(&intel_dp->psr.lock);
4021 	}
4022 }
4023 
4024 /**
4025  * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
4026  * @display: intel display struct
4027  * @enable: enable/disable
4028  *
4029  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply or
4030  * remove the workaround when vblank is getting enabled/disabled
4031  */
4032 void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
4033 					    bool enable)
4034 {
4035 	struct intel_encoder *encoder;
4036 
4037 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4038 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4039 
4040 		mutex_lock(&intel_dp->psr.lock);
4041 		if (intel_dp->psr.panel_replay_enabled) {
4042 			mutex_unlock(&intel_dp->psr.lock);
4043 			break;
4044 		}
4045 
4046 		if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
4047 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
4048 
4049 		mutex_unlock(&intel_dp->psr.lock);
4050 		return;
4051 	}
4052 
4053 	/*
4054 	 * NOTE: intel_display_power_set_target_dc_state is used
4055 	 * only by PSR code for DC3CO handling. The DC3CO target
4056 	 * state is currently disabled in PSR code. If DC3CO
4057 	 * is taken into use, we need to take that into account here
4058 	 * as well.
4059 	 */
4060 	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
4061 						DC_STATE_EN_UPTO_DC6);
4062 }
4063 
4064 static void
4065 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
4066 {
4067 	struct intel_display *display = to_intel_display(intel_dp);
4068 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
4069 	const char *status = "unknown";
4070 	u32 val, status_val;
4071 
4072 	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
4073 	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
4074 		static const char * const live_status[] = {
4075 			"IDLE",
4076 			"CAPTURE",
4077 			"CAPTURE_FS",
4078 			"SLEEP",
4079 			"BUFON_FW",
4080 			"ML_UP",
4081 			"SU_STANDBY",
4082 			"FAST_SLEEP",
4083 			"DEEP_SLEEP",
4084 			"BUF_ON",
4085 			"TG_ON"
4086 		};
4087 		val = intel_de_read(display,
4088 				    EDP_PSR2_STATUS(display, cpu_transcoder));
4089 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
4090 		if (status_val < ARRAY_SIZE(live_status))
4091 			status = live_status[status_val];
4092 	} else {
4093 		static const char * const live_status[] = {
4094 			"IDLE",
4095 			"SRDONACK",
4096 			"SRDENT",
4097 			"BUFOFF",
4098 			"BUFON",
4099 			"AUXACK",
4100 			"SRDOFFACK",
4101 			"SRDENT_ON",
4102 		};
4103 		val = intel_de_read(display,
4104 				    psr_status_reg(display, cpu_transcoder));
4105 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
4106 		if (status_val < ARRAY_SIZE(live_status))
4107 			status = live_status[status_val];
4108 	}
4109 
4110 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
4111 }
4112 
4113 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
4114 				      struct seq_file *m)
4115 {
4116 	struct intel_psr *psr = &intel_dp->psr;
4117 
4118 	seq_printf(m, "Sink support: PSR = %s",
4119 		   str_yes_no(psr->sink_support));
4120 
4121 	if (psr->sink_support)
4122 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
4123 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
4124 		seq_printf(m, " (Early Transport)");
4125 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
4126 	seq_printf(m, ", Panel Replay Selective Update = %s",
4127 		   str_yes_no(psr->sink_panel_replay_su_support));
4128 	seq_printf(m, ", Panel Replay DSC support = %s",
4129 		   panel_replay_dsc_support_str(psr->sink_panel_replay_dsc_support));
4130 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
4131 	    DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
4132 		seq_printf(m, " (Early Transport)");
4133 	seq_printf(m, "\n");
4134 }
4135 
4136 static void intel_psr_print_mode(struct intel_dp *intel_dp,
4137 				 struct seq_file *m)
4138 {
4139 	struct intel_psr *psr = &intel_dp->psr;
4140 	const char *status, *mode, *region_et;
4141 
4142 	if (psr->enabled)
4143 		status = " enabled";
4144 	else
4145 		status = "disabled";
4146 
4147 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
4148 		mode = "Panel Replay Selective Update";
4149 	else if (psr->panel_replay_enabled)
4150 		mode = "Panel Replay";
4151 	else if (psr->sel_update_enabled)
4152 		mode = "PSR2";
4153 	else if (psr->enabled)
4154 		mode = "PSR1";
4155 	else
4156 		mode = "";
4157 
4158 	if (psr->su_region_et_enabled)
4159 		region_et = " (Early Transport)";
4160 	else
4161 		region_et = "";
4162 
4163 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
4164 	if (psr->no_psr_reason)
4165 		seq_printf(m, "  %s\n", psr->no_psr_reason);
4166 }
4167 
4168 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
4169 {
4170 	struct intel_display *display = to_intel_display(intel_dp);
4171 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
4172 	struct intel_psr *psr = &intel_dp->psr;
4173 	struct ref_tracker *wakeref;
4174 	bool enabled;
4175 	u32 val, psr2_ctl;
4176 
4177 	intel_psr_sink_capability(intel_dp, m);
4178 
4179 	if (!(psr->sink_support || psr->sink_panel_replay_support))
4180 		return 0;
4181 
4182 	wakeref = intel_display_rpm_get(display);
4183 	mutex_lock(&psr->lock);
4184 
4185 	intel_psr_print_mode(intel_dp, m);
4186 
4187 	if (!psr->enabled) {
4188 		seq_printf(m, "PSR sink not reliable: %s\n",
4189 			   str_yes_no(psr->sink_not_reliable));
4190 
4191 		goto unlock;
4192 	}
4193 
4194 	if (psr->panel_replay_enabled) {
4195 		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
4196 
4197 		if (intel_dp_is_edp(intel_dp))
4198 			psr2_ctl = intel_de_read(display,
4199 						 EDP_PSR2_CTL(display,
4200 							      cpu_transcoder));
4201 
4202 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
4203 	} else if (psr->sel_update_enabled) {
4204 		val = intel_de_read(display,
4205 				    EDP_PSR2_CTL(display, cpu_transcoder));
4206 		enabled = val & EDP_PSR2_ENABLE;
4207 	} else {
4208 		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
4209 		enabled = val & EDP_PSR_ENABLE;
4210 	}
4211 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
4212 		   str_enabled_disabled(enabled), val);
4213 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
4214 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
4215 			   psr2_ctl);
4216 	psr_source_status(intel_dp, m);
4217 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
4218 		   psr->busy_frontbuffer_bits);
4219 
4220 	/*
4221 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
4222 	 */
4223 	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
4224 	seq_printf(m, "Performance counter: %u\n",
4225 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
4226 
4227 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
4228 		seq_printf(m, "Last attempted entry at: %lld\n",
4229 			   psr->last_entry_attempt);
4230 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
4231 	}
4232 
4233 	if (psr->sel_update_enabled) {
4234 		u32 su_frames_val[3];
4235 		int frame;
4236 
4237 		/*
4238 		 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
4239 		 * (it returns zeros only) and it has been removed on Xe2_LPD.
4240 		 */
4241 		if (DISPLAY_VER(display) < 13) {
4242 			/*
4243 			 * Reading all 3 registers beforehand to minimize crossing a
4244 			 * frame boundary between register reads
4245 			 */
4246 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
4247 				val = intel_de_read(display,
4248 						    PSR2_SU_STATUS(display, cpu_transcoder, frame));
4249 				su_frames_val[frame / 3] = val;
4250 			}
4251 
4252 			seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
4253 
4254 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
4255 				u32 su_blocks;
4256 
4257 				su_blocks = su_frames_val[frame / 3] &
4258 					PSR2_SU_STATUS_MASK(frame);
4259 				su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
4260 				seq_printf(m, "%d\t%d\n", frame, su_blocks);
4261 			}
4262 		}
4263 
4264 		seq_printf(m, "PSR2 selective fetch: %s\n",
4265 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
4266 	}
4267 
4268 unlock:
4269 	mutex_unlock(&psr->lock);
4270 	intel_display_rpm_put(display, wakeref);
4271 
4272 	return 0;
4273 }
4274 
4275 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4276 {
4277 	struct intel_display *display = m->private;
4278 	struct intel_dp *intel_dp = NULL;
4279 	struct intel_encoder *encoder;
4280 
4281 	if (!HAS_PSR(display))
4282 		return -ENODEV;
4283 
4284 	/* Find the first EDP which supports PSR */
4285 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4286 		intel_dp = enc_to_intel_dp(encoder);
4287 		break;
4288 	}
4289 
4290 	if (!intel_dp)
4291 		return -ENODEV;
4292 
4293 	return intel_psr_status(m, intel_dp);
4294 }
4295 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4296 
4297 static int
4298 i915_edp_psr_debug_set(void *data, u64 val)
4299 {
4300 	struct intel_display *display = data;
4301 	struct intel_encoder *encoder;
4302 	int ret = -ENODEV;
4303 
4304 	if (!HAS_PSR(display))
4305 		return ret;
4306 
4307 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4308 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4309 
4310 		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4311 
4312 		// TODO: split to each transcoder's PSR debug state
4313 		with_intel_display_rpm(display)
4314 			ret = intel_psr_debug_set(intel_dp, val);
4315 	}
4316 
4317 	return ret;
4318 }
4319 
4320 static int
4321 i915_edp_psr_debug_get(void *data, u64 *val)
4322 {
4323 	struct intel_display *display = data;
4324 	struct intel_encoder *encoder;
4325 
4326 	if (!HAS_PSR(display))
4327 		return -ENODEV;
4328 
4329 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4330 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4331 
4332 		// TODO: split to each transcoder's PSR debug state
4333 		*val = READ_ONCE(intel_dp->psr.debug);
4334 		return 0;
4335 	}
4336 
4337 	return -ENODEV;
4338 }
4339 
4340 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4341 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4342 			"%llu\n");
4343 
4344 void intel_psr_debugfs_register(struct intel_display *display)
4345 {
4346 	struct dentry *debugfs_root = display->drm->debugfs_root;
4347 
4348 	debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
4349 			    display, &i915_edp_psr_debug_fops);
4350 
4351 	debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
4352 			    display, &i915_edp_psr_status_fops);
4353 }
4354 
4355 static const char *psr_mode_str(struct intel_dp *intel_dp)
4356 {
4357 	if (intel_dp->psr.panel_replay_enabled)
4358 		return "PANEL-REPLAY";
4359 	else if (intel_dp->psr.enabled)
4360 		return "PSR";
4361 
4362 	return "unknown";
4363 }
4364 
4365 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4366 {
4367 	struct intel_connector *connector = m->private;
4368 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4369 	static const char * const sink_status[] = {
4370 		"inactive",
4371 		"transition to active, capture and display",
4372 		"active, display from RFB",
4373 		"active, capture and display on sink device timings",
4374 		"transition to inactive, capture and display, timing re-sync",
4375 		"reserved",
4376 		"reserved",
4377 		"sink internal error",
4378 	};
4379 	const char *str;
4380 	int ret;
4381 	u8 status, error_status;
4382 
4383 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4384 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4385 		return -ENODEV;
4386 	}
4387 
4388 	if (connector->base.status != connector_status_connected)
4389 		return -ENODEV;
4390 
4391 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4392 	if (ret)
4393 		return ret;
4394 
4395 	status &= DP_PSR_SINK_STATE_MASK;
4396 	if (status < ARRAY_SIZE(sink_status))
4397 		str = sink_status[status];
4398 	else
4399 		str = "unknown";
4400 
4401 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4402 
4403 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4404 
4405 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4406 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4407 			    DP_PSR_LINK_CRC_ERROR))
4408 		seq_puts(m, ":\n");
4409 	else
4410 		seq_puts(m, "\n");
4411 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4412 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4413 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4414 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4415 	if (error_status & DP_PSR_LINK_CRC_ERROR)
4416 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4417 
4418 	return ret;
4419 }
4420 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4421 
4422 static int i915_psr_status_show(struct seq_file *m, void *data)
4423 {
4424 	struct intel_connector *connector = m->private;
4425 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4426 
4427 	return intel_psr_status(m, intel_dp);
4428 }
4429 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4430 
4431 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4432 {
4433 	struct intel_display *display = to_intel_display(connector);
4434 	struct dentry *root = connector->base.debugfs_entry;
4435 
4436 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4437 	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4438 		return;
4439 
4440 	debugfs_create_file("i915_psr_sink_status", 0444, root,
4441 			    connector, &i915_psr_sink_status_fops);
4442 
4443 	if (HAS_PSR(display) || HAS_DP20(display))
4444 		debugfs_create_file("i915_psr_status", 0444, root,
4445 				    connector, &i915_psr_status_fops);
4446 }
4447 
4448 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4449 {
4450 	/*
4451 	 * eDP Panel Replay always uses ALPM.
4452 	 * PSR2 uses ALPM, but PSR1 doesn't.
4453 	 */
4454 	return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4455 					     crtc_state->has_panel_replay);
4456 }
4457 
4458 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
4459 				   const struct intel_crtc_state *crtc_state)
4460 {
4461 	return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
4462 }
4463 
4464 void intel_psr_compute_config_late(struct intel_dp *intel_dp,
4465 				   struct intel_crtc_state *crtc_state)
4466 {
4467 	struct intel_display *display = to_intel_display(intel_dp);
4468 	int vblank = intel_crtc_vblank_length(crtc_state);
4469 	int wake_lines;
4470 
4471 	if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
4472 		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
4473 	else if (intel_psr_needs_alpm(intel_dp, crtc_state))
4474 		wake_lines = DISPLAY_VER(display) < 20 ?
4475 			     psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
4476 						    crtc_state->alpm_state.fast_wake_lines) :
4477 			     crtc_state->alpm_state.io_wake_lines;
4478 	else
4479 		wake_lines = 0;
4480 
4481 	/*
4482 	 * Disable the PSR features if wake lines exceed the available vblank.
4483 	 * Though SCL is computed based on these PSR features, it is not reset
4484 	 * even if the PSR features are disabled to avoid changing vblank start
4485 	 * at this stage.
4486 	 */
4487 	if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
4488 		drm_dbg_kms(display->drm,
4489 			    "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
4490 			    wake_lines);
4491 
4492 		if (crtc_state->has_panel_replay) {
4493 			crtc_state->has_panel_replay = false;
4494 			/*
4495 			 * TODO: Add a fallback to PSR/PSR2.
4496 			 * Since panel replay cannot be supported, we could fall back to PSR/PSR2.
4497 			 * This will require calling compute_config for PSR and PSR2 with a check
4498 			 * against the actual guardband instead of vblank_length.
4499 			 */
4500 			crtc_state->has_psr = false;
4501 		}
4502 
4503 		crtc_state->has_sel_update = false;
4504 		crtc_state->enable_psr2_su_region_et = false;
4505 		crtc_state->enable_psr2_sel_fetch = false;
4506 	}
4507 
4508 	/* Wa_18037818876 */
4509 	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
4510 		crtc_state->has_psr = false;
4511 		drm_dbg_kms(display->drm,
4512 			    "PSR disabled to workaround PSR FSM hang issue\n");
4513 	}
4514 
4515 	intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
4516 }
4517 
4518 int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
4519 {
4520 	struct intel_display *display = to_intel_display(crtc_state);
4521 	int psr_min_guardband;
4522 	int wake_lines;
4523 
4524 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
4525 		return 0;
4526 
4527 	if (crtc_state->has_panel_replay)
4528 		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
4529 	else if (crtc_state->has_sel_update)
4530 		wake_lines = DISPLAY_VER(display) < 20 ?
4531 			     psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
4532 						    crtc_state->alpm_state.fast_wake_lines) :
4533 			     crtc_state->alpm_state.io_wake_lines;
4534 	else
4535 		return 0;
4536 
4537 	psr_min_guardband = wake_lines + crtc_state->set_context_latency;
4538 
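	/* Transmitting the PSR2 SDP prior to the scanline costs one extra line of guardband. */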
4539 	if (crtc_state->req_psr2_sdp_prior_scanline)
4540 		psr_min_guardband++;
4541 
4542 	return psr_min_guardband;
4543 }
4544