xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 0d9f0083f7a5a31d91d501467b499bb8c4b25bdf)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_vblank.h>
30 
31 #include "i915_reg.h"
32 #include "intel_alpm.h"
33 #include "intel_atomic.h"
34 #include "intel_crtc.h"
35 #include "intel_cursor_regs.h"
36 #include "intel_ddi.h"
37 #include "intel_de.h"
38 #include "intel_display_irq.h"
39 #include "intel_display_regs.h"
40 #include "intel_display_rpm.h"
41 #include "intel_display_types.h"
42 #include "intel_dmc.h"
43 #include "intel_dp.h"
44 #include "intel_dp_aux.h"
45 #include "intel_frontbuffer.h"
46 #include "intel_hdmi.h"
47 #include "intel_psr.h"
48 #include "intel_psr_regs.h"
49 #include "intel_snps_phy.h"
50 #include "intel_step.h"
51 #include "intel_vblank.h"
52 #include "intel_vrr.h"
53 #include "skl_universal_plane.h"
54 
55 /**
56  * DOC: Panel Self Refresh (PSR/SRD)
57  *
58  * Since Haswell the display controller supports Panel Self-Refresh on
59  * display panels which have a remote frame buffer (RFB) implemented per the
60  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
61  * standby states when the system is idle but the display is on, as it
62  * completely eliminates display refresh requests to DDR memory as long as
63  * the frame buffer for that display is unchanged.
64  *
65  * Panel Self Refresh must be supported by both Hardware (source) and
66  * Panel (sink).
67  *
68  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
69  * to power down the link and memory controller. For DSI panels the same idea
70  * is called "manual mode".
71  *
72  * The implementation uses the hardware-based PSR support which automatically
73  * enters/exits self-refresh mode. The hardware takes care of sending the
74  * required DP aux message and could even retrain the link (that part isn't
75  * enabled yet though). The hardware also keeps track of any frontbuffer
76  * changes to know when to exit self-refresh mode again. Unfortunately that
77  * part doesn't work too well, hence why the i915 PSR support uses the
78  * software frontbuffer tracking to make sure it doesn't miss a screen
79  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
80  * get called by the frontbuffer tracking code. Note that because of locking
81  * issues the self-refresh re-enable code is done from a work queue, which
82  * must be correctly synchronized/cancelled when shutting down the pipe.
83  *
84  * DC3CO (DC3 clock off)
85  *
86  * On top of PSR2, GEN12 adds an intermediate power savings state that turns
87  * the clock off automatically during the PSR2 idle state.
88  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
89  * entry/exit allows the HW to enter a low-power state even when page flipping
90  * periodically (for instance a 30fps video playback scenario).
91  *
92  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
93  * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
94  * after 6 frames. If no other flip occurs and that work function executes,
95  * DC3CO is disabled and PSR2 is configured to enter deep sleep, resetting
96  * again in case of another flip.
97  * Front buffer modifications do not trigger DC3CO activation on purpose, as
98  * it would bring a lot of complexity and most modern systems will only use
99  * page flips.
100  */
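
/*
 * Rough sketch of the DC3CO flow described above, for orientation only (not
 * literal driver code; the flip-side entry point lives in the frontbuffer
 * flush handling later in this file):
 *
 *   page flip on the PSR2 pipe
 *     -> tgl_psr2_enable_dc3co()           idle frames forced to 0, target
 *                                          DC state set to DC3CO
 *     -> psr.dc3co_work scheduled ~6 frames out
 *   dc3co_work fires with no further flip in between
 *     -> tgl_dc3co_disable_work() -> tgl_psr2_disable_dc3co()
 *                                          target DC state restored, idle
 *                                          frames reprogrammed for deep sleep
 */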
101 
102 /*
103  * Description of PSR mask bits:
104  *
105  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
106  *
107  *  When unmasked (nearly) all display register writes (e.g. even
108  *  SWF) trigger a PSR exit. Some registers are excluded from this
109  *  and they have a more specific mask (described below). On icl+
110  *  this bit no longer exists and is effectively always set.
111  *
112  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
113  *
114  *  When unmasked (nearly) all pipe/plane register writes
115  *  trigger a PSR exit. Some plane registers are excluded from this
116  *  and they have a more specific mask (described below).
117  *
118  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
119  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
120  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
121  *
122  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
123  *  SPR_SURF/CURBASE are not included in this and instead are
124  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
125  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
126  *
127  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
128  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
129  *
130  *  When unmasked PSR is blocked as long as the sprite
131  *  plane is enabled. skl+ with their universal planes no
132  *  longer have a mask bit like this, and no plane being
133  *  enabled blocks PSR.
134  *
135  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
136  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
137  *
138  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
139  *  this doesn't exist but CURPOS is included in the
140  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
141  *
142  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
143  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
144  *
145  *  When unmasked PSR is blocked as long as vblank and/or vsync
146  *  interrupt is unmasked in IMR *and* enabled in IER.
147  *
148  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
149  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
150  *
151  *  Selects whether PSR exit generates an extra vblank before
152  *  the first frame is transmitted. Also note the opposite polarity
153  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
154  *  unmasked==do not generate the extra vblank).
155  *
156  *  With DC states enabled the extra vblank happens after link training,
157  *  with DC states disabled it happens immediately upon PSR exit trigger.
158  *  No idea as of now why there is a difference. HSW/BDW (which don't
159  *  even have DMC) always generate it after link training. Go figure.
160  *
161  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
162  *  and thus won't latch until the first vblank. So with DC states
163  *  enabled the register effectively uses the reset value during DC5
164  *  exit+PSR exit sequence, and thus the bit does nothing until
165  *  latched by the vblank that it was trying to prevent from being
166  *  generated in the first place. So we should probably call this
167  *  one a chicken/egg bit instead on skl+.
168  *
169  *  In standby mode (as opposed to link-off) this makes no difference
170  *  as the timing generator keeps running the whole time generating
171  *  normal periodic vblanks.
172  *
173  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
174  *  and doing so makes the behaviour match the skl+ reset value.
175  *
176  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
177  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
178  *
179  *  On BDW without this bit set no vblanks whatsoever are
180  *  generated after PSR exit. On HSW this has no apparent effect.
181  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
182  *
183  * The rest of the bits are more self-explanatory and/or
184  * irrelevant for normal operation.
185  *
186  * Description of intel_crtc_state variables. has_psr, has_panel_replay and
187  * has_sel_update:
188  *
189  *  has_psr (alone):					PSR1
190  *  has_psr + has_sel_update:				PSR2
191  *  has_psr + has_panel_replay:				Panel Replay
192  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
193  *
194  * Description of some intel_psr variables. enabled, panel_replay_enabled,
195  * sel_update_enabled
196  *
197  *  enabled (alone):						PSR1
198  *  enabled + sel_update_enabled:				PSR2
199  *  enabled + panel_replay_enabled:				Panel Replay
200  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
201  */
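
/*
 * Illustrative sketch only (not driver code): one way to turn the
 * intel_crtc_state flags listed above into a single mode value. The enum
 * values and the helper variable are hypothetical.
 *
 *   if (!crtc_state->has_psr)
 *           mode = MODE_NONE;
 *   else if (crtc_state->has_panel_replay)
 *           mode = crtc_state->has_sel_update ? MODE_PANEL_REPLAY_SU :
 *                                               MODE_PANEL_REPLAY;
 *   else
 *           mode = crtc_state->has_sel_update ? MODE_PSR2 : MODE_PSR1;
 */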
202 
203 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
204 			   (intel_dp)->psr.source_support)
205 
206 bool intel_encoder_can_psr(struct intel_encoder *encoder)
207 {
208 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
209 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
210 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
211 	else
212 		return false;
213 }
214 
215 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
216 				  const struct intel_crtc_state *crtc_state)
217 {
218 	/*
219 	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
220 	 * the output is enabled. For non-eDP outputs the main link is always
221 	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
222 	 * for eDP.
223 	 *
224 	 * TODO:
225 	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
226 	 *   the ALPM with main-link off mode is not enabled.
227 	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
228 	 *   main-link off mode is added for it and this mode gets enabled.
229 	 */
230 	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
231 	       intel_encoder_can_psr(encoder);
232 }
233 
234 static bool psr_global_enabled(struct intel_dp *intel_dp)
235 {
236 	struct intel_connector *connector = intel_dp->attached_connector;
237 
238 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
239 	case I915_PSR_DEBUG_DEFAULT:
240 		return intel_dp_is_edp(intel_dp) ?
241 			connector->panel.vbt.psr.enable : true;
242 	case I915_PSR_DEBUG_DISABLE:
243 		return false;
244 	default:
245 		return true;
246 	}
247 }
248 
249 static bool sel_update_global_enabled(struct intel_dp *intel_dp)
250 {
251 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
252 	case I915_PSR_DEBUG_DISABLE:
253 	case I915_PSR_DEBUG_FORCE_PSR1:
254 		return false;
255 	default:
256 		return true;
257 	}
258 }
259 
260 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
261 {
262 	struct intel_display *display = to_intel_display(intel_dp);
263 
264 	return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
265 		display->params.enable_panel_replay;
266 }
267 
268 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
269 {
270 	struct intel_display *display = to_intel_display(intel_dp);
271 
272 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
273 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
274 }
275 
276 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
277 {
278 	struct intel_display *display = to_intel_display(intel_dp);
279 
280 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
281 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
282 }
283 
284 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
285 {
286 	struct intel_display *display = to_intel_display(intel_dp);
287 
288 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
289 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
290 }
291 
292 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
293 {
294 	struct intel_display *display = to_intel_display(intel_dp);
295 
296 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
297 		EDP_PSR_MASK(intel_dp->psr.transcoder);
298 }
299 
300 static i915_reg_t psr_ctl_reg(struct intel_display *display,
301 			      enum transcoder cpu_transcoder)
302 {
303 	if (DISPLAY_VER(display) >= 8)
304 		return EDP_PSR_CTL(display, cpu_transcoder);
305 	else
306 		return HSW_SRD_CTL;
307 }
308 
309 static i915_reg_t psr_debug_reg(struct intel_display *display,
310 				enum transcoder cpu_transcoder)
311 {
312 	if (DISPLAY_VER(display) >= 8)
313 		return EDP_PSR_DEBUG(display, cpu_transcoder);
314 	else
315 		return HSW_SRD_DEBUG;
316 }
317 
318 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
319 				   enum transcoder cpu_transcoder)
320 {
321 	if (DISPLAY_VER(display) >= 8)
322 		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
323 	else
324 		return HSW_SRD_PERF_CNT;
325 }
326 
327 static i915_reg_t psr_status_reg(struct intel_display *display,
328 				 enum transcoder cpu_transcoder)
329 {
330 	if (DISPLAY_VER(display) >= 8)
331 		return EDP_PSR_STATUS(display, cpu_transcoder);
332 	else
333 		return HSW_SRD_STATUS;
334 }
335 
336 static i915_reg_t psr_imr_reg(struct intel_display *display,
337 			      enum transcoder cpu_transcoder)
338 {
339 	if (DISPLAY_VER(display) >= 12)
340 		return TRANS_PSR_IMR(display, cpu_transcoder);
341 	else
342 		return EDP_PSR_IMR;
343 }
344 
345 static i915_reg_t psr_iir_reg(struct intel_display *display,
346 			      enum transcoder cpu_transcoder)
347 {
348 	if (DISPLAY_VER(display) >= 12)
349 		return TRANS_PSR_IIR(display, cpu_transcoder);
350 	else
351 		return EDP_PSR_IIR;
352 }
353 
354 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
355 				  enum transcoder cpu_transcoder)
356 {
357 	if (DISPLAY_VER(display) >= 8)
358 		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
359 	else
360 		return HSW_SRD_AUX_CTL;
361 }
362 
363 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
364 				   enum transcoder cpu_transcoder, int i)
365 {
366 	if (DISPLAY_VER(display) >= 8)
367 		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
368 	else
369 		return HSW_SRD_AUX_DATA(i);
370 }
371 
372 static void psr_irq_control(struct intel_dp *intel_dp)
373 {
374 	struct intel_display *display = to_intel_display(intel_dp);
375 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
376 	u32 mask;
377 
378 	if (intel_dp->psr.panel_replay_enabled)
379 		return;
380 
381 	mask = psr_irq_psr_error_bit_get(intel_dp);
382 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
383 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
384 			psr_irq_pre_entry_bit_get(intel_dp);
385 
386 	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
387 		     psr_irq_mask_get(intel_dp), ~mask);
388 }
389 
390 static void psr_event_print(struct intel_display *display,
391 			    u32 val, bool sel_update_enabled)
392 {
393 	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
394 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
395 		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
396 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
397 		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
398 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
399 		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
400 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
401 		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
402 	if (val & PSR_EVENT_GRAPHICS_RESET)
403 		drm_dbg_kms(display->drm, "\tGraphics reset\n");
404 	if (val & PSR_EVENT_PCH_INTERRUPT)
405 		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
406 	if (val & PSR_EVENT_MEMORY_UP)
407 		drm_dbg_kms(display->drm, "\tMemory up\n");
408 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
409 		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
410 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
411 		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
412 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
413 		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
414 	if (val & PSR_EVENT_REGISTER_UPDATE)
415 		drm_dbg_kms(display->drm, "\tRegister updated\n");
416 	if (val & PSR_EVENT_HDCP_ENABLE)
417 		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
418 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
419 		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
420 	if (val & PSR_EVENT_VBI_ENABLE)
421 		drm_dbg_kms(display->drm, "\tVBI enabled\n");
422 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
423 		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
424 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
425 		drm_dbg_kms(display->drm, "\tPSR disabled\n");
426 }
427 
428 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
429 {
430 	struct intel_display *display = to_intel_display(intel_dp);
431 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
432 	ktime_t time_ns =  ktime_get();
433 
434 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
435 		intel_dp->psr.last_entry_attempt = time_ns;
436 		drm_dbg_kms(display->drm,
437 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
438 			    transcoder_name(cpu_transcoder));
439 	}
440 
441 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
442 		intel_dp->psr.last_exit = time_ns;
443 		drm_dbg_kms(display->drm,
444 			    "[transcoder %s] PSR exit completed\n",
445 			    transcoder_name(cpu_transcoder));
446 
447 		if (DISPLAY_VER(display) >= 9) {
448 			u32 val;
449 
450 			val = intel_de_rmw(display,
451 					   PSR_EVENT(display, cpu_transcoder),
452 					   0, 0);
453 
454 			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
455 		}
456 	}
457 
458 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
459 		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
460 			 transcoder_name(cpu_transcoder));
461 
462 		intel_dp->psr.irq_aux_error = true;
463 
464 		/*
465 		 * If this interrupt is not masked it will keep firing
466 		 * so fast that it prevents the scheduled work from
467 		 * running.
468 		 * Also, after a PSR error we don't want to arm PSR
469 		 * again, so we don't care about unmasking the interrupt
470 		 * or unsetting irq_aux_error.
471 		 */
472 		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
473 			     0, psr_irq_psr_error_bit_get(intel_dp));
474 
475 		queue_work(display->wq.unordered, &intel_dp->psr.work);
476 	}
477 }
478 
479 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
480 {
481 	struct intel_display *display = to_intel_display(intel_dp);
482 	u8 val = 8; /* assume the worst if we can't read the value */
483 
484 	if (drm_dp_dpcd_readb(&intel_dp->aux,
485 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
486 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
487 	else
488 		drm_dbg_kms(display->drm,
489 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
490 	return val;
491 }
492 
493 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
494 {
495 	u8 su_capability = 0;
496 
497 	if (intel_dp->psr.sink_panel_replay_su_support)
498 		drm_dp_dpcd_readb(&intel_dp->aux,
499 				  DP_PANEL_REPLAY_CAP_CAPABILITY,
500 				  &su_capability);
501 	else
502 		su_capability = intel_dp->psr_dpcd[1];
503 
504 	return su_capability;
505 }
506 
507 static unsigned int
508 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
509 {
510 	return intel_dp->psr.sink_panel_replay_su_support ?
511 		DP_PANEL_REPLAY_CAP_X_GRANULARITY :
512 		DP_PSR2_SU_X_GRANULARITY;
513 }
514 
515 static unsigned int
516 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
517 {
518 	return intel_dp->psr.sink_panel_replay_su_support ?
519 		DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
520 		DP_PSR2_SU_Y_GRANULARITY;
521 }
522 
523 /*
524  * Note: Bits related to granularity are the same in the panel replay and
525  * PSR registers. Rely on the PSR definitions for these "common" bits.
526  */
527 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
528 {
529 	struct intel_display *display = to_intel_display(intel_dp);
530 	ssize_t r;
531 	u16 w;
532 	u8 y;
533 
534 	/*
535 	 * TODO: Do we need to take into account a panel supporting both PSR and
536 	 * Panel Replay?
537 	 */
538 
539 	/*
540 	 * If the sink doesn't have specific granularity requirements, set the
541 	 * legacy ones.
542 	 */
543 	if (!(intel_dp_get_su_capability(intel_dp) &
544 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
545 		/* As PSR2 HW sends full lines, we do not care about x granularity */
546 		w = 4;
547 		y = 4;
548 		goto exit;
549 	}
550 
551 	r = drm_dp_dpcd_read(&intel_dp->aux,
552 			     intel_dp_get_su_x_granularity_offset(intel_dp),
553 			     &w, 2);
554 	if (r != 2)
555 		drm_dbg_kms(display->drm,
556 			    "Unable to read selective update x granularity\n");
557 	/*
558 	 * Spec says that if the value read is 0 the default granularity should
559 	 * be used instead.
560 	 */
561 	if (r != 2 || w == 0)
562 		w = 4;
563 
564 	r = drm_dp_dpcd_read(&intel_dp->aux,
565 			     intel_dp_get_su_y_granularity_offset(intel_dp),
566 			     &y, 1);
567 	if (r != 1) {
568 		drm_dbg_kms(display->drm,
569 			    "Unable to read selective update y granularity\n");
570 		y = 4;
571 	}
572 	if (y == 0)
573 		y = 1;
574 
575 exit:
576 	intel_dp->psr.su_w_granularity = w;
577 	intel_dp->psr.su_y_granularity = y;
578 }
579 
580 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
581 {
582 	struct intel_display *display = to_intel_display(intel_dp);
583 	int ret;
584 
585 	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
586 				    &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
587 	if (ret < 0)
588 		return;
589 
590 	if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
591 	      DP_PANEL_REPLAY_SUPPORT))
592 		return;
593 
594 	if (intel_dp_is_edp(intel_dp)) {
595 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
596 			drm_dbg_kms(display->drm,
597 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
598 			return;
599 		}
600 
601 		if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
602 		      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
603 			drm_dbg_kms(display->drm,
604 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
605 			return;
606 		}
607 	}
608 
609 	intel_dp->psr.sink_panel_replay_support = true;
610 
611 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
612 	    DP_PANEL_REPLAY_SU_SUPPORT)
613 		intel_dp->psr.sink_panel_replay_su_support = true;
614 
615 	drm_dbg_kms(display->drm,
616 		    "Panel replay %sis supported by panel\n",
617 		    intel_dp->psr.sink_panel_replay_su_support ?
618 		    "selective_update " : "");
619 }
620 
621 static void _psr_init_dpcd(struct intel_dp *intel_dp)
622 {
623 	struct intel_display *display = to_intel_display(intel_dp);
624 	int ret;
625 
626 	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
627 				    sizeof(intel_dp->psr_dpcd));
628 	if (ret < 0)
629 		return;
630 
631 	if (!intel_dp->psr_dpcd[0])
632 		return;
633 
634 	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
635 		    intel_dp->psr_dpcd[0]);
636 
637 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
638 		drm_dbg_kms(display->drm,
639 			    "PSR support not currently available for this panel\n");
640 		return;
641 	}
642 
643 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
644 		drm_dbg_kms(display->drm,
645 			    "Panel lacks power state control, PSR cannot be enabled\n");
646 		return;
647 	}
648 
649 	intel_dp->psr.sink_support = true;
650 	intel_dp->psr.sink_sync_latency =
651 		intel_dp_get_sink_sync_latency(intel_dp);
652 
653 	if (DISPLAY_VER(display) >= 9 &&
654 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
655 		bool y_req = intel_dp->psr_dpcd[1] &
656 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
657 
658 		/*
659 		 * All panels that support PSR version 03h (PSR2 +
660 		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
661 		 * only sure that it is going to be used when required by the
662 		 * panel. This way the panel is capable of doing selective
663 		 * updates without an aux frame sync.
664 		 *
665 		 * To support PSR version 02h and PSR version 03h panels
666 		 * without the Y-coordinate requirement we would need to
667 		 * enable GTC first.
668 		 */
669 		intel_dp->psr.sink_psr2_support = y_req &&
670 			intel_alpm_aux_wake_supported(intel_dp);
671 		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
672 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
673 	}
674 }
675 
676 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
677 {
678 	_psr_init_dpcd(intel_dp);
679 
680 	_panel_replay_init_dpcd(intel_dp);
681 
682 	if (intel_dp->psr.sink_psr2_support ||
683 	    intel_dp->psr.sink_panel_replay_su_support)
684 		intel_dp_get_su_granularity(intel_dp);
685 }
686 
687 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
688 {
689 	struct intel_display *display = to_intel_display(intel_dp);
690 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
691 	u32 aux_clock_divider, aux_ctl;
692 	/* write DP_SET_POWER=D0 */
693 	static const u8 aux_msg[] = {
694 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
695 		[1] = (DP_SET_POWER >> 8) & 0xff,
696 		[2] = DP_SET_POWER & 0xff,
697 		[3] = 1 - 1,
698 		[4] = DP_SET_POWER_D0,
699 	};
700 	int i;
701 
702 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
703 	for (i = 0; i < sizeof(aux_msg); i += 4)
704 		intel_de_write(display,
705 			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
706 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
707 
708 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
709 
710 	/* Start with bits set for DDI_AUX_CTL register */
711 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
712 					     aux_clock_divider);
713 
714 	/* Select only valid bits for SRD_AUX_CTL */
715 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
716 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
717 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
718 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
719 
720 	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
721 		       aux_ctl);
722 }
723 
724 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
725 {
726 	struct intel_display *display = to_intel_display(intel_dp);
727 
728 	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
729 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
730 		return false;
731 
732 	return panel_replay ?
733 		intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
734 		DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
735 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
736 }
737 
738 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
739 				      const struct intel_crtc_state *crtc_state)
740 {
741 	u8 val = DP_PANEL_REPLAY_ENABLE |
742 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
743 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
744 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
745 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
746 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
747 
748 	if (crtc_state->has_sel_update)
749 		val |= DP_PANEL_REPLAY_SU_ENABLE;
750 
751 	if (crtc_state->enable_psr2_su_region_et)
752 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
753 
754 	if (crtc_state->req_psr2_sdp_prior_scanline)
755 		panel_replay_config2 |=
756 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
757 
758 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
759 
760 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
761 			   panel_replay_config2);
762 }
763 
764 static void _psr_enable_sink(struct intel_dp *intel_dp,
765 			     const struct intel_crtc_state *crtc_state)
766 {
767 	struct intel_display *display = to_intel_display(intel_dp);
768 	u8 val = 0;
769 
770 	if (crtc_state->has_sel_update) {
771 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
772 	} else {
773 		if (intel_dp->psr.link_standby)
774 			val |= DP_PSR_MAIN_LINK_ACTIVE;
775 
776 		if (DISPLAY_VER(display) >= 8)
777 			val |= DP_PSR_CRC_VERIFICATION;
778 	}
779 
780 	if (crtc_state->req_psr2_sdp_prior_scanline)
781 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
782 
783 	if (crtc_state->enable_psr2_su_region_et)
784 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
785 
786 	if (intel_dp->psr.entry_setup_frames > 0)
787 		val |= DP_PSR_FRAME_CAPTURE;
788 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
789 
790 	val |= DP_PSR_ENABLE;
791 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
792 }
793 
794 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
795 				  const struct intel_crtc_state *crtc_state)
796 {
797 	intel_alpm_enable_sink(intel_dp, crtc_state);
798 
799 	crtc_state->has_panel_replay ?
800 		_panel_replay_enable_sink(intel_dp, crtc_state) :
801 		_psr_enable_sink(intel_dp, crtc_state);
802 
803 	if (intel_dp_is_edp(intel_dp))
804 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
805 }
806 
807 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
808 {
809 	if (CAN_PANEL_REPLAY(intel_dp))
810 		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
811 				   DP_PANEL_REPLAY_ENABLE);
812 }
813 
814 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
815 {
816 	struct intel_display *display = to_intel_display(intel_dp);
817 	struct intel_connector *connector = intel_dp->attached_connector;
818 	u32 val = 0;
819 
820 	if (DISPLAY_VER(display) >= 11)
821 		val |= EDP_PSR_TP4_TIME_0us;
822 
823 	if (display->params.psr_safest_params) {
824 		val |= EDP_PSR_TP1_TIME_2500us;
825 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
826 		goto check_tp3_sel;
827 	}
828 
829 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
830 		val |= EDP_PSR_TP1_TIME_0us;
831 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
832 		val |= EDP_PSR_TP1_TIME_100us;
833 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
834 		val |= EDP_PSR_TP1_TIME_500us;
835 	else
836 		val |= EDP_PSR_TP1_TIME_2500us;
837 
838 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
839 		val |= EDP_PSR_TP2_TP3_TIME_0us;
840 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
841 		val |= EDP_PSR_TP2_TP3_TIME_100us;
842 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
843 		val |= EDP_PSR_TP2_TP3_TIME_500us;
844 	else
845 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
846 
847 	/*
848 	 * WA 0479: hsw,bdw
849 	 * "Do not skip both TP1 and TP2/TP3"
850 	 */
851 	if (DISPLAY_VER(display) < 9 &&
852 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
853 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
854 		val |= EDP_PSR_TP2_TP3_TIME_100us;
855 
856 check_tp3_sel:
857 	if (intel_dp_source_supports_tps3(display) &&
858 	    drm_dp_tps3_supported(intel_dp->dpcd))
859 		val |= EDP_PSR_TP_TP1_TP3;
860 	else
861 		val |= EDP_PSR_TP_TP1_TP2;
862 
863 	return val;
864 }
865 
866 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
867 {
868 	struct intel_display *display = to_intel_display(intel_dp);
869 	struct intel_connector *connector = intel_dp->attached_connector;
870 	int idle_frames;
871 
872 	/* Let's use 6 as the minimum to cover all known cases including the
873 	 * off-by-one issue that HW has in some cases.
874 	 */
875 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
876 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
877 
878 	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
879 		idle_frames = 0xf;
880 
881 	return idle_frames;
882 }
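
/*
 * Worked example for psr_compute_idle_frames(), with illustrative numbers:
 * a VBT idle_frames of 2 and a sink_sync_latency of 8 (the assumed worst
 * case) give max(6, 2) = 6 and then max(6, 8 + 1) = 9, so 9 idle frames are
 * programmed; anything above 0xf would be clamped to fit the register field.
 */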
883 
884 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
885 {
886 	struct intel_display *display = to_intel_display(intel_dp);
887 	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
888 	struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
889 
890 	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
891 		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
892 		intel_dp->psr.active_non_psr_pipes ||
893 		READ_ONCE(vblank->enabled);
894 }
895 
896 static void hsw_activate_psr1(struct intel_dp *intel_dp)
897 {
898 	struct intel_display *display = to_intel_display(intel_dp);
899 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
900 	u32 max_sleep_time = 0x1f;
901 	u32 val = EDP_PSR_ENABLE;
902 
903 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
904 
905 	if (DISPLAY_VER(display) < 20)
906 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
907 
908 	if (display->platform.haswell)
909 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
910 
911 	if (intel_dp->psr.link_standby)
912 		val |= EDP_PSR_LINK_STANDBY;
913 
914 	val |= intel_psr1_get_tp_time(intel_dp);
915 
916 	if (DISPLAY_VER(display) >= 8)
917 		val |= EDP_PSR_CRC_ENABLE;
918 
919 	if (DISPLAY_VER(display) >= 20)
920 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
921 
922 	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
923 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
924 
925 	/* Wa_16025596647 */
926 	if ((DISPLAY_VER(display) == 20 ||
927 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
928 	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
929 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
930 								       intel_dp->psr.pipe,
931 								       true);
932 }
933 
934 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
935 {
936 	struct intel_display *display = to_intel_display(intel_dp);
937 	struct intel_connector *connector = intel_dp->attached_connector;
938 	u32 val = 0;
939 
940 	if (display->params.psr_safest_params)
941 		return EDP_PSR2_TP2_TIME_2500us;
942 
943 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
944 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
945 		val |= EDP_PSR2_TP2_TIME_50us;
946 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
947 		val |= EDP_PSR2_TP2_TIME_100us;
948 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
949 		val |= EDP_PSR2_TP2_TIME_500us;
950 	else
951 		val |= EDP_PSR2_TP2_TIME_2500us;
952 
953 	return val;
954 }
955 
956 static int psr2_block_count_lines(struct intel_dp *intel_dp)
957 {
958 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
959 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
960 }
961 
962 static int psr2_block_count(struct intel_dp *intel_dp)
963 {
964 	return psr2_block_count_lines(intel_dp) / 4;
965 }
966 
967 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
968 {
969 	u8 frames_before_su_entry;
970 
971 	frames_before_su_entry = max_t(u8,
972 				       intel_dp->psr.sink_sync_latency + 1,
973 				       2);
974 
975 	/* Entry setup frames must be at least 1 less than frames before SU entry */
976 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
977 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
978 
979 	return frames_before_su_entry;
980 }
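
/*
 * Worked example for frames_before_su_entry(), with illustrative numbers:
 * a sink_sync_latency of 0 gives max(0 + 1, 2) = 2 frames before SU entry;
 * if entry_setup_frames is 2 (>= 2), the value is bumped to 2 + 1 = 3 so
 * that entry setup stays at least one frame shorter.
 */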
981 
982 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
983 {
984 	struct intel_display *display = to_intel_display(intel_dp);
985 	struct intel_psr *psr = &intel_dp->psr;
986 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
987 
988 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
989 		u32 val = psr->su_region_et_enabled ?
990 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
991 
992 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
993 			val |= EDP_PSR2_SU_SDP_SCANLINE;
994 
995 		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
996 			       val);
997 	}
998 
999 	intel_de_rmw(display,
1000 		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1001 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1002 
1003 	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1004 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1005 }
1006 
1007 static void hsw_activate_psr2(struct intel_dp *intel_dp)
1008 {
1009 	struct intel_display *display = to_intel_display(intel_dp);
1010 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1011 	u32 val = EDP_PSR2_ENABLE;
1012 	u32 psr_val = 0;
1013 	u8 idle_frames;
1014 
1015 	/* Wa_16025596647 */
1016 	if ((DISPLAY_VER(display) == 20 ||
1017 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1018 	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
1019 		idle_frames = 0;
1020 	else
1021 		idle_frames = psr_compute_idle_frames(intel_dp);
1022 	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
1023 
1024 	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
1025 		val |= EDP_SU_TRACK_ENABLE;
1026 
1027 	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1028 		val |= EDP_Y_COORDINATE_ENABLE;
1029 
1030 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1031 
1032 	val |= intel_psr2_get_tp_time(intel_dp);
1033 
1034 	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1035 		if (psr2_block_count(intel_dp) > 2)
1036 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1037 		else
1038 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1039 	}
1040 
1041 	/* Wa_22012278275:adl-p */
1042 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1043 		static const u8 map[] = {
1044 			2, /* 5 lines */
1045 			1, /* 6 lines */
1046 			0, /* 7 lines */
1047 			3, /* 8 lines */
1048 			6, /* 9 lines */
1049 			5, /* 10 lines */
1050 			4, /* 11 lines */
1051 			7, /* 12 lines */
1052 		};
1053 		/*
1054 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1055 		 * comments below for more information
1056 		 */
1057 		int tmp;
1058 
1059 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1060 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1061 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1062 
1063 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1064 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1065 	} else if (DISPLAY_VER(display) >= 20) {
1066 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1067 	} else if (DISPLAY_VER(display) >= 12) {
1068 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1069 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1070 	} else if (DISPLAY_VER(display) >= 9) {
1071 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1072 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1073 	}
1074 
1075 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1076 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1077 
1078 	if (DISPLAY_VER(display) >= 20)
1079 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1080 
1081 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1082 		u32 tmp;
1083 
1084 		tmp = intel_de_read(display,
1085 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1086 		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1087 	} else if (HAS_PSR2_SEL_FETCH(display)) {
1088 		intel_de_write(display,
1089 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1090 	}
1091 
1092 	if (intel_dp->psr.su_region_et_enabled)
1093 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1094 
1095 	/*
1096 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1097 	 * recommends keeping this bit unset while PSR2 is enabled.
1098 	 */
1099 	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1100 
1101 	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1102 }
1103 
1104 static bool
1105 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1106 {
1107 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1108 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1109 	else if (DISPLAY_VER(display) >= 12)
1110 		return cpu_transcoder == TRANSCODER_A;
1111 	else if (DISPLAY_VER(display) >= 9)
1112 		return cpu_transcoder == TRANSCODER_EDP;
1113 	else
1114 		return false;
1115 }
1116 
1117 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1118 {
1119 	if (!crtc_state->hw.active)
1120 		return 0;
1121 
1122 	return DIV_ROUND_UP(1000 * 1000,
1123 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1124 }
1125 
1126 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1127 				     u32 idle_frames)
1128 {
1129 	struct intel_display *display = to_intel_display(intel_dp);
1130 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1131 
1132 	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1133 		     EDP_PSR2_IDLE_FRAMES_MASK,
1134 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1135 }
1136 
1137 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1138 {
1139 	struct intel_display *display = to_intel_display(intel_dp);
1140 
1141 	psr2_program_idle_frames(intel_dp, 0);
1142 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1143 }
1144 
1145 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1146 {
1147 	struct intel_display *display = to_intel_display(intel_dp);
1148 
1149 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1150 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1151 }
1152 
1153 static void tgl_dc3co_disable_work(struct work_struct *work)
1154 {
1155 	struct intel_dp *intel_dp =
1156 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1157 
1158 	mutex_lock(&intel_dp->psr.lock);
1159 	/* If delayed work is pending, it is not idle */
1160 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1161 		goto unlock;
1162 
1163 	tgl_psr2_disable_dc3co(intel_dp);
1164 unlock:
1165 	mutex_unlock(&intel_dp->psr.lock);
1166 }
1167 
1168 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1169 {
1170 	if (!intel_dp->psr.dc3co_exitline)
1171 		return;
1172 
1173 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1174 	/* Before PSR2 exit disallow dc3co*/
1175 	tgl_psr2_disable_dc3co(intel_dp);
1176 }
1177 
1178 static bool
1179 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1180 			      struct intel_crtc_state *crtc_state)
1181 {
1182 	struct intel_display *display = to_intel_display(intel_dp);
1183 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1184 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1185 	enum port port = dig_port->base.port;
1186 
1187 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1188 		return pipe <= PIPE_B && port <= PORT_B;
1189 	else
1190 		return pipe == PIPE_A && port == PORT_A;
1191 }
1192 
1193 static void
1194 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1195 				  struct intel_crtc_state *crtc_state)
1196 {
1197 	struct intel_display *display = to_intel_display(intel_dp);
1198 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1199 	struct i915_power_domains *power_domains = &display->power.domains;
1200 	u32 exit_scanlines;
1201 
1202 	/*
1203 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1204 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1205 	 * is applied. B.Specs:49196
1206 	 */
1207 	return;
1208 
1209 	/*
1210 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1211 	 * TODO: when the issue is addressed, this restriction should be removed.
1212 	 */
1213 	if (crtc_state->enable_psr2_sel_fetch)
1214 		return;
1215 
1216 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1217 		return;
1218 
1219 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1220 		return;
1221 
1222 	/* Wa_16011303918:adl-p */
1223 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1224 		return;
1225 
1226 	/*
1227 	 * DC3CO Exit time 200us B.Spec 49196
1228 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1229 	 */
1230 	exit_scanlines =
1231 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1232 
1233 	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1234 		return;
1235 
1236 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1237 }
1238 
1239 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1240 					      struct intel_crtc_state *crtc_state)
1241 {
1242 	struct intel_display *display = to_intel_display(intel_dp);
1243 
1244 	if (!display->params.enable_psr2_sel_fetch &&
1245 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1246 		drm_dbg_kms(display->drm,
1247 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1248 		return false;
1249 	}
1250 
1251 	if (crtc_state->uapi.async_flip) {
1252 		drm_dbg_kms(display->drm,
1253 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1254 		return false;
1255 	}
1256 
1257 	return crtc_state->enable_psr2_sel_fetch = true;
1258 }
1259 
1260 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1261 				   struct intel_crtc_state *crtc_state)
1262 {
1263 	struct intel_display *display = to_intel_display(intel_dp);
1264 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1265 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1266 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1267 	u16 y_granularity = 0;
1268 
1269 	/* PSR2 HW only sends full lines so we only need to validate the width */
1270 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1271 		return false;
1272 
1273 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1274 		return false;
1275 
1276 	/* HW tracking is only aligned to 4 lines */
1277 	if (!crtc_state->enable_psr2_sel_fetch)
1278 		return intel_dp->psr.su_y_granularity == 4;
1279 
1280 	/*
1281 	 * adl_p and mtl platforms have 1 line granularity.
1282 	 * For other platforms with SW tracking we can adjust the y coordinates
1283 	 * to match the sink requirement if it is a multiple of 4.
1284 	 */
1285 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1286 		y_granularity = intel_dp->psr.su_y_granularity;
1287 	else if (intel_dp->psr.su_y_granularity <= 2)
1288 		y_granularity = 4;
1289 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1290 		y_granularity = intel_dp->psr.su_y_granularity;
1291 
1292 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1293 		return false;
1294 
1295 	if (crtc_state->dsc.compression_enable &&
1296 	    vdsc_cfg->slice_height % y_granularity)
1297 		return false;
1298 
1299 	crtc_state->su_y_granularity = y_granularity;
1300 	return true;
1301 }
1302 
1303 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1304 							struct intel_crtc_state *crtc_state)
1305 {
1306 	struct intel_display *display = to_intel_display(intel_dp);
1307 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1308 	u32 hblank_total, hblank_ns, req_ns;
1309 
1310 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1311 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1312 
1313 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1314 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1315 
1316 	if ((hblank_ns - req_ns) > 100)
1317 		return true;
1318 
1319 	/* Not supported <13 / Wa_22012279113:adl-p */
1320 	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1321 		return false;
1322 
1323 	crtc_state->req_psr2_sdp_prior_scanline = true;
1324 	return true;
1325 }
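
/*
 * Worked example for the hblank check above, with illustrative numbers:
 * 4 lanes at HBR2 (port_clock 540000 kHz, i.e. a 540 MHz symbol clock) give
 * req_ns = ((60 / 4) + 11) * 1000 / 540 = ~48 ns, and a 160 pixel hblank at
 * a 300 MHz pixel clock gives hblank_ns = 1000000 * 160 / 300000 = ~533 ns,
 * so hblank_ns - req_ns > 100 and no SDP prior scanline indication is needed.
 */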
1326 
1327 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1328 					const struct drm_display_mode *adjusted_mode)
1329 {
1330 	struct intel_display *display = to_intel_display(intel_dp);
1331 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1332 	int entry_setup_frames = 0;
1333 
1334 	if (psr_setup_time < 0) {
1335 		drm_dbg_kms(display->drm,
1336 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1337 			    intel_dp->psr_dpcd[1]);
1338 		return -ETIME;
1339 	}
1340 
1341 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1342 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1343 		if (DISPLAY_VER(display) >= 20) {
1344 			/* setup entry frames can be up to 3 frames */
1345 			entry_setup_frames = 1;
1346 			drm_dbg_kms(display->drm,
1347 				    "PSR setup entry frames %d\n",
1348 				    entry_setup_frames);
1349 		} else {
1350 			drm_dbg_kms(display->drm,
1351 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1352 				    psr_setup_time);
1353 			return -ETIME;
1354 		}
1355 	}
1356 
1357 	return entry_setup_frames;
1358 }
1359 
1360 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1361 				       const struct intel_crtc_state *crtc_state,
1362 				       bool aux_less)
1363 {
1364 	struct intel_display *display = to_intel_display(intel_dp);
1365 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1366 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1367 	int wake_lines;
1368 
1369 	if (aux_less)
1370 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1371 	else
1372 		wake_lines = DISPLAY_VER(display) < 20 ?
1373 			psr2_block_count_lines(intel_dp) :
1374 			intel_dp->alpm_parameters.io_wake_lines;
1375 
1376 	if (crtc_state->req_psr2_sdp_prior_scanline)
1377 		vblank -= 1;
1378 
1379 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1380 	if (vblank < wake_lines)
1381 		return false;
1382 
1383 	return true;
1384 }
1385 
1386 static bool alpm_config_valid(struct intel_dp *intel_dp,
1387 			      const struct intel_crtc_state *crtc_state,
1388 			      bool aux_less)
1389 {
1390 	struct intel_display *display = to_intel_display(intel_dp);
1391 
1392 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1393 		drm_dbg_kms(display->drm,
1394 			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1395 		return false;
1396 	}
1397 
1398 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1399 		drm_dbg_kms(display->drm,
1400 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1401 		return false;
1402 	}
1403 
1404 	return true;
1405 }
1406 
1407 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1408 				    struct intel_crtc_state *crtc_state)
1409 {
1410 	struct intel_display *display = to_intel_display(intel_dp);
1411 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1412 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1413 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1414 
1415 	if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
1416 		return false;
1417 
1418 	/* JSL and EHL only support eDP 1.3 */
1419 	if (display->platform.jasperlake || display->platform.elkhartlake) {
1420 		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1421 		return false;
1422 	}
1423 
1424 	/* Wa_16011181250 */
1425 	if (display->platform.rocketlake || display->platform.alderlake_s ||
1426 	    display->platform.dg2) {
1427 		drm_dbg_kms(display->drm,
1428 			    "PSR2 is defeatured for this platform\n");
1429 		return false;
1430 	}
1431 
1432 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1433 		drm_dbg_kms(display->drm,
1434 			    "PSR2 not completely functional in this stepping\n");
1435 		return false;
1436 	}
1437 
1438 	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1439 		drm_dbg_kms(display->drm,
1440 			    "PSR2 not supported in transcoder %s\n",
1441 			    transcoder_name(crtc_state->cpu_transcoder));
1442 		return false;
1443 	}
1444 
1445 	/*
1446 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1447 	 * resolution requires DSC to be enabled, priority is given to DSC
1448 	 * over PSR2.
1449 	 */
1450 	if (crtc_state->dsc.compression_enable &&
1451 	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
1452 		drm_dbg_kms(display->drm,
1453 			    "PSR2 cannot be enabled since DSC is enabled\n");
1454 		return false;
1455 	}
1456 
1457 	if (DISPLAY_VER(display) >= 20) {
1458 		psr_max_h = crtc_hdisplay;
1459 		psr_max_v = crtc_vdisplay;
1460 		max_bpp = crtc_state->pipe_bpp;
1461 	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1462 		psr_max_h = 5120;
1463 		psr_max_v = 3200;
1464 		max_bpp = 30;
1465 	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1466 		psr_max_h = 4096;
1467 		psr_max_v = 2304;
1468 		max_bpp = 24;
1469 	} else if (DISPLAY_VER(display) == 9) {
1470 		psr_max_h = 3640;
1471 		psr_max_v = 2304;
1472 		max_bpp = 24;
1473 	}
1474 
1475 	if (crtc_state->pipe_bpp > max_bpp) {
1476 		drm_dbg_kms(display->drm,
1477 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1478 			    crtc_state->pipe_bpp, max_bpp);
1479 		return false;
1480 	}
1481 
1482 	/* Wa_16011303918:adl-p */
1483 	if (crtc_state->vrr.enable &&
1484 	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1485 		drm_dbg_kms(display->drm,
1486 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1487 		return false;
1488 	}
1489 
1490 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1491 		return false;
1492 
1493 	if (!crtc_state->enable_psr2_sel_fetch &&
1494 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1495 		drm_dbg_kms(display->drm,
1496 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1497 			    crtc_hdisplay, crtc_vdisplay,
1498 			    psr_max_h, psr_max_v);
1499 		return false;
1500 	}
1501 
1502 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1503 
1504 	return true;
1505 }
1506 
1507 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1508 					  struct intel_crtc_state *crtc_state)
1509 {
1510 	struct intel_display *display = to_intel_display(intel_dp);
1511 
1512 	if (HAS_PSR2_SEL_FETCH(display) &&
1513 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1514 	    !HAS_PSR_HW_TRACKING(display)) {
1515 		drm_dbg_kms(display->drm,
1516 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1517 		goto unsupported;
1518 	}
1519 
1520 	if (!sel_update_global_enabled(intel_dp)) {
1521 		drm_dbg_kms(display->drm,
1522 			    "Selective update disabled by flag\n");
1523 		goto unsupported;
1524 	}
1525 
1526 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1527 		goto unsupported;
1528 
1529 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1530 		drm_dbg_kms(display->drm,
1531 			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1532 		goto unsupported;
1533 	}
1534 
1535 	if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1536 					     !intel_dp->psr.sink_panel_replay_su_support))
1537 		goto unsupported;
1538 
1539 	if (crtc_state->crc_enabled) {
1540 		drm_dbg_kms(display->drm,
1541 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1542 		goto unsupported;
1543 	}
1544 
1545 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1546 		drm_dbg_kms(display->drm,
1547 			    "Selective update not enabled, SU granularity not compatible\n");
1548 		goto unsupported;
1549 	}
1550 
1551 	crtc_state->enable_psr2_su_region_et =
1552 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1553 
1554 	return true;
1555 
1556 unsupported:
1557 	crtc_state->enable_psr2_sel_fetch = false;
1558 	return false;
1559 }
1560 
1561 static bool _psr_compute_config(struct intel_dp *intel_dp,
1562 				struct intel_crtc_state *crtc_state)
1563 {
1564 	struct intel_display *display = to_intel_display(intel_dp);
1565 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1566 	int entry_setup_frames;
1567 
1568 	if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
1569 		return false;
1570 
1571 	/*
1572 	 * Currently PSR doesn't work reliably with VRR enabled.
1573 	 */
1574 	if (crtc_state->vrr.enable)
1575 		return false;
1576 
1577 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1578 
1579 	if (entry_setup_frames >= 0) {
1580 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1581 	} else {
1582 		drm_dbg_kms(display->drm,
1583 			    "PSR condition failed: PSR setup timing not met\n");
1584 		return false;
1585 	}
1586 
1587 	return true;
1588 }
1589 
1590 static bool
1591 _panel_replay_compute_config(struct intel_dp *intel_dp,
1592 			     const struct intel_crtc_state *crtc_state,
1593 			     const struct drm_connector_state *conn_state)
1594 {
1595 	struct intel_display *display = to_intel_display(intel_dp);
1596 	struct intel_connector *connector =
1597 		to_intel_connector(conn_state->connector);
1598 	struct intel_hdcp *hdcp = &connector->hdcp;
1599 
1600 	if (!CAN_PANEL_REPLAY(intel_dp))
1601 		return false;
1602 
1603 	if (!panel_replay_global_enabled(intel_dp)) {
1604 		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1605 		return false;
1606 	}
1607 
1608 	if (crtc_state->crc_enabled) {
1609 		drm_dbg_kms(display->drm,
1610 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1611 		return false;
1612 	}
1613 
1614 	if (!intel_dp_is_edp(intel_dp))
1615 		return true;
1616 
1617 	/* Remaining checks are for eDP only */
1618 
1619 	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1620 	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1621 		return false;
1622 
1623 	/* 128b/132b Panel Replay is not supported on eDP */
1624 	if (intel_dp_is_uhbr(crtc_state)) {
1625 		drm_dbg_kms(display->drm,
1626 			    "Panel Replay is not supported with 128b/132b\n");
1627 		return false;
1628 	}
1629 
1630 	/* HW will not allow Panel Replay on eDP when HDCP enabled */
1631 	if (conn_state->content_protection ==
1632 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1633 	    (conn_state->content_protection ==
1634 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1635 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1636 		drm_dbg_kms(display->drm,
1637 			    "Panel Replay is not supported with HDCP\n");
1638 		return false;
1639 	}
1640 
1641 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1642 		return false;
1643 
1644 	return true;
1645 }
1646 
1647 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1648 					   struct intel_crtc_state *crtc_state)
1649 {
1650 	struct intel_display *display = to_intel_display(intel_dp);
1651 
1652 	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1653 		!crtc_state->has_sel_update);
1654 }
1655 
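/**
 * intel_psr_compute_config - Compute PSR/Panel Replay configuration
 * @intel_dp: Intel DP
 * @crtc_state: CRTC state to compute the configuration for
 * @conn_state: connector state
 *
 * Decide whether Panel Replay, PSR and selective update can be used for the
 * given CRTC state and record the result in @crtc_state (has_panel_replay,
 * has_psr, has_sel_update), taking the relevant workarounds into account.
 */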
1656 void intel_psr_compute_config(struct intel_dp *intel_dp,
1657 			      struct intel_crtc_state *crtc_state,
1658 			      struct drm_connector_state *conn_state)
1659 {
1660 	struct intel_display *display = to_intel_display(intel_dp);
1661 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1662 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1663 	struct intel_crtc *crtc;
1664 	u8 active_pipes = 0;
1665 
1666 	if (!psr_global_enabled(intel_dp)) {
1667 		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1668 		return;
1669 	}
1670 
1671 	if (intel_dp->psr.sink_not_reliable) {
1672 		drm_dbg_kms(display->drm,
1673 			    "PSR sink implementation is not reliable\n");
1674 		return;
1675 	}
1676 
1677 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1678 		drm_dbg_kms(display->drm,
1679 			    "PSR condition failed: Interlaced mode enabled\n");
1680 		return;
1681 	}
1682 
1683 	/*
1684 	 * FIXME figure out what is wrong with PSR+joiner and
1685 	 * fix it. Presumably something related to the fact that
1686 	 * PSR is a transcoder level feature.
1687 	 */
1688 	if (crtc_state->joiner_pipes) {
1689 		drm_dbg_kms(display->drm,
1690 			    "PSR disabled due to joiner\n");
1691 		return;
1692 	}
1693 
1694 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1695 								    crtc_state,
1696 								    conn_state);
1697 
1698 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1699 		_psr_compute_config(intel_dp, crtc_state);
1700 
1701 	if (!crtc_state->has_psr)
1702 		return;
1703 
1704 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1705 
1706 	/* Wa_18037818876 */
1707 	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
1708 		crtc_state->has_psr = false;
1709 		drm_dbg_kms(display->drm,
1710 			    "PSR disabled to workaround PSR FSM hang issue\n");
1711 	}
1712 
1713 	/* Rest is for Wa_16025596647 */
1714 	if (DISPLAY_VER(display) != 20 &&
1715 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1716 		return;
1717 
1718 	/* Not needed by Panel Replay */
1719 	if (crtc_state->has_panel_replay)
1720 		return;
1721 
1722 	/* We ignore possible secondary PSR/Panel Replay capable eDP */
1723 	for_each_intel_crtc(display->drm, crtc)
1724 		active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1725 
1726 	active_pipes = intel_calc_active_pipes(state, active_pipes);
1727 
1728 	crtc_state->active_non_psr_pipes = active_pipes &
1729 		~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1730 }
1731 
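/*
 * Read out the current PSR/Panel Replay state from the hardware into
 * @pipe_config for state readout/verification. Not everything can be read
 * back reliably (see the comment on EDP_PSR/PSR2_CTL below).
 */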
1732 void intel_psr_get_config(struct intel_encoder *encoder,
1733 			  struct intel_crtc_state *pipe_config)
1734 {
1735 	struct intel_display *display = to_intel_display(encoder);
1736 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1737 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1738 	struct intel_dp *intel_dp;
1739 	u32 val;
1740 
1741 	if (!dig_port)
1742 		return;
1743 
1744 	intel_dp = &dig_port->dp;
1745 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1746 		return;
1747 
1748 	mutex_lock(&intel_dp->psr.lock);
1749 	if (!intel_dp->psr.enabled)
1750 		goto unlock;
1751 
1752 	if (intel_dp->psr.panel_replay_enabled) {
1753 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1754 	} else {
1755 		/*
1756 		 * Not possible to read EDP_PSR/PSR2_CTL registers as they are
1757 		 * enabled/disabled because of frontbuffer tracking and other mechanisms.
1758 		 */
1759 		pipe_config->has_psr = true;
1760 	}
1761 
1762 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1763 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1764 
1765 	if (!intel_dp->psr.sel_update_enabled)
1766 		goto unlock;
1767 
1768 	if (HAS_PSR2_SEL_FETCH(display)) {
1769 		val = intel_de_read(display,
1770 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1771 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1772 			pipe_config->enable_psr2_sel_fetch = true;
1773 	}
1774 
1775 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1776 
1777 	if (DISPLAY_VER(display) >= 12) {
1778 		val = intel_de_read(display,
1779 				    TRANS_EXITLINE(display, cpu_transcoder));
1780 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1781 	}
1782 unlock:
1783 	mutex_unlock(&intel_dp->psr.lock);
1784 }
1785 
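/*
 * Activate exactly one of PSR1, PSR2 or Panel Replay on the source. The
 * caller must hold psr.lock and PSR must already be enabled but not yet
 * active (see the WARN_ONs below).
 */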
1786 static void intel_psr_activate(struct intel_dp *intel_dp)
1787 {
1788 	struct intel_display *display = to_intel_display(intel_dp);
1789 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1790 
1791 	drm_WARN_ON(display->drm,
1792 		    transcoder_has_psr2(display, cpu_transcoder) &&
1793 		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1794 
1795 	drm_WARN_ON(display->drm,
1796 		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1797 
1798 	drm_WARN_ON(display->drm, intel_dp->psr.active);
1799 
1800 	drm_WARN_ON(display->drm, !intel_dp->psr.enabled);
1801 
1802 	lockdep_assert_held(&intel_dp->psr.lock);
1803 
1804 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1805 	if (intel_dp->psr.panel_replay_enabled)
1806 		dg2_activate_panel_replay(intel_dp);
1807 	else if (intel_dp->psr.sel_update_enabled)
1808 		hsw_activate_psr2(intel_dp);
1809 	else
1810 		hsw_activate_psr1(intel_dp);
1811 
1812 	intel_dp->psr.active = true;
1813 }
1814 
1815 /*
1816  * Wa_16013835468
1817  * Wa_14015648006
1818  */
1819 static void wm_optimization_wa(struct intel_dp *intel_dp,
1820 			       const struct intel_crtc_state *crtc_state)
1821 {
1822 	struct intel_display *display = to_intel_display(intel_dp);
1823 	enum pipe pipe = intel_dp->psr.pipe;
1824 	bool activate = false;
1825 
1826 	/* Wa_14015648006 */
1827 	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1828 		activate = true;
1829 
1830 	/* Wa_16013835468 */
1831 	if (DISPLAY_VER(display) == 12 &&
1832 	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1833 	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1834 		activate = true;
1835 
1836 	if (activate)
1837 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1838 			     0, LATENCY_REPORTING_REMOVED(pipe));
1839 	else
1840 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1841 			     LATENCY_REPORTING_REMOVED(pipe), 0);
1842 }
1843 
1844 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1845 				    const struct intel_crtc_state *crtc_state)
1846 {
1847 	struct intel_display *display = to_intel_display(intel_dp);
1848 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1849 	u32 mask = 0;
1850 
1851 	/*
1852 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1853 	 * SKL+ use hardcoded values for PSR AUX transactions.
1854 	 */
1855 	if (DISPLAY_VER(display) < 9)
1856 		hsw_psr_setup_aux(intel_dp);
1857 
1858 	/*
1859 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1860 	 * mask LPSP to avoid dependency on other drivers that might block
1861 	 * runtime_pm besides preventing other hw tracking issues. Now we
1862 	 * can rely on frontbuffer tracking.
1863 	 *
1864 	 * From bspec prior to LunarLake:
1865 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1866 	 * panel replay mode.
1867 	 *
1868 	 * From bspec beyond LunarLake:
1869 	 * Panel Replay on DP: No bits are applicable
1870 	 * Panel Replay on eDP: All bits are applicable
1871 	 */
1872 	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1873 		mask = EDP_PSR_DEBUG_MASK_HPD;
1874 
1875 	if (intel_dp_is_edp(intel_dp)) {
1876 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1877 
1878 		/*
1879 		 * For some unknown reason on HSW non-ULT (or at least on
1880 		 * Dell Latitude E6540) external displays start to flicker
1881 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1882 		 * higher than should be possible with an external display.
1883 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1884 		 * when external displays are active.
1885 		 */
1886 		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
1887 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1888 
1889 		if (DISPLAY_VER(display) < 20)
1890 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1891 
1892 		/*
1893 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1894 		 * registers in order to keep the CURSURFLIVE tricks working :(
1895 		 */
1896 		if (IS_DISPLAY_VER(display, 9, 10))
1897 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1898 
1899 		/* allow PSR with sprite enabled */
1900 		if (display->platform.haswell)
1901 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1902 	}
1903 
1904 	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1905 
1906 	psr_irq_control(intel_dp);
1907 
1908 	/*
1909 	 * TODO: if future platforms support DC3CO in more than one
1910 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1911 	 */
1912 	if (intel_dp->psr.dc3co_exitline)
1913 		intel_de_rmw(display,
1914 			     TRANS_EXITLINE(display, cpu_transcoder),
1915 			     EXITLINE_MASK,
1916 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1917 
1918 	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1919 		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1920 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1921 			     IGNORE_PSR2_HW_TRACKING : 0);
1922 
1923 	/*
1924 	 * Wa_16013835468
1925 	 * Wa_14015648006
1926 	 */
1927 	wm_optimization_wa(intel_dp, crtc_state);
1928 
1929 	if (intel_dp->psr.sel_update_enabled) {
1930 		if (DISPLAY_VER(display) == 9)
1931 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
1932 				     PSR2_VSC_ENABLE_PROG_HEADER |
1933 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1934 
1935 		/*
1936 		 * Wa_16014451276:adlp,mtl[a0,b0]
1937 		 * All supported adlp panels have 1-based X granularity; this may
1938 		 * cause issues if non-supported panels are used.
1939 		 */
1940 		if (!intel_dp->psr.panel_replay_enabled &&
1941 		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1942 		     display->platform.alderlake_p))
1943 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
1944 				     0, ADLP_1_BASED_X_GRANULARITY);
1945 
1946 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1947 		if (!intel_dp->psr.panel_replay_enabled &&
1948 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1949 			intel_de_rmw(display,
1950 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1951 				     0,
1952 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1953 		else if (display->platform.alderlake_p)
1954 			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1955 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1956 	}
1957 
1958 	/* Wa_16025596647 */
1959 	if ((DISPLAY_VER(display) == 20 ||
1960 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1961 	    !intel_dp->psr.panel_replay_enabled)
1962 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
1963 
1964 	intel_alpm_configure(intel_dp, crtc_state);
1965 }
1966 
1967 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1968 {
1969 	struct intel_display *display = to_intel_display(intel_dp);
1970 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1971 	u32 val;
1972 
1973 	if (intel_dp->psr.panel_replay_enabled)
1974 		goto no_err;
1975 
1976 	/*
1977 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1978 	 * will still keep the error set even after the reset done in the
1979 	 * irq_preinstall and irq_uninstall hooks.
1980 	 * Enabling PSR in this situation causes the screen to freeze the
1981 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1982 	 * to avoid any rendering problems.
1983 	 */
1984 	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1985 	val &= psr_irq_psr_error_bit_get(intel_dp);
1986 	if (val) {
1987 		intel_dp->psr.sink_not_reliable = true;
1988 		drm_dbg_kms(display->drm,
1989 			    "PSR interruption error set, not enabling PSR\n");
1990 		return false;
1991 	}
1992 
1993 no_err:
1994 	return true;
1995 }
1996 
1997 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1998 				    const struct intel_crtc_state *crtc_state)
1999 {
2000 	struct intel_display *display = to_intel_display(intel_dp);
2001 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2002 	u32 val;
2003 
2004 	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
2005 
2006 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
2007 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
2008 	intel_dp->psr.busy_frontbuffer_bits = 0;
2009 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
2010 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
2011 	/* DC5/DC6 requires at least 6 idle frames (~100 ms at 60 Hz) */
2012 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
2013 	intel_dp->psr.dc3co_exit_delay = val;
2014 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
2015 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
2016 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
2017 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2018 	intel_dp->psr.req_psr2_sdp_prior_scanline =
2019 		crtc_state->req_psr2_sdp_prior_scanline;
2020 	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
2021 	intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
2022 
2023 	if (!psr_interrupt_error_check(intel_dp))
2024 		return;
2025 
2026 	if (intel_dp->psr.panel_replay_enabled)
2027 		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2028 	else
2029 		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2030 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2031 
2032 	/*
2033 	 * Sink PSR/Panel Replay is enabled here only for PSR; the Panel Replay
2034 	 * enable bit has already been written at this point. Sink ALPM is enabled
2035 	 * here for both PSR and Panel Replay. See
2036 	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2037 	 *  - Selective Update
2038 	 *  - Region Early Transport
2039 	 *  - Selective Update Region Scanline Capture
2040 	 *  - VSC_SDP_CRC
2041 	 *  - HPD on different Errors
2042 	 *  - CRC verification
2043 	 * are written for PSR and Panel Replay here.
2044 	 */
2045 	intel_psr_enable_sink(intel_dp, crtc_state);
2046 
2047 	if (intel_dp_is_edp(intel_dp))
2048 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2049 
2050 	intel_psr_enable_source(intel_dp, crtc_state);
2051 	intel_dp->psr.enabled = true;
2052 	intel_dp->psr.pause_counter = 0;
2053 
2054 	/*
2055 	 * Link_ok is sticky and set here on PSR enable. We can assume link
2056 	 * training is complete as we never continue to PSR enable with an
2057 	 * untrained link. Link_ok is kept set until the first short pulse
2058 	 * interrupt. This is targeted to work around panels reporting a bad
2059 	 * link after PSR is enabled.
2060 	 */
2061 	intel_dp->psr.link_ok = true;
2062 
2063 	intel_psr_activate(intel_dp);
2064 }
2065 
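/*
 * Deactivate PSR/Panel Replay on the source side by clearing the respective
 * enable bit. The sink configuration is left untouched so that PSR can be
 * re-activated later without a full enable cycle.
 */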
2066 static void intel_psr_exit(struct intel_dp *intel_dp)
2067 {
2068 	struct intel_display *display = to_intel_display(intel_dp);
2069 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2070 	u32 val;
2071 
2072 	if (!intel_dp->psr.active) {
2073 		if (transcoder_has_psr2(display, cpu_transcoder)) {
2074 			val = intel_de_read(display,
2075 					    EDP_PSR2_CTL(display, cpu_transcoder));
2076 			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2077 		}
2078 
2079 		val = intel_de_read(display,
2080 				    psr_ctl_reg(display, cpu_transcoder));
2081 		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2082 
2083 		return;
2084 	}
2085 
2086 	if (intel_dp->psr.panel_replay_enabled) {
2087 		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2088 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2089 	} else if (intel_dp->psr.sel_update_enabled) {
2090 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2091 
2092 		val = intel_de_rmw(display,
2093 				   EDP_PSR2_CTL(display, cpu_transcoder),
2094 				   EDP_PSR2_ENABLE, 0);
2095 
2096 		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2097 	} else {
2098 		if ((DISPLAY_VER(display) == 20 ||
2099 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2100 			intel_dp->psr.pkg_c_latency_used)
2101 			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
2102 								       intel_dp->psr.pipe,
2103 								       false);
2104 
2105 		val = intel_de_rmw(display,
2106 				   psr_ctl_reg(display, cpu_transcoder),
2107 				   EDP_PSR_ENABLE, 0);
2108 
2109 		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2110 	}
2111 	intel_dp->psr.active = false;
2112 }
2113 
2114 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2115 {
2116 	struct intel_display *display = to_intel_display(intel_dp);
2117 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2118 	i915_reg_t psr_status;
2119 	u32 psr_status_mask;
2120 
2121 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2122 					  intel_dp->psr.panel_replay_enabled)) {
2123 		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2124 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2125 	} else {
2126 		psr_status = psr_status_reg(display, cpu_transcoder);
2127 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2128 	}
2129 
2130 	/* Wait till PSR is idle */
2131 	if (intel_de_wait_for_clear(display, psr_status,
2132 				    psr_status_mask, 2000))
2133 		drm_err(display->drm, "Timed out waiting PSR idle state\n");
2134 }
2135 
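/*
 * Disable PSR/Panel Replay on both source and sink and clear the related
 * software state. The caller must hold psr.lock.
 */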
2136 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2137 {
2138 	struct intel_display *display = to_intel_display(intel_dp);
2139 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2140 
2141 	lockdep_assert_held(&intel_dp->psr.lock);
2142 
2143 	if (!intel_dp->psr.enabled)
2144 		return;
2145 
2146 	if (intel_dp->psr.panel_replay_enabled)
2147 		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2148 	else
2149 		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2150 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2151 
2152 	intel_psr_exit(intel_dp);
2153 	intel_psr_wait_exit_locked(intel_dp);
2154 
2155 	/*
2156 	 * Wa_16013835468
2157 	 * Wa_14015648006
2158 	 */
2159 	if (DISPLAY_VER(display) >= 11)
2160 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2161 			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2162 
2163 	if (intel_dp->psr.sel_update_enabled) {
2164 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2165 		if (!intel_dp->psr.panel_replay_enabled &&
2166 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2167 			intel_de_rmw(display,
2168 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2169 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2170 		else if (display->platform.alderlake_p)
2171 			intel_de_rmw(display, CLKGATE_DIS_MISC,
2172 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2173 	}
2174 
2175 	if (intel_dp_is_edp(intel_dp))
2176 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2177 
2178 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
2179 		intel_alpm_disable(intel_dp);
2180 
2181 	/* Disable PSR on Sink */
2182 	if (!intel_dp->psr.panel_replay_enabled) {
2183 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2184 
2185 		if (intel_dp->psr.sel_update_enabled)
2186 			drm_dp_dpcd_writeb(&intel_dp->aux,
2187 					   DP_RECEIVER_ALPM_CONFIG, 0);
2188 	}
2189 
2190 	/* Wa_16025596647 */
2191 	if ((DISPLAY_VER(display) == 20 ||
2192 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2193 	    !intel_dp->psr.panel_replay_enabled)
2194 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
2195 
2196 	intel_dp->psr.enabled = false;
2197 	intel_dp->psr.panel_replay_enabled = false;
2198 	intel_dp->psr.sel_update_enabled = false;
2199 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2200 	intel_dp->psr.su_region_et_enabled = false;
2201 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2202 	intel_dp->psr.active_non_psr_pipes = 0;
2203 	intel_dp->psr.pkg_c_latency_used = 0;
2204 }
2205 
2206 /**
2207  * intel_psr_disable - Disable PSR
2208  * @intel_dp: Intel DP
2209  * @old_crtc_state: old CRTC state
2210  *
2211  * This function needs to be called before disabling pipe.
2212  */
2213 void intel_psr_disable(struct intel_dp *intel_dp,
2214 		       const struct intel_crtc_state *old_crtc_state)
2215 {
2216 	struct intel_display *display = to_intel_display(intel_dp);
2217 
2218 	if (!old_crtc_state->has_psr)
2219 		return;
2220 
2221 	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2222 			!CAN_PANEL_REPLAY(intel_dp)))
2223 		return;
2224 
2225 	mutex_lock(&intel_dp->psr.lock);
2226 
2227 	intel_psr_disable_locked(intel_dp);
2228 
2229 	intel_dp->psr.link_ok = false;
2230 
2231 	mutex_unlock(&intel_dp->psr.lock);
2232 	cancel_work_sync(&intel_dp->psr.work);
2233 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2234 }
2235 
2236 /**
2237  * intel_psr_pause - Pause PSR
2238  * @intel_dp: Intel DP
2239  *
2240  * This function needs to be called after enabling PSR.
2241  */
2242 void intel_psr_pause(struct intel_dp *intel_dp)
2243 {
2244 	struct intel_psr *psr = &intel_dp->psr;
2245 
2246 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2247 		return;
2248 
2249 	mutex_lock(&psr->lock);
2250 
2251 	if (!psr->enabled) {
2252 		mutex_unlock(&psr->lock);
2253 		return;
2254 	}
2255 
2256 	if (intel_dp->psr.pause_counter++ == 0) {
2257 		intel_psr_exit(intel_dp);
2258 		intel_psr_wait_exit_locked(intel_dp);
2259 	}
2260 
2261 	mutex_unlock(&psr->lock);
2262 
2263 	cancel_work_sync(&psr->work);
2264 	cancel_delayed_work_sync(&psr->dc3co_work);
2265 }
2266 
2267 /**
2268  * intel_psr_resume - Resume PSR
2269  * @intel_dp: Intel DP
2270  *
2271  * This function needs to be called after pausing PSR.
2272  */
2273 void intel_psr_resume(struct intel_dp *intel_dp)
2274 {
2275 	struct intel_display *display = to_intel_display(intel_dp);
2276 	struct intel_psr *psr = &intel_dp->psr;
2277 
2278 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2279 		return;
2280 
2281 	mutex_lock(&psr->lock);
2282 
2283 	if (!psr->enabled)
2284 		goto out;
2285 
2286 	if (!psr->pause_counter) {
2287 		drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2288 		goto out;
2289 	}
2290 
2291 	if (--intel_dp->psr.pause_counter == 0)
2292 		intel_psr_activate(intel_dp);
2293 
2294 out:
2295 	mutex_unlock(&psr->lock);
2296 }
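/*
 * Illustrative usage only (not taken from any specific caller): code that
 * must not race with PSR entry/exit is expected to bracket the critical
 * section with a pause/resume pair, e.g.:
 *
 *	intel_psr_pause(intel_dp);
 *	... program hardware that PSR would otherwise interfere with ...
 *	intel_psr_resume(intel_dp);
 *
 * pause_counter makes such sections nestable; PSR is only re-activated once
 * the last resume brings the counter back to zero.
 */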
2297 
2298 /**
2299  * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
2300  * notification.
2301  * @crtc_state: CRTC state
2302  *
2303  * We need to block DC6 entry in case of Panel Replay as enabling VBI
2304  * doesn't prevent it. Panel Replay switches the main link off on
2305  * DC entry. This means vblank interrupts are not fired, which is a problem if
2306  * user-space is polling for vblank events. Also Wa_16025596647 needs
2307  * information when vblank is enabled/disabled.
2308  */
2309 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2310 {
2311 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2312 	struct intel_display *display = to_intel_display(crtc_state);
2313 	struct intel_encoder *encoder;
2314 
2315 	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2316 		struct intel_dp *intel_dp;
2317 
2318 		if (!intel_encoder_is_dp(encoder))
2319 			continue;
2320 
2321 		intel_dp = enc_to_intel_dp(encoder);
2322 
2323 		if (!intel_dp_is_edp(intel_dp))
2324 			continue;
2325 
2326 		if (CAN_PANEL_REPLAY(intel_dp))
2327 			return true;
2328 
2329 		if ((DISPLAY_VER(display) == 20 ||
2330 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2331 		    CAN_PSR(intel_dp))
2332 			return true;
2333 	}
2334 
2335 	return false;
2336 }
2337 
2338 /**
2339  * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2340  * @dsb: DSB context
2341  * @state: the atomic state
2342  * @crtc: the CRTC
2343  *
2344  * Generate PSR "Frame Change" event.
2345  */
2346 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2347 					  struct intel_atomic_state *state,
2348 					  struct intel_crtc *crtc)
2349 {
2350 	const struct intel_crtc_state *crtc_state =
2351 		intel_pre_commit_crtc_state(state, crtc);
2352 	struct intel_display *display = to_intel_display(crtc);
2353 
2354 	if (crtc_state->has_psr)
2355 		intel_de_write_dsb(display, dsb,
2356 				   CURSURFLIVE(display, crtc->pipe), 0);
2357 }
2358 
2359 /**
2360  * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
2361  * @crtc_state: the crtc state
2362  *
2363  * Return minimum vblank delay needed by PSR.
2364  */
2365 int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
2366 {
2367 	struct intel_display *display = to_intel_display(crtc_state);
2368 
2369 	if (!crtc_state->has_psr)
2370 		return 0;
2371 
2372 	/* Wa_14015401596 */
2373 	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
2374 		return 1;
2375 
2376 	/* Rest is for SRD_STATUS needed on LunarLake and onwards */
2377 	if (DISPLAY_VER(display) < 20)
2378 		return 0;
2379 
2380 	/*
2381 	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
2382 	 *
2383 	 * To deterministically capture the transition of the state machine
2384 	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
2385 	 * one line after the non-delayed V. Blank.
2386 	 *
2387 	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
2388 	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
2389 	 * - TRANS_VTOTAL[ Vertical Active ])
2390 	 *
2391 	 * SRD_STATUS is used only by PSR1 on PantherLake.
2392 	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
2393 	 */
2394 
2395 	if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
2396 					   crtc_state->has_sel_update))
2397 		return 0;
2398 	else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
2399 					       intel_crtc_has_type(crtc_state,
2400 								   INTEL_OUTPUT_EDP)))
2401 		return 0;
2402 	else
2403 		return 1;
2404 }
2405 
2406 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2407 {
2408 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2409 		PSR2_MAN_TRK_CTL_ENABLE;
2410 }
2411 
2412 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2413 {
2414 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2415 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2416 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2417 }
2418 
2419 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2420 {
2421 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2422 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2423 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2424 }
2425 
2426 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2427 {
2428 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2429 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2430 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2431 }
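/*
 * The PSR2_MAN_TRK_CTL bit layout differs between ADL-P/display 14+ and
 * earlier platforms; the helpers above return the platform-appropriate
 * enable, single/continuous full frame and partial frame bits so that the
 * callers below don't have to care about the difference.
 */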
2432 
2433 static void intel_psr_force_update(struct intel_dp *intel_dp)
2434 {
2435 	struct intel_display *display = to_intel_display(intel_dp);
2436 
2437 	/*
2438 	 * Display WA #0884: skl+
2439 	 * This documented WA for bxt can be safely applied
2440 	 * broadly so we can force HW tracking to exit PSR
2441 	 * instead of disabling and re-enabling.
2442 	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
2443 	 * but it makes more sense to write to the currently active
2444 	 * pipe.
2445 	 *
2446 	 * This workaround does not exist for platforms with display 10 or
2447 	 * newer but testing proved that it works up to display 13; for newer
2448 	 * than that, testing will be needed.
2449 	 */
2450 	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2451 }
2452 
2453 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
2454 					  const struct intel_crtc_state *crtc_state)
2455 {
2456 	struct intel_display *display = to_intel_display(crtc_state);
2457 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2458 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2459 	struct intel_encoder *encoder;
2460 
2461 	if (!crtc_state->enable_psr2_sel_fetch)
2462 		return;
2463 
2464 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2465 					     crtc_state->uapi.encoder_mask) {
2466 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2467 
2468 		if (!dsb)
2469 			lockdep_assert_held(&intel_dp->psr.lock);
2470 
2471 		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
2472 			return;
2473 		break;
2474 	}
2475 
2476 	intel_de_write_dsb(display, dsb,
2477 			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2478 			   crtc_state->psr2_man_track_ctl);
2479 
2480 	if (!crtc_state->enable_psr2_su_region_et)
2481 		return;
2482 
2483 	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2484 			   crtc_state->pipe_srcsz_early_tpt);
2485 }
2486 
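/*
 * Compute the PSR2_MAN_TRK_CTL value for the tracked selective update area.
 * On a full update the continuous full frame bit is set instead of a region;
 * on ADL-P/display 14+ the region is programmed in lines, on older platforms
 * in blocks of 4 lines.
 */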
2487 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2488 				  bool full_update)
2489 {
2490 	struct intel_display *display = to_intel_display(crtc_state);
2491 	u32 val = man_trk_ctl_enable_bit_get(display);
2492 
2493 	/* SF partial frame enable has to be set even on full update */
2494 	val |= man_trk_ctl_partial_frame_bit_get(display);
2495 
2496 	if (full_update) {
2497 		val |= man_trk_ctl_continuos_full_frame(display);
2498 		goto exit;
2499 	}
2500 
2501 	if (crtc_state->psr2_su_area.y1 == -1)
2502 		goto exit;
2503 
2504 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
2505 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2506 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2507 	} else {
2508 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2509 			    crtc_state->psr2_su_area.y1 % 4 ||
2510 			    crtc_state->psr2_su_area.y2 % 4);
2511 
2512 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2513 			crtc_state->psr2_su_area.y1 / 4 + 1);
2514 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2515 			crtc_state->psr2_su_area.y2 / 4 + 1);
2516 	}
2517 exit:
2518 	crtc_state->psr2_man_track_ctl = val;
2519 }
2520 
2521 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2522 					  bool full_update)
2523 {
2524 	int width, height;
2525 
2526 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2527 		return 0;
2528 
2529 	width = drm_rect_width(&crtc_state->psr2_su_area);
2530 	height = drm_rect_height(&crtc_state->psr2_su_area);
2531 
2532 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2533 }
2534 
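/*
 * Grow the accumulated damage area to also cover @damage_area, after
 * clipping the latter against @pipe_src. A y1 of -1 means "no damage
 * recorded yet".
 */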
2535 static void clip_area_update(struct drm_rect *overlap_damage_area,
2536 			     struct drm_rect *damage_area,
2537 			     struct drm_rect *pipe_src)
2538 {
2539 	if (!drm_rect_intersect(damage_area, pipe_src))
2540 		return;
2541 
2542 	if (overlap_damage_area->y1 == -1) {
2543 		overlap_damage_area->y1 = damage_area->y1;
2544 		overlap_damage_area->y2 = damage_area->y2;
2545 		return;
2546 	}
2547 
2548 	if (damage_area->y1 < overlap_damage_area->y1)
2549 		overlap_damage_area->y1 = damage_area->y1;
2550 
2551 	if (damage_area->y2 > overlap_damage_area->y2)
2552 		overlap_damage_area->y2 = damage_area->y2;
2553 }
2554 
2555 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2556 {
2557 	struct intel_display *display = to_intel_display(crtc_state);
2558 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2559 	u16 y_alignment;
2560 
2561 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2562 	if (crtc_state->dsc.compression_enable &&
2563 	    (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2564 		y_alignment = vdsc_cfg->slice_height;
2565 	else
2566 		y_alignment = crtc_state->su_y_granularity;
2567 
2568 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2569 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2570 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2571 						y_alignment) + 1) * y_alignment;
2572 }
2573 
2574 /*
2575  * When early transport is in use we need to extend the SU area to cover
2576  * the cursor fully when the cursor is in the SU area.
2577  */
2578 static void
2579 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2580 				  struct intel_crtc *crtc,
2581 				  bool *cursor_in_su_area)
2582 {
2583 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2584 	struct intel_plane_state *new_plane_state;
2585 	struct intel_plane *plane;
2586 	int i;
2587 
2588 	if (!crtc_state->enable_psr2_su_region_et)
2589 		return;
2590 
2591 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2592 		struct drm_rect inter;
2593 
2594 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2595 			continue;
2596 
2597 		if (plane->id != PLANE_CURSOR)
2598 			continue;
2599 
2600 		if (!new_plane_state->uapi.visible)
2601 			continue;
2602 
2603 		inter = crtc_state->psr2_su_area;
2604 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2605 			continue;
2606 
2607 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2608 				 &crtc_state->pipe_src);
2609 		*cursor_in_su_area = true;
2610 	}
2611 }
2612 
2613 /*
2614  * TODO: Not clear how to handle planes with negative position;
2615  * also planes are not updated if they have a negative X
2616  * position, so for now do a full update in these cases.
2617  *
2618  * Plane scaling and rotation are not supported by selective fetch and both
2619  * properties can change without a modeset, so they need to be checked at every
2620  * atomic commit.
2621  */
2622 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2623 {
2624 	if (plane_state->uapi.dst.y1 < 0 ||
2625 	    plane_state->uapi.dst.x1 < 0 ||
2626 	    plane_state->scaler_id >= 0 ||
2627 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2628 		return false;
2629 
2630 	return true;
2631 }
2632 
2633 /*
2634  * Check for pipe properties that are not supported by selective fetch.
2635  *
2636  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2637  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2638  * enabled and going to the full update path.
2639  */
2640 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2641 {
2642 	if (crtc_state->scaler_state.scaler_id >= 0)
2643 		return false;
2644 
2645 	return true;
2646 }
2647 
2648 /* Wa 14019834836 */
2649 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2650 {
2651 	struct intel_display *display = to_intel_display(crtc_state);
2652 	struct intel_encoder *encoder;
2653 	int hactive_limit;
2654 
2655 	if (crtc_state->psr2_su_area.y1 != 0 ||
2656 	    crtc_state->psr2_su_area.y2 != 0)
2657 		return;
2658 
2659 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2660 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2661 	else
2662 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2663 
2664 	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2665 		return;
2666 
2667 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2668 					     crtc_state->uapi.encoder_mask) {
2669 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2670 
2671 		if (!intel_dp_is_edp(intel_dp) &&
2672 		    intel_dp->psr.panel_replay_enabled &&
2673 		    intel_dp->psr.sel_update_enabled) {
2674 			crtc_state->psr2_su_area.y2++;
2675 			return;
2676 		}
2677 	}
2678 }
2679 
2680 static void
2681 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2682 {
2683 	struct intel_display *display = to_intel_display(crtc_state);
2684 
2685 	/* Wa_14014971492 */
2686 	if (!crtc_state->has_panel_replay &&
2687 	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2688 	      display->platform.alderlake_p || display->platform.tigerlake)) &&
2689 	    crtc_state->splitter.enable)
2690 		crtc_state->psr2_su_area.y1 = 0;
2691 
2692 	/* Wa 14019834836 */
2693 	if (DISPLAY_VER(display) == 30)
2694 		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2695 }
2696 
2697 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2698 				struct intel_crtc *crtc)
2699 {
2700 	struct intel_display *display = to_intel_display(state);
2701 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2702 	struct intel_plane_state *new_plane_state, *old_plane_state;
2703 	struct intel_plane *plane;
2704 	bool full_update = false, cursor_in_su_area = false;
2705 	int i, ret;
2706 
2707 	if (!crtc_state->enable_psr2_sel_fetch)
2708 		return 0;
2709 
2710 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2711 		full_update = true;
2712 		goto skip_sel_fetch_set_loop;
2713 	}
2714 
2715 	crtc_state->psr2_su_area.x1 = 0;
2716 	crtc_state->psr2_su_area.y1 = -1;
2717 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2718 	crtc_state->psr2_su_area.y2 = -1;
2719 
2720 	/*
2721 	 * Calculate minimal selective fetch area of each plane and calculate
2722 	 * the pipe damaged area.
2723 	 * In the next loop the plane selective fetch area will actually be set
2724 	 * using whole pipe damaged area.
2725 	 */
2726 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2727 					     new_plane_state, i) {
2728 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2729 						      .x2 = INT_MAX };
2730 
2731 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2732 			continue;
2733 
2734 		if (!new_plane_state->uapi.visible &&
2735 		    !old_plane_state->uapi.visible)
2736 			continue;
2737 
2738 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2739 			full_update = true;
2740 			break;
2741 		}
2742 
2743 		/*
2744 		 * If visibility changed or the plane moved, mark the whole plane
2745 		 * area as damaged as it needs to be completely redrawn in both the
2746 		 * new and old position.
2747 		 */
2748 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2749 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2750 				     &old_plane_state->uapi.dst)) {
2751 			if (old_plane_state->uapi.visible) {
2752 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2753 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2754 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2755 						 &crtc_state->pipe_src);
2756 			}
2757 
2758 			if (new_plane_state->uapi.visible) {
2759 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2760 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2761 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2762 						 &crtc_state->pipe_src);
2763 			}
2764 			continue;
2765 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2766 			/* If alpha changed mark the whole plane area as damaged */
2767 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2768 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2769 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2770 					 &crtc_state->pipe_src);
2771 			continue;
2772 		}
2773 
2774 		src = drm_plane_state_src(&new_plane_state->uapi);
2775 		drm_rect_fp_to_int(&src, &src);
2776 
2777 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2778 						     &new_plane_state->uapi, &damaged_area))
2779 			continue;
2780 
2781 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2782 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2783 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2784 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2785 
2786 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2787 	}
2788 
2789 	/*
2790 	 * TODO: For now we are just using full update in case
2791 	 * selective fetch area calculation fails. To optimize this we
2792 	 * should identify cases where this happens and fix the area
2793 	 * calculation for those.
2794 	 */
2795 	if (crtc_state->psr2_su_area.y1 == -1) {
2796 		drm_info_once(display->drm,
2797 			      "Selective fetch area calculation failed in pipe %c\n",
2798 			      pipe_name(crtc->pipe));
2799 		full_update = true;
2800 	}
2801 
2802 	if (full_update)
2803 		goto skip_sel_fetch_set_loop;
2804 
2805 	intel_psr_apply_su_area_workarounds(crtc_state);
2806 
2807 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2808 	if (ret)
2809 		return ret;
2810 
2811 	/*
2812 	 * Adjust su area to cover cursor fully as necessary (early
2813 	 * transport). This needs to be done after
2814 	 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2815 	 * affected planes even when cursor is not updated by itself.
2816 	 */
2817 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2818 
2819 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2820 
2821 	/*
2822 	 * Now that we have the pipe damaged area, check if it intersects with
2823 	 * each plane; if it does, set the plane selective fetch area.
2824 	 */
2825 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2826 					     new_plane_state, i) {
2827 		struct drm_rect *sel_fetch_area, inter;
2828 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2829 
2830 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2831 		    !new_plane_state->uapi.visible)
2832 			continue;
2833 
2834 		inter = crtc_state->psr2_su_area;
2835 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2836 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2837 			sel_fetch_area->y1 = -1;
2838 			sel_fetch_area->y2 = -1;
2839 			/*
2840 			 * if plane sel fetch was previously enabled ->
2841 			 * disable it
2842 			 */
2843 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2844 				crtc_state->update_planes |= BIT(plane->id);
2845 
2846 			continue;
2847 		}
2848 
2849 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2850 			full_update = true;
2851 			break;
2852 		}
2853 
2854 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2855 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2856 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2857 		crtc_state->update_planes |= BIT(plane->id);
2858 
2859 		/*
2860 		 * Sel_fetch_area is calculated for UV plane. Use
2861 		 * same area for Y plane as well.
2862 		 */
2863 		if (linked) {
2864 			struct intel_plane_state *linked_new_plane_state;
2865 			struct drm_rect *linked_sel_fetch_area;
2866 
2867 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2868 			if (IS_ERR(linked_new_plane_state))
2869 				return PTR_ERR(linked_new_plane_state);
2870 
2871 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2872 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2873 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2874 			crtc_state->update_planes |= BIT(linked->id);
2875 		}
2876 	}
2877 
2878 skip_sel_fetch_set_loop:
2879 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2880 	crtc_state->pipe_srcsz_early_tpt =
2881 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2882 	return 0;
2883 }
2884 
2885 void intel_psr2_panic_force_full_update(struct intel_display *display,
2886 					struct intel_crtc_state *crtc_state)
2887 {
2888 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2889 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2890 	u32 val = man_trk_ctl_enable_bit_get(display);
2891 
2892 	/* SF partial frame enable has to be set even on full update */
2893 	val |= man_trk_ctl_partial_frame_bit_get(display);
2894 	val |= man_trk_ctl_continuos_full_frame(display);
2895 
2896 	/* Directly write the register */
2897 	intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);
2898 
2899 	if (!crtc_state->enable_psr2_su_region_et)
2900 		return;
2901 
2902 	intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
2903 }
2904 
2905 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2906 				struct intel_crtc *crtc)
2907 {
2908 	struct intel_display *display = to_intel_display(state);
2909 	const struct intel_crtc_state *old_crtc_state =
2910 		intel_atomic_get_old_crtc_state(state, crtc);
2911 	const struct intel_crtc_state *new_crtc_state =
2912 		intel_atomic_get_new_crtc_state(state, crtc);
2913 	struct intel_encoder *encoder;
2914 
2915 	if (!HAS_PSR(display))
2916 		return;
2917 
2918 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2919 					     old_crtc_state->uapi.encoder_mask) {
2920 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2921 		struct intel_psr *psr = &intel_dp->psr;
2922 
2923 		mutex_lock(&psr->lock);
2924 
2925 		if (psr->enabled) {
2926 			/*
2927 			 * Reasons to disable:
2928 			 * - PSR disabled in new state
2929 			 * - All planes will go inactive
2930 			 * - Changing between PSR versions
2931 			 * - Region Early Transport changing
2932 			 * - Display WA #1136: skl, bxt
2933 			 */
2934 			if (intel_crtc_needs_modeset(new_crtc_state) ||
2935 			    !new_crtc_state->has_psr ||
2936 			    !new_crtc_state->active_planes ||
2937 			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
2938 			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
2939 			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
2940 			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
2941 				intel_psr_disable_locked(intel_dp);
2942 			else if (new_crtc_state->wm_level_disabled)
2943 				/* Wa_14015648006 */
2944 				wm_optimization_wa(intel_dp, new_crtc_state);
2945 		}
2946 
2947 		mutex_unlock(&psr->lock);
2948 	}
2949 }
2950 
2951 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2952 				 struct intel_crtc *crtc)
2953 {
2954 	struct intel_display *display = to_intel_display(state);
2955 	const struct intel_crtc_state *crtc_state =
2956 		intel_atomic_get_new_crtc_state(state, crtc);
2957 	struct intel_encoder *encoder;
2958 
2959 	if (!crtc_state->has_psr)
2960 		return;
2961 
2962 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2963 					     crtc_state->uapi.encoder_mask) {
2964 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2965 		struct intel_psr *psr = &intel_dp->psr;
2966 		bool keep_disabled = false;
2967 
2968 		mutex_lock(&psr->lock);
2969 
2970 		drm_WARN_ON(display->drm,
2971 			    psr->enabled && !crtc_state->active_planes);
2972 
2973 		keep_disabled |= psr->sink_not_reliable;
2974 		keep_disabled |= !crtc_state->active_planes;
2975 
2976 		/* Display WA #1136: skl, bxt */
2977 		keep_disabled |= DISPLAY_VER(display) < 11 &&
2978 			crtc_state->wm_level_disabled;
2979 
2980 		if (!psr->enabled && !keep_disabled)
2981 			intel_psr_enable_locked(intel_dp, crtc_state);
2982 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2983 			/* Wa_14015648006 */
2984 			wm_optimization_wa(intel_dp, crtc_state);
2985 
2986 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2987 		if (crtc_state->crc_enabled && psr->enabled)
2988 			intel_psr_force_update(intel_dp);
2989 
2990 		/*
2991 		 * Clear possible busy bits in case we have
2992 		 * invalidate -> flip -> flush sequence.
2993 		 */
2994 		intel_dp->psr.busy_frontbuffer_bits = 0;
2995 
2996 		mutex_unlock(&psr->lock);
2997 	}
2998 }
2999 
3000 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
3001 {
3002 	struct intel_display *display = to_intel_display(intel_dp);
3003 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3004 
3005 	/*
3006 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
3007 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
3008 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
3009 	 */
3010 	return intel_de_wait_for_clear(display,
3011 				       EDP_PSR2_STATUS(display, cpu_transcoder),
3012 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
3013 }
3014 
3015 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
3016 {
3017 	struct intel_display *display = to_intel_display(intel_dp);
3018 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3019 
3020 	/*
3021 	 * From bspec: Panel Self Refresh (BDW+)
3022 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3023 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3024 	 * defensive enough to cover everything.
3025 	 */
3026 	return intel_de_wait_for_clear(display,
3027 				       psr_status_reg(display, cpu_transcoder),
3028 				       EDP_PSR_STATUS_STATE_MASK, 50);
3029 }
3030 
3031 /**
3032  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
3033  * @new_crtc_state: new CRTC state
3034  *
3035  * This function is expected to be called from pipe_update_start() where it is
3036  * not expected to race with PSR enable or disable.
3037  */
3038 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
3039 {
3040 	struct intel_display *display = to_intel_display(new_crtc_state);
3041 	struct intel_encoder *encoder;
3042 
3043 	if (!new_crtc_state->has_psr)
3044 		return;
3045 
3046 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3047 					     new_crtc_state->uapi.encoder_mask) {
3048 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3049 		int ret;
3050 
3051 		lockdep_assert_held(&intel_dp->psr.lock);
3052 
3053 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3054 			continue;
3055 
3056 		if (intel_dp->psr.sel_update_enabled)
3057 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
3058 		else
3059 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
3060 
3061 		if (ret)
3062 			drm_err(display->drm,
3063 				"PSR wait timed out, atomic update may fail\n");
3064 	}
3065 }
3066 
3067 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
3068 {
3069 	struct intel_display *display = to_intel_display(intel_dp);
3070 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3071 	i915_reg_t reg;
3072 	u32 mask;
3073 	int err;
3074 
3075 	if (!intel_dp->psr.enabled)
3076 		return false;
3077 
3078 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3079 					  intel_dp->psr.panel_replay_enabled)) {
3080 		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
3081 		mask = EDP_PSR2_STATUS_STATE_MASK;
3082 	} else {
3083 		reg = psr_status_reg(display, cpu_transcoder);
3084 		mask = EDP_PSR_STATUS_STATE_MASK;
3085 	}
3086 
3087 	mutex_unlock(&intel_dp->psr.lock);
3088 
3089 	err = intel_de_wait_for_clear(display, reg, mask, 50);
3090 	if (err)
3091 		drm_err(display->drm,
3092 			"Timed out waiting for PSR Idle for re-enable\n");
3093 
3094 	/* After the unlocked wait, verify that PSR is still wanted! */
3095 	mutex_lock(&intel_dp->psr.lock);
3096 	return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
3097 }
3098 
3099 static int intel_psr_fastset_force(struct intel_display *display)
3100 {
3101 	struct drm_connector_list_iter conn_iter;
3102 	struct drm_modeset_acquire_ctx ctx;
3103 	struct drm_atomic_state *state;
3104 	struct drm_connector *conn;
3105 	int err = 0;
3106 
3107 	state = drm_atomic_state_alloc(display->drm);
3108 	if (!state)
3109 		return -ENOMEM;
3110 
3111 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3112 
3113 	state->acquire_ctx = &ctx;
3114 	to_intel_atomic_state(state)->internal = true;
3115 
3116 retry:
3117 	drm_connector_list_iter_begin(display->drm, &conn_iter);
3118 	drm_for_each_connector_iter(conn, &conn_iter) {
3119 		struct drm_connector_state *conn_state;
3120 		struct drm_crtc_state *crtc_state;
3121 
3122 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3123 			continue;
3124 
3125 		conn_state = drm_atomic_get_connector_state(state, conn);
3126 		if (IS_ERR(conn_state)) {
3127 			err = PTR_ERR(conn_state);
3128 			break;
3129 		}
3130 
3131 		if (!conn_state->crtc)
3132 			continue;
3133 
3134 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3135 		if (IS_ERR(crtc_state)) {
3136 			err = PTR_ERR(crtc_state);
3137 			break;
3138 		}
3139 
3140 		/* Mark mode as changed to trigger a pipe->update() */
3141 		crtc_state->mode_changed = true;
3142 	}
3143 	drm_connector_list_iter_end(&conn_iter);
3144 
3145 	if (err == 0)
3146 		err = drm_atomic_commit(state);
3147 
3148 	if (err == -EDEADLK) {
3149 		drm_atomic_state_clear(state);
3150 		err = drm_modeset_backoff(&ctx);
3151 		if (!err)
3152 			goto retry;
3153 	}
3154 
3155 	drm_modeset_drop_locks(&ctx);
3156 	drm_modeset_acquire_fini(&ctx);
3157 	drm_atomic_state_put(state);
3158 
3159 	return err;
3160 }
3161 
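/*
 * Set the PSR debug mask (typically from debugfs). If the requested mode or
 * disable bits changed, force a fastset on all eDP connectors so the new
 * value takes effect.
 */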
3162 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3163 {
3164 	struct intel_display *display = to_intel_display(intel_dp);
3165 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3166 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3167 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3168 	u32 old_mode, old_disable_bits;
3169 	int ret;
3170 
3171 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3172 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3173 		    I915_PSR_DEBUG_MODE_MASK) ||
3174 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3175 		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3176 		return -EINVAL;
3177 	}
3178 
3179 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3180 	if (ret)
3181 		return ret;
3182 
3183 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3184 	old_disable_bits = intel_dp->psr.debug &
3185 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3186 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3187 
3188 	intel_dp->psr.debug = val;
3189 
3190 	/*
3191 	 * Do it right away if it's already enabled, otherwise it will be done
3192 	 * when enabling the source.
3193 	 */
3194 	if (intel_dp->psr.enabled)
3195 		psr_irq_control(intel_dp);
3196 
3197 	mutex_unlock(&intel_dp->psr.lock);
3198 
3199 	if (old_mode != mode || old_disable_bits != disable_bits)
3200 		ret = intel_psr_fastset_force(display);
3201 
3202 	return ret;
3203 }
3204 
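/*
 * Called from the work handler when an AUX error was flagged from the PSR
 * interrupt: disable PSR, mark the sink as not reliable and make sure the
 * sink is put back into the D0 power state.
 */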
3205 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3206 {
3207 	struct intel_psr *psr = &intel_dp->psr;
3208 
3209 	intel_psr_disable_locked(intel_dp);
3210 	psr->sink_not_reliable = true;
3211 	/* let's make sure that the sink is awake */
3212 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3213 }
3214 
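/*
 * Deferred work used to re-activate PSR: handle a pending AUX error, wait
 * for the hardware to go idle and then re-activate PSR, unless it got
 * paused or invalidated again in the meantime.
 */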
3215 static void intel_psr_work(struct work_struct *work)
3216 {
3217 	struct intel_dp *intel_dp =
3218 		container_of(work, typeof(*intel_dp), psr.work);
3219 
3220 	mutex_lock(&intel_dp->psr.lock);
3221 
3222 	if (!intel_dp->psr.enabled)
3223 		goto unlock;
3224 
3225 	if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
3226 		intel_psr_handle_irq(intel_dp);
3227 		goto unlock;
3228 	}
3229 
3230 	if (intel_dp->psr.pause_counter)
3231 		goto unlock;
3232 
3233 	/*
3234 	 * We have to make sure PSR is ready for re-enable,
3235 	 * otherwise it stays disabled until the next full enable/disable cycle.
3236 	 * PSR might take some time to get fully disabled
3237 	 * and be ready for re-enable.
3238 	 */
3239 	if (!__psr_wait_for_idle_locked(intel_dp))
3240 		goto unlock;
3241 
3242 	/*
3243 	 * The delayed work can race with an invalidate hence we need to
3244 	 * recheck. Since psr_flush first clears this and then reschedules we
3245 	 * won't ever miss a flush when bailing out here.
3246 	 */
3247 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3248 		goto unlock;
3249 
3250 	intel_psr_activate(intel_dp);
3251 unlock:
3252 	mutex_unlock(&intel_dp->psr.lock);
3253 }
3254 
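/*
 * Request a full frame update while selective fetch is enabled: on LNL+
 * this uses the dedicated SFF register, on older platforms the single and
 * continuous full frame bits in PSR2_MAN_TRK_CTL.
 */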
3255 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3256 {
3257 	struct intel_display *display = to_intel_display(intel_dp);
3258 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3259 
3260 	if (!intel_dp->psr.psr2_sel_fetch_enabled)
3261 		return;
3262 
3263 	if (DISPLAY_VER(display) >= 20)
3264 		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3265 			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3266 	else
3267 		intel_de_write(display,
3268 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3269 			       man_trk_ctl_enable_bit_get(display) |
3270 			       man_trk_ctl_partial_frame_bit_get(display) |
3271 			       man_trk_ctl_single_full_frame_bit_get(display) |
3272 			       man_trk_ctl_continuos_full_frame(display));
3273 }
3274 
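/*
 * Invalidate handling with PSR enabled: with pre-LNL selective fetch
 * switch to continuous full frame updates (cleared again on flush),
 * otherwise exit PSR until the next flush.
 */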
3275 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3276 {
3277 	struct intel_display *display = to_intel_display(intel_dp);
3278 
3279 	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3280 		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3281 			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3282 			intel_psr_configure_full_frame_update(intel_dp);
3283 		}
3284 
3285 		intel_psr_force_update(intel_dp);
3286 	} else {
3287 		intel_psr_exit(intel_dp);
3288 	}
3289 }
3290 
3291 /**
3292  * intel_psr_invalidate - Invalidate PSR
3293  * @display: display device
3294  * @frontbuffer_bits: frontbuffer plane tracking bits
3295  * @origin: which operation caused the invalidate
3296  *
3297  * Since the hardware frontbuffer tracking has gaps we need to integrate
3298  * with the software frontbuffer tracking. This function gets called every
3299  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3300  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3301  *
3302  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3303  */
3304 void intel_psr_invalidate(struct intel_display *display,
3305 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3306 {
3307 	struct intel_encoder *encoder;
3308 
3309 	if (origin == ORIGIN_FLIP)
3310 		return;
3311 
3312 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3313 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3314 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3315 
3316 		mutex_lock(&intel_dp->psr.lock);
3317 		if (!intel_dp->psr.enabled) {
3318 			mutex_unlock(&intel_dp->psr.lock);
3319 			continue;
3320 		}
3321 
3322 		pipe_frontbuffer_bits &=
3323 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3324 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3325 
3326 		if (pipe_frontbuffer_bits)
3327 			_psr_invalidate_handle(intel_dp);
3328 
3329 		mutex_unlock(&intel_dp->psr.lock);
3330 	}
3331 }
3332 /*
3333  * Once we completely rely on PSR2 S/W tracking in the future,
3334  * intel_psr_flush() will also invalidate and flush PSR for ORIGIN_FLIP
3335  * events, therefore tgl_dc3co_flush_locked() will need to be changed
3336  * accordingly.
3337  */
3338 static void
3339 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3340 		       enum fb_op_origin origin)
3341 {
3342 	struct intel_display *display = to_intel_display(intel_dp);
3343 
3344 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3345 	    !intel_dp->psr.active)
3346 		return;
3347 
3348 	/*
3349 	 * Every frontbuffer flush/flip event modifies the delay of the
3350 	 * delayed work; when it finally runs, the display has been idle.
3351 	 */
3352 	if (!(frontbuffer_bits &
3353 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3354 		return;
3355 
3356 	tgl_psr2_enable_dc3co(intel_dp);
3357 	mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
3358 			 intel_dp->psr.dc3co_exit_delay);
3359 }
3360 
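/*
 * Flush handling with PSR enabled: with pre-LNL selective fetch drop the
 * continuous full frame mode once no frontbuffer bits are busy anymore
 * and request one more full frame update; otherwise exit PSR and, once
 * no frontbuffer bits are busy, queue the work that re-activates it.
 */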
3361 static void _psr_flush_handle(struct intel_dp *intel_dp)
3362 {
3363 	struct intel_display *display = to_intel_display(intel_dp);
3364 
3365 	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3366 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3367 			/* can we turn CFF off? */
3368 			if (intel_dp->psr.busy_frontbuffer_bits == 0)
3369 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3370 		}
3371 
3372 		/*
3373 		 * Still keep the CFF bit enabled as we don't have a proper SU
3374 		 * configuration in case an update is sent for any reason after
3375 		 * the SFF bit gets cleared by the HW on the next vblank.
3376 		 *
3377 		 * NOTE: Setting the CFF bit is not needed for LunarLake onwards
3378 		 * as we have our own register for the SFF bit and we are not
3379 		 * overwriting the existing SU configuration.
3380 		 */
3381 		intel_psr_configure_full_frame_update(intel_dp);
3382 
3383 		intel_psr_force_update(intel_dp);
3384 	} else {
3385 		intel_psr_exit(intel_dp);
3386 	}
3387 
3388 	if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
3389 	    !intel_dp->psr.busy_frontbuffer_bits)
3390 		queue_work(display->wq.unordered, &intel_dp->psr.work);
3391 }
3392 
3393 /**
3394  * intel_psr_flush - Flush PSR
3395  * @display: display device
3396  * @frontbuffer_bits: frontbuffer plane tracking bits
3397  * @origin: which operation caused the flush
3398  *
3399  * Since the hardware frontbuffer tracking has gaps we need to integrate
3400  * with the software frontbuffer tracking. This function gets called every
3401  * time frontbuffer rendering has completed and flushed out to memory. PSR
3402  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3403  *
3404  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3405  */
3406 void intel_psr_flush(struct intel_display *display,
3407 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3408 {
3409 	struct intel_encoder *encoder;
3410 
3411 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3412 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3413 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3414 
3415 		mutex_lock(&intel_dp->psr.lock);
3416 		if (!intel_dp->psr.enabled) {
3417 			mutex_unlock(&intel_dp->psr.lock);
3418 			continue;
3419 		}
3420 
3421 		pipe_frontbuffer_bits &=
3422 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3423 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3424 
3425 		/*
3426 		 * If the PSR is paused by an explicit intel_psr_paused() call,
3427 		 * we have to ensure that the PSR is not activated until
3428 		 * intel_psr_resume() is called.
3429 		 */
3430 		if (intel_dp->psr.pause_counter)
3431 			goto unlock;
3432 
3433 		if (origin == ORIGIN_FLIP ||
3434 		    (origin == ORIGIN_CURSOR_UPDATE &&
3435 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3436 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3437 			goto unlock;
3438 		}
3439 
3440 		if (pipe_frontbuffer_bits == 0)
3441 			goto unlock;
3442 
3443 		/* By definition flush = invalidate + flush */
3444 		_psr_flush_handle(intel_dp);
3445 unlock:
3446 		mutex_unlock(&intel_dp->psr.lock);
3447 	}
3448 }
3449 
3450 /**
3451  * intel_psr_init - Init basic PSR work and mutex.
3452  * @intel_dp: Intel DP
3453  *
3454  * This function is called after connector initialization (which handles
3455  * the connector capabilities) and initializes the basic PSR state for
3456  * each DP encoder.
3457  */
3458 void intel_psr_init(struct intel_dp *intel_dp)
3459 {
3460 	struct intel_display *display = to_intel_display(intel_dp);
3461 	struct intel_connector *connector = intel_dp->attached_connector;
3462 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3463 
3464 	if (!(HAS_PSR(display) || HAS_DP20(display)))
3465 		return;
3466 
3467 	/*
3468 	 * HSW spec explicitly says PSR is tied to port A.
3469 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
3470 	 * BDW, GEN9 and GEN11 are not validated by the HW team on transcoders
3471 	 * other than the eDP one.
3472 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3473 	 * so let's keep it hardcoded to PORT_A on those platforms.
3474 	 * GEN12 and later support an instance of PSR registers per transcoder.
3475 	 */
3476 	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3477 		drm_dbg_kms(display->drm,
3478 			    "PSR condition failed: Port not supported\n");
3479 		return;
3480 	}
3481 
3482 	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3483 	    DISPLAY_VER(display) >= 20)
3484 		intel_dp->psr.source_panel_replay_support = true;
3485 
3486 	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3487 		intel_dp->psr.source_support = true;
3488 
3489 	/* Set link_standby vs. link_off defaults */
3490 	if (DISPLAY_VER(display) < 12)
3491 		/* For platforms before TGL let's respect the VBT again */
3492 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3493 
3494 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3495 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3496 	mutex_init(&intel_dp->psr.lock);
3497 }
3498 
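/*
 * Read the sink status and error status DPCD registers, using the Panel
 * Replay or the PSR register offsets depending on which mode is enabled.
 */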
3499 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3500 					   u8 *status, u8 *error_status)
3501 {
3502 	struct drm_dp_aux *aux = &intel_dp->aux;
3503 	int ret;
3504 	unsigned int offset;
3505 
3506 	offset = intel_dp->psr.panel_replay_enabled ?
3507 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3508 
3509 	ret = drm_dp_dpcd_readb(aux, offset, status);
3510 	if (ret != 1)
3511 		return ret;
3512 
3513 	offset = intel_dp->psr.panel_replay_enabled ?
3514 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3515 
3516 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3517 	if (ret != 1)
3518 		return ret;
3519 
3520 	*status = *status & DP_PSR_SINK_STATE_MASK;
3521 
3522 	return 0;
3523 }
3524 
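/*
 * Check the sink for ALPM errors when selective update is enabled; on an
 * error disable PSR and mark the sink as not reliable.
 */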
3525 static void psr_alpm_check(struct intel_dp *intel_dp)
3526 {
3527 	struct intel_psr *psr = &intel_dp->psr;
3528 
3529 	if (!psr->sel_update_enabled)
3530 		return;
3531 
3532 	if (intel_alpm_get_error(intel_dp)) {
3533 		intel_psr_disable_locked(intel_dp);
3534 		psr->sink_not_reliable = true;
3535 	}
3536 }
3537 
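/*
 * Check DP_PSR_ESI for a sink PSR capability change; if the capabilities
 * changed, disable PSR, mark the sink as not reliable and clear the
 * event bit.
 */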
3538 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3539 {
3540 	struct intel_display *display = to_intel_display(intel_dp);
3541 	struct intel_psr *psr = &intel_dp->psr;
3542 	u8 val;
3543 	int r;
3544 
3545 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3546 	if (r != 1) {
3547 		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3548 		return;
3549 	}
3550 
3551 	if (val & DP_PSR_CAPS_CHANGE) {
3552 		intel_psr_disable_locked(intel_dp);
3553 		psr->sink_not_reliable = true;
3554 		drm_dbg_kms(display->drm,
3555 			    "Sink PSR capability changed, disabling PSR\n");
3556 
3557 		/* Clearing it */
3558 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3559 	}
3560 }
3561 
3562 /*
3563  * On common bits:
3564  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3565  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3566  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3567  * this function is relying on PSR definitions
3568  */
3569 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3570 {
3571 	struct intel_display *display = to_intel_display(intel_dp);
3572 	struct intel_psr *psr = &intel_dp->psr;
3573 	u8 status, error_status;
3574 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3575 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3576 			  DP_PSR_LINK_CRC_ERROR;
3577 
3578 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3579 		return;
3580 
3581 	mutex_lock(&psr->lock);
3582 
3583 	psr->link_ok = false;
3584 
3585 	if (!psr->enabled)
3586 		goto exit;
3587 
3588 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3589 		drm_err(display->drm,
3590 			"Error reading PSR status or error status\n");
3591 		goto exit;
3592 	}
3593 
3594 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3595 	    (error_status & errors)) {
3596 		intel_psr_disable_locked(intel_dp);
3597 		psr->sink_not_reliable = true;
3598 	}
3599 
3600 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3601 	    !error_status)
3602 		drm_dbg_kms(display->drm,
3603 			    "PSR sink internal error, disabling PSR\n");
3604 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3605 		drm_dbg_kms(display->drm,
3606 			    "PSR RFB storage error, disabling PSR\n");
3607 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3608 		drm_dbg_kms(display->drm,
3609 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3610 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3611 		drm_dbg_kms(display->drm,
3612 			    "PSR Link CRC error, disabling PSR\n");
3613 
3614 	if (error_status & ~errors)
3615 		drm_err(display->drm,
3616 			"PSR_ERROR_STATUS unhandled errors %x\n",
3617 			error_status & ~errors);
3618 	/* clear status register */
3619 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3620 
3621 	if (!psr->panel_replay_enabled) {
3622 		psr_alpm_check(intel_dp);
3623 		psr_capability_changed_check(intel_dp);
3624 	}
3625 
3626 exit:
3627 	mutex_unlock(&psr->lock);
3628 }
3629 
3630 bool intel_psr_enabled(struct intel_dp *intel_dp)
3631 {
3632 	bool ret;
3633 
3634 	if (!CAN_PSR(intel_dp))
3635 		return false;
3636 
3637 	mutex_lock(&intel_dp->psr.lock);
3638 	ret = intel_dp->psr.enabled;
3639 	mutex_unlock(&intel_dp->psr.lock);
3640 
3641 	return ret;
3642 }
3643 
3644 /**
3645  * intel_psr_link_ok - return psr->link_ok
3646  * @intel_dp: struct intel_dp
3647  *
3648  * We are seeing unexpected link re-trainings with some panels. This is caused
3649  * by the panel reporting a bad link status after PSR is enabled. Code checking
3650  * the link status can call this to decide whether a bad link status reported
3651  * by the panel can be ignored, i.e. if the panel reports a bad link while
3652  * intel_psr_link_ok() says the link is ok, the caller should trust the latter.
3653  *
3654  * Return value of link_ok
3655  */
3656 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3657 {
3658 	bool ret;
3659 
3660 	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3661 	    !intel_dp_is_edp(intel_dp))
3662 		return false;
3663 
3664 	mutex_lock(&intel_dp->psr.lock);
3665 	ret = intel_dp->psr.link_ok;
3666 	mutex_unlock(&intel_dp->psr.lock);
3667 
3668 	return ret;
3669 }
3670 
3671 /**
3672  * intel_psr_lock - grab PSR lock
3673  * @crtc_state: the crtc state
3674  *
3675  * This is initially meant to be used around the CRTC update, when
3676  * vblank sensitive registers are updated and we need to grab the lock
3677  * beforehand to avoid vblank evasion.
3678  */
3679 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3680 {
3681 	struct intel_display *display = to_intel_display(crtc_state);
3682 	struct intel_encoder *encoder;
3683 
3684 	if (!crtc_state->has_psr)
3685 		return;
3686 
3687 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3688 					     crtc_state->uapi.encoder_mask) {
3689 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3690 
3691 		mutex_lock(&intel_dp->psr.lock);
3692 		break;
3693 	}
3694 }
3695 
3696 /**
3697  * intel_psr_unlock - release PSR lock
3698  * @crtc_state: the crtc state
3699  *
3700  * Release the PSR lock that was held during pipe update.
3701  */
3702 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3703 {
3704 	struct intel_display *display = to_intel_display(crtc_state);
3705 	struct intel_encoder *encoder;
3706 
3707 	if (!crtc_state->has_psr)
3708 		return;
3709 
3710 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3711 					     crtc_state->uapi.encoder_mask) {
3712 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3713 
3714 		mutex_unlock(&intel_dp->psr.lock);
3715 		break;
3716 	}
3717 }
3718 
3719 /* Wa_16025596647 */
3720 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
3721 {
3722 	struct intel_display *display = to_intel_display(intel_dp);
3723 	bool dc5_dc6_blocked;
3724 
3725 	if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
3726 		return;
3727 
3728 	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
3729 
3730 	if (intel_dp->psr.sel_update_enabled)
3731 		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
3732 					 psr_compute_idle_frames(intel_dp));
3733 	else
3734 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
3735 								       intel_dp->psr.pipe,
3736 								       dc5_dc6_blocked);
3737 }
3738 
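/*
 * Work handler for Wa_16025596647: re-evaluate the underrun on idle
 * workaround for each eligible encoder with PSR (not Panel Replay)
 * enabled.
 */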
3739 static void psr_dc5_dc6_wa_work(struct work_struct *work)
3740 {
3741 	struct intel_display *display = container_of(work, typeof(*display),
3742 						     psr_dc5_dc6_wa_work);
3743 	struct intel_encoder *encoder;
3744 
3745 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3746 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3747 
3748 		mutex_lock(&intel_dp->psr.lock);
3749 
3750 		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
3751 		    !intel_dp->psr.pkg_c_latency_used)
3752 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3753 
3754 		mutex_unlock(&intel_dp->psr.lock);
3755 	}
3756 }
3757 
3758 /**
3759  * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
3760  * @display: intel display struct
3761  *
3762  * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to schedule
3763  * psr_dc5_dc6_wa_work used for applying/removing the workaround.
3764  */
3765 void intel_psr_notify_dc5_dc6(struct intel_display *display)
3766 {
3767 	if (DISPLAY_VER(display) != 20 &&
3768 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3769 		return;
3770 
3771 	schedule_work(&display->psr_dc5_dc6_wa_work);
3772 }
3773 
3774 /**
3775  * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
3776  * @display: intel display struct
3777  *
3778  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
3779  * psr_dc5_dc6_wa_work used for applying the workaround.
3780  */
3781 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
3782 {
3783 	if (DISPLAY_VER(display) != 20 &&
3784 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3785 		return;
3786 
3787 	INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
3788 }
3789 
3790 /**
3791  * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
3792  * @state: intel atomic state
3793  * @crtc: intel crtc
3794  * @enable: enable/disable
3795  *
3796  * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3797  * apply/remove the workaround when a pipe is getting enabled/disabled.
3798  */
3799 void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
3800 				  struct intel_crtc *crtc, bool enable)
3801 {
3802 	struct intel_display *display = to_intel_display(state);
3803 	struct intel_encoder *encoder;
3804 
3805 	if (DISPLAY_VER(display) != 20 &&
3806 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3807 		return;
3808 
3809 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3810 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3811 		u8 active_non_psr_pipes;
3812 
3813 		mutex_lock(&intel_dp->psr.lock);
3814 
3815 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3816 			goto unlock;
3817 
3818 		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
3819 
3820 		if (enable)
3821 			active_non_psr_pipes |= BIT(crtc->pipe);
3822 		else
3823 			active_non_psr_pipes &= ~BIT(crtc->pipe);
3824 
3825 		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
3826 			goto unlock;
3827 
3828 		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
3829 		    (!enable && !intel_dp->psr.active_non_psr_pipes) ||
3830 		    !intel_dp->psr.pkg_c_latency_used) {
3831 			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3832 			goto unlock;
3833 		}
3834 
3835 		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3836 
3837 		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3838 unlock:
3839 		mutex_unlock(&intel_dp->psr.lock);
3840 	}
3841 }
3842 
3843 /**
3844  * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
3845  * @display: intel display struct
3846  * @enable: enable/disable
3847  *
3848  * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3849  * apply/remove the workaround when vblank is getting enabled/disabled.
3850  */
3851 void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
3852 					    bool enable)
3853 {
3854 	struct intel_encoder *encoder;
3855 
3856 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3857 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3858 
3859 		mutex_lock(&intel_dp->psr.lock);
3860 		if (intel_dp->psr.panel_replay_enabled) {
3861 			mutex_unlock(&intel_dp->psr.lock);
3862 			break;
3863 		}
3864 
3865 		if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
3866 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3867 
3868 		mutex_unlock(&intel_dp->psr.lock);
3869 		return;
3870 	}
3871 
3872 	/*
3873 	 * NOTE: intel_display_power_set_target_dc_state is used
3874 	 * only by PSR code for DC3CO handling. DC3CO target
3875 	 * state is currently disabled in PSR code. If DC3CO
3876 	 * is taken into use we need to take that into account
3877 	 * here as well.
3878 	 */
3879 	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
3880 						DC_STATE_EN_UPTO_DC6);
3881 }
3882 
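/*
 * Print the source PSR/Panel Replay hardware status for debugfs, decoding
 * the live state field from either EDP_PSR2_STATUS or the PSR1 status
 * register depending on the enabled mode and platform.
 */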
3883 static void
3884 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3885 {
3886 	struct intel_display *display = to_intel_display(intel_dp);
3887 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3888 	const char *status = "unknown";
3889 	u32 val, status_val;
3890 
3891 	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
3892 	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
3893 		static const char * const live_status[] = {
3894 			"IDLE",
3895 			"CAPTURE",
3896 			"CAPTURE_FS",
3897 			"SLEEP",
3898 			"BUFON_FW",
3899 			"ML_UP",
3900 			"SU_STANDBY",
3901 			"FAST_SLEEP",
3902 			"DEEP_SLEEP",
3903 			"BUF_ON",
3904 			"TG_ON"
3905 		};
3906 		val = intel_de_read(display,
3907 				    EDP_PSR2_STATUS(display, cpu_transcoder));
3908 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3909 		if (status_val < ARRAY_SIZE(live_status))
3910 			status = live_status[status_val];
3911 	} else {
3912 		static const char * const live_status[] = {
3913 			"IDLE",
3914 			"SRDONACK",
3915 			"SRDENT",
3916 			"BUFOFF",
3917 			"BUFON",
3918 			"AUXACK",
3919 			"SRDOFFACK",
3920 			"SRDENT_ON",
3921 		};
3922 		val = intel_de_read(display,
3923 				    psr_status_reg(display, cpu_transcoder));
3924 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3925 		if (status_val < ARRAY_SIZE(live_status))
3926 			status = live_status[status_val];
3927 	}
3928 
3929 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3930 }
3931 
3932 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3933 				      struct seq_file *m)
3934 {
3935 	struct intel_psr *psr = &intel_dp->psr;
3936 
3937 	seq_printf(m, "Sink support: PSR = %s",
3938 		   str_yes_no(psr->sink_support));
3939 
3940 	if (psr->sink_support)
3941 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3942 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3943 		seq_printf(m, " (Early Transport)");
3944 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3945 	seq_printf(m, ", Panel Replay Selective Update = %s",
3946 		   str_yes_no(psr->sink_panel_replay_su_support));
3947 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
3948 	    DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3949 		seq_printf(m, " (Early Transport)");
3950 	seq_printf(m, "\n");
3951 }
3952 
3953 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3954 				 struct seq_file *m)
3955 {
3956 	struct intel_psr *psr = &intel_dp->psr;
3957 	const char *status, *mode, *region_et;
3958 
3959 	if (psr->enabled)
3960 		status = " enabled";
3961 	else
3962 		status = "disabled";
3963 
3964 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3965 		mode = "Panel Replay Selective Update";
3966 	else if (psr->panel_replay_enabled)
3967 		mode = "Panel Replay";
3968 	else if (psr->sel_update_enabled)
3969 		mode = "PSR2";
3970 	else if (psr->enabled)
3971 		mode = "PSR1";
3972 	else
3973 		mode = "";
3974 
3975 	if (psr->su_region_et_enabled)
3976 		region_et = " (Early Transport)";
3977 	else
3978 		region_et = "";
3979 
3980 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3981 }
3982 
3983 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3984 {
3985 	struct intel_display *display = to_intel_display(intel_dp);
3986 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3987 	struct intel_psr *psr = &intel_dp->psr;
3988 	struct ref_tracker *wakeref;
3989 	bool enabled;
3990 	u32 val, psr2_ctl;
3991 
3992 	intel_psr_sink_capability(intel_dp, m);
3993 
3994 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3995 		return 0;
3996 
3997 	wakeref = intel_display_rpm_get(display);
3998 	mutex_lock(&psr->lock);
3999 
4000 	intel_psr_print_mode(intel_dp, m);
4001 
4002 	if (!psr->enabled) {
4003 		seq_printf(m, "PSR sink not reliable: %s\n",
4004 			   str_yes_no(psr->sink_not_reliable));
4005 
4006 		goto unlock;
4007 	}
4008 
4009 	if (psr->panel_replay_enabled) {
4010 		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
4011 
4012 		if (intel_dp_is_edp(intel_dp))
4013 			psr2_ctl = intel_de_read(display,
4014 						 EDP_PSR2_CTL(display,
4015 							      cpu_transcoder));
4016 
4017 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
4018 	} else if (psr->sel_update_enabled) {
4019 		val = intel_de_read(display,
4020 				    EDP_PSR2_CTL(display, cpu_transcoder));
4021 		enabled = val & EDP_PSR2_ENABLE;
4022 	} else {
4023 		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
4024 		enabled = val & EDP_PSR_ENABLE;
4025 	}
4026 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
4027 		   str_enabled_disabled(enabled), val);
4028 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
4029 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
4030 			   psr2_ctl);
4031 	psr_source_status(intel_dp, m);
4032 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
4033 		   psr->busy_frontbuffer_bits);
4034 
4035 	/*
4036 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
4037 	 */
4038 	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
4039 	seq_printf(m, "Performance counter: %u\n",
4040 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
4041 
4042 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
4043 		seq_printf(m, "Last attempted entry at: %lld\n",
4044 			   psr->last_entry_attempt);
4045 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
4046 	}
4047 
4048 	if (psr->sel_update_enabled) {
4049 		u32 su_frames_val[3];
4050 		int frame;
4051 
4052 		/*
4053 		 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
4054 		 * (it returns zeros only) and it has been removed on Xe2_LPD.
4055 		 */
4056 		if (DISPLAY_VER(display) < 13) {
4057 			/*
4058 			 * Read all 3 registers beforehand to minimize the chance of
4059 			 * crossing a frame boundary between register reads.
4060 			 */
4061 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
4062 				val = intel_de_read(display,
4063 						    PSR2_SU_STATUS(display, cpu_transcoder, frame));
4064 				su_frames_val[frame / 3] = val;
4065 			}
4066 
4067 			seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
4068 
4069 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
4070 				u32 su_blocks;
4071 
4072 				su_blocks = su_frames_val[frame / 3] &
4073 					PSR2_SU_STATUS_MASK(frame);
4074 				su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
4075 				seq_printf(m, "%d\t%d\n", frame, su_blocks);
4076 			}
4077 		}
4078 
4079 		seq_printf(m, "PSR2 selective fetch: %s\n",
4080 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
4081 	}
4082 
4083 unlock:
4084 	mutex_unlock(&psr->lock);
4085 	intel_display_rpm_put(display, wakeref);
4086 
4087 	return 0;
4088 }
4089 
4090 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4091 {
4092 	struct intel_display *display = m->private;
4093 	struct intel_dp *intel_dp = NULL;
4094 	struct intel_encoder *encoder;
4095 
4096 	if (!HAS_PSR(display))
4097 		return -ENODEV;
4098 
4099 	/* Find the first eDP which supports PSR */
4100 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4101 		intel_dp = enc_to_intel_dp(encoder);
4102 		break;
4103 	}
4104 
4105 	if (!intel_dp)
4106 		return -ENODEV;
4107 
4108 	return intel_psr_status(m, intel_dp);
4109 }
4110 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4111 
4112 static int
4113 i915_edp_psr_debug_set(void *data, u64 val)
4114 {
4115 	struct intel_display *display = data;
4116 	struct intel_encoder *encoder;
4117 	int ret = -ENODEV;
4118 
4119 	if (!HAS_PSR(display))
4120 		return ret;
4121 
4122 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4123 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4124 
4125 		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4126 
4127 		// TODO: split to each transcoder's PSR debug state
4128 		with_intel_display_rpm(display)
4129 			ret = intel_psr_debug_set(intel_dp, val);
4130 	}
4131 
4132 	return ret;
4133 }
4134 
4135 static int
4136 i915_edp_psr_debug_get(void *data, u64 *val)
4137 {
4138 	struct intel_display *display = data;
4139 	struct intel_encoder *encoder;
4140 
4141 	if (!HAS_PSR(display))
4142 		return -ENODEV;
4143 
4144 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4145 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4146 
4147 		// TODO: split to each transcoder's PSR debug state
4148 		*val = READ_ONCE(intel_dp->psr.debug);
4149 		return 0;
4150 	}
4151 
4152 	return -ENODEV;
4153 }
4154 
4155 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4156 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4157 			"%llu\n");
4158 
4159 void intel_psr_debugfs_register(struct intel_display *display)
4160 {
4161 	struct dentry *debugfs_root = display->drm->debugfs_root;
4162 
4163 	debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
4164 			    display, &i915_edp_psr_debug_fops);
4165 
4166 	debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
4167 			    display, &i915_edp_psr_status_fops);
4168 }
4169 
4170 static const char *psr_mode_str(struct intel_dp *intel_dp)
4171 {
4172 	if (intel_dp->psr.panel_replay_enabled)
4173 		return "PANEL-REPLAY";
4174 	else if (intel_dp->psr.enabled)
4175 		return "PSR";
4176 
4177 	return "unknown";
4178 }
4179 
4180 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4181 {
4182 	struct intel_connector *connector = m->private;
4183 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4184 	static const char * const sink_status[] = {
4185 		"inactive",
4186 		"transition to active, capture and display",
4187 		"active, display from RFB",
4188 		"active, capture and display on sink device timings",
4189 		"transition to inactive, capture and display, timing re-sync",
4190 		"reserved",
4191 		"reserved",
4192 		"sink internal error",
4193 	};
4194 	const char *str;
4195 	int ret;
4196 	u8 status, error_status;
4197 
4198 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4199 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4200 		return -ENODEV;
4201 	}
4202 
4203 	if (connector->base.status != connector_status_connected)
4204 		return -ENODEV;
4205 
4206 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4207 	if (ret)
4208 		return ret;
4209 
4210 	status &= DP_PSR_SINK_STATE_MASK;
4211 	if (status < ARRAY_SIZE(sink_status))
4212 		str = sink_status[status];
4213 	else
4214 		str = "unknown";
4215 
4216 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4217 
4218 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4219 
4220 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4221 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4222 			    DP_PSR_LINK_CRC_ERROR))
4223 		seq_puts(m, ":\n");
4224 	else
4225 		seq_puts(m, "\n");
4226 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4227 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4228 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4229 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4230 	if (error_status & DP_PSR_LINK_CRC_ERROR)
4231 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4232 
4233 	return ret;
4234 }
4235 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4236 
4237 static int i915_psr_status_show(struct seq_file *m, void *data)
4238 {
4239 	struct intel_connector *connector = m->private;
4240 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4241 
4242 	return intel_psr_status(m, intel_dp);
4243 }
4244 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4245 
4246 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4247 {
4248 	struct intel_display *display = to_intel_display(connector);
4249 	struct dentry *root = connector->base.debugfs_entry;
4250 
4251 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4252 	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4253 		return;
4254 
4255 	debugfs_create_file("i915_psr_sink_status", 0444, root,
4256 			    connector, &i915_psr_sink_status_fops);
4257 
4258 	if (HAS_PSR(display) || HAS_DP20(display))
4259 		debugfs_create_file("i915_psr_status", 0444, root,
4260 				    connector, &i915_psr_status_fops);
4261 }
4262 
4263 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4264 {
4265 	/*
4266 	 * eDP Panel Replay always uses ALPM.
4267 	 * PSR2 uses ALPM but PSR1 doesn't.
4268 	 */
4269 	return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4270 					     crtc_state->has_panel_replay);
4271 }
4272 
4273 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
4274 				   const struct intel_crtc_state *crtc_state)
4275 {
4276 	return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
4277 }
4278