xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_print.h>
30 #include <drm/drm_vblank.h>
31 
32 #include "i915_reg.h"
33 #include "intel_alpm.h"
34 #include "intel_atomic.h"
35 #include "intel_crtc.h"
36 #include "intel_cursor_regs.h"
37 #include "intel_ddi.h"
38 #include "intel_de.h"
39 #include "intel_display_irq.h"
40 #include "intel_display_regs.h"
41 #include "intel_display_rpm.h"
42 #include "intel_display_types.h"
43 #include "intel_dmc.h"
44 #include "intel_dp.h"
45 #include "intel_dp_aux.h"
46 #include "intel_dsb.h"
47 #include "intel_frontbuffer.h"
48 #include "intel_hdmi.h"
49 #include "intel_psr.h"
50 #include "intel_psr_regs.h"
51 #include "intel_snps_phy.h"
52 #include "intel_step.h"
53 #include "intel_vblank.h"
54 #include "intel_vrr.h"
55 #include "skl_universal_plane.h"
56 
57 /**
58  * DOC: Panel Self Refresh (PSR/SRD)
59  *
60  * Since Haswell the Display controller supports Panel Self-Refresh on
61  * display panels which have a remote frame buffer (RFB) implemented
62  * according to the PSR spec in eDP 1.3. PSR allows the display to go to
63  * lower standby states when the system is idle but the display is on, as
64  * it eliminates display refresh requests to DDR memory completely as long
65  * as the frame buffer for that display is unchanged.
66  *
67  * Panel Self Refresh must be supported by both Hardware (source) and
68  * Panel (sink).
69  *
70  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
71  * to power down the link and memory controller. For DSI panels the same idea
72  * is called "manual mode".
73  *
74  * The implementation uses the hardware-based PSR support which automatically
75  * enters/exits self-refresh mode. The hardware takes care of sending the
76  * required DP aux message and could even retrain the link (that part isn't
77  * enabled yet though). The hardware also keeps track of any frontbuffer
78  * changes to know when to exit self-refresh mode again. Unfortunately that
79  * part doesn't work too well, which is why the i915 PSR support uses the
80  * software frontbuffer tracking to make sure it doesn't miss a screen
81  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
82  * get called by the frontbuffer tracking code. Note that because of locking
83  * issues the self-refresh re-enable code is done from a work queue, which
84  * must be correctly synchronized/cancelled when shutting down the pipe.
85  *
86  * DC3CO (DC3 clock off)
87  *
88  * On top of PSR2, GEN12 adds an intermediate power saving state that turns
89  * the clock off automatically during the PSR2 idle state.
90  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
91  * entry/exit allows the HW to enter a low-power state even when page flipping
92  * periodically (for instance a 30fps video playback scenario).
93  *
94  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
95  * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
96  * after 6 frames. If no other flip occurs and that work runs, DC3CO is
97  * disabled and PSR2 is configured to enter deep sleep, resetting again in
98  * case of another flip.
99  * Front buffer modifications do not trigger DC3CO activation on purpose as it
100  * would bring a lot of complexity and most modern systems will only
101  * use page flips.
102  */
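
/*
 * Illustrative sketch, not part of the driver: the frontbuffer tracking
 * integration described above amounts to bracketing CPU writes to a
 * frontbuffer with an invalidate/flush pair, roughly (hypothetical caller,
 * the exact signatures live in intel_psr.h):
 *
 *	intel_psr_invalidate(display, frontbuffer_bits, ORIGIN_CPU);
 *	// ... CPU renders into the frontbuffer ...
 *	intel_psr_flush(display, frontbuffer_bits, ORIGIN_CPU);
 *
 * intel_psr_invalidate() forces a PSR exit so the panel picks up the updates,
 * and intel_psr_flush() schedules the work that re-enables PSR once the
 * frontbuffer is quiescent again.
 */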
103 
104 /*
105  * Description of PSR mask bits:
106  *
107  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
108  *
109  *  When unmasked (nearly) all display register writes (eg. even
110  *  SWF) trigger a PSR exit. Some registers are excluded from this
111  *  and they have a more specific mask (described below). On icl+
112  *  this bit no longer exists and is effectively always set.
113  *
114  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
115  *
116  *  When unmasked (nearly) all pipe/plane register writes
117  *  trigger a PSR exit. Some plane registers are excluded from this
118  *  and they have a more specific mask (described below).
119  *
120  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
121  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
122  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
123  *
124  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
125  *  SPR_SURF/CURBASE are not included in this and instead are
126  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
127  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
128  *
129  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
130  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
131  *
132  *  When unmasked PSR is blocked as long as the sprite
133  *  plane is enabled. skl+ with their universal planes no
134  *  longer have a mask bit like this, and no plane being
135  *  enabled blocks PSR.
136  *
137  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
138  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
139  *
140  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
141  *  this doesn't exist but CURPOS is included in the
142  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
143  *
144  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
145  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
146  *
147  *  When unmasked PSR is blocked as long as vblank and/or vsync
148  *  interrupt is unmasked in IMR *and* enabled in IER.
149  *
150  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
151  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
152  *
153  *  Selects whether PSR exit generates an extra vblank before
154  *  the first frame is transmitted. Also note the opposite polarity
155  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
156  *  unmasked==do not generate the extra vblank).
157  *
158  *  With DC states enabled the extra vblank happens after link training,
159  *  with DC states disabled it happens immediately upon PSR exit trigger.
160  *  No idea as of now why there is a difference. HSW/BDW (which don't
161  *  even have DMC) always generate it after link training. Go figure.
162  *
163  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
164  *  and thus won't latch until the first vblank. So with DC states
165  *  enabled the register effectively uses the reset value during DC5
166  *  exit+PSR exit sequence, and thus the bit does nothing until
167  *  latched by the vblank that it was trying to prevent from being
168  *  generated in the first place. So we should probably call this
169  *  one a chicken/egg bit instead on skl+.
170  *
171  *  In standby mode (as opposed to link-off) this makes no difference
172  *  as the timing generator keeps running the whole time generating
173  *  normal periodic vblanks.
174  *
175  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
176  *  and doing so makes the behaviour match the skl+ reset value.
177  *
178  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
179  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
180  *
181  *  On BDW without this bit set no vblanks whatsoever are
182  *  generated after PSR exit. On HSW this has no apparent effect.
183  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
184  *
185  * The rest of the bits are more self-explanatory and/or
186  * irrelevant for normal operation.
187  *
188  * Description of intel_crtc_state variables. has_psr, has_panel_replay and
189  * has_sel_update:
190  *
191  *  has_psr (alone):					PSR1
192  *  has_psr + has_sel_update:				PSR2
193  *  has_psr + has_panel_replay:				Panel Replay
194  *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
195  *
196  * Description of some intel_psr variables. enabled, panel_replay_enabled,
197  * sel_update_enabled
198  *
199  *  enabled (alone):						PSR1
200  *  enabled + sel_update_enabled:				PSR2
201  *  enabled + panel_replay_enabled:				Panel Replay
202  *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
203  */
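
/*
 * Minimal sketch, for illustration only (psr_mode_name() is a hypothetical
 * helper, not used anywhere in the driver), of how the crtc_state flag
 * combinations above map onto a mode:
 *
 *	static const char *psr_mode_name(const struct intel_crtc_state *s)
 *	{
 *		if (!s->has_psr)
 *			return "none";
 *		if (s->has_panel_replay)
 *			return s->has_sel_update ? "Panel Replay SU" : "Panel Replay";
 *		return s->has_sel_update ? "PSR2" : "PSR1";
 *	}
 */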
204 
205 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
206 			   (intel_dp)->psr.source_support)
207 
208 bool intel_encoder_can_psr(struct intel_encoder *encoder)
209 {
210 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
211 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
212 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
213 	else
214 		return false;
215 }
216 
217 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
218 				  const struct intel_crtc_state *crtc_state)
219 {
220 	/*
221 	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
222 	 * the output is enabled. For non-eDP outputs the main link is always
223 	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
224 	 * for eDP.
225 	 *
226 	 * TODO:
227 	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
228 	 *   the ALPM with main-link off mode is not enabled.
229 	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
230 	 *   main-link off mode is added for it and this mode gets enabled.
231 	 */
232 	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
233 	       intel_encoder_can_psr(encoder);
234 }
235 
236 static bool psr_global_enabled(struct intel_dp *intel_dp)
237 {
238 	struct intel_connector *connector = intel_dp->attached_connector;
239 
240 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
241 	case I915_PSR_DEBUG_DEFAULT:
242 		return intel_dp_is_edp(intel_dp) ?
243 			connector->panel.vbt.psr.enable : true;
244 	case I915_PSR_DEBUG_DISABLE:
245 		return false;
246 	default:
247 		return true;
248 	}
249 }
250 
251 static bool sel_update_global_enabled(struct intel_dp *intel_dp)
252 {
253 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
254 	case I915_PSR_DEBUG_DISABLE:
255 	case I915_PSR_DEBUG_FORCE_PSR1:
256 		return false;
257 	default:
258 		return true;
259 	}
260 }
261 
262 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
263 {
264 	struct intel_display *display = to_intel_display(intel_dp);
265 
266 	return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
267 		display->params.enable_panel_replay;
268 }
269 
270 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
271 {
272 	struct intel_display *display = to_intel_display(intel_dp);
273 
274 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
275 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
276 }
277 
278 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
279 {
280 	struct intel_display *display = to_intel_display(intel_dp);
281 
282 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
283 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
284 }
285 
286 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
287 {
288 	struct intel_display *display = to_intel_display(intel_dp);
289 
290 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
291 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
292 }
293 
294 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
295 {
296 	struct intel_display *display = to_intel_display(intel_dp);
297 
298 	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
299 		EDP_PSR_MASK(intel_dp->psr.transcoder);
300 }
301 
302 static i915_reg_t psr_ctl_reg(struct intel_display *display,
303 			      enum transcoder cpu_transcoder)
304 {
305 	if (DISPLAY_VER(display) >= 8)
306 		return EDP_PSR_CTL(display, cpu_transcoder);
307 	else
308 		return HSW_SRD_CTL;
309 }
310 
311 static i915_reg_t psr_debug_reg(struct intel_display *display,
312 				enum transcoder cpu_transcoder)
313 {
314 	if (DISPLAY_VER(display) >= 8)
315 		return EDP_PSR_DEBUG(display, cpu_transcoder);
316 	else
317 		return HSW_SRD_DEBUG;
318 }
319 
320 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
321 				   enum transcoder cpu_transcoder)
322 {
323 	if (DISPLAY_VER(display) >= 8)
324 		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
325 	else
326 		return HSW_SRD_PERF_CNT;
327 }
328 
329 static i915_reg_t psr_status_reg(struct intel_display *display,
330 				 enum transcoder cpu_transcoder)
331 {
332 	if (DISPLAY_VER(display) >= 8)
333 		return EDP_PSR_STATUS(display, cpu_transcoder);
334 	else
335 		return HSW_SRD_STATUS;
336 }
337 
338 static i915_reg_t psr_imr_reg(struct intel_display *display,
339 			      enum transcoder cpu_transcoder)
340 {
341 	if (DISPLAY_VER(display) >= 12)
342 		return TRANS_PSR_IMR(display, cpu_transcoder);
343 	else
344 		return EDP_PSR_IMR;
345 }
346 
347 static i915_reg_t psr_iir_reg(struct intel_display *display,
348 			      enum transcoder cpu_transcoder)
349 {
350 	if (DISPLAY_VER(display) >= 12)
351 		return TRANS_PSR_IIR(display, cpu_transcoder);
352 	else
353 		return EDP_PSR_IIR;
354 }
355 
356 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
357 				  enum transcoder cpu_transcoder)
358 {
359 	if (DISPLAY_VER(display) >= 8)
360 		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
361 	else
362 		return HSW_SRD_AUX_CTL;
363 }
364 
365 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
366 				   enum transcoder cpu_transcoder, int i)
367 {
368 	if (DISPLAY_VER(display) >= 8)
369 		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
370 	else
371 		return HSW_SRD_AUX_DATA(i);
372 }
373 
374 static void psr_irq_control(struct intel_dp *intel_dp)
375 {
376 	struct intel_display *display = to_intel_display(intel_dp);
377 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
378 	u32 mask;
379 
380 	if (intel_dp->psr.panel_replay_enabled)
381 		return;
382 
383 	mask = psr_irq_psr_error_bit_get(intel_dp);
384 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
385 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
386 			psr_irq_pre_entry_bit_get(intel_dp);
387 
388 	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
389 		     psr_irq_mask_get(intel_dp), ~mask);
390 }
391 
392 static void psr_event_print(struct intel_display *display,
393 			    u32 val, bool sel_update_enabled)
394 {
395 	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
396 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
397 		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
398 	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
399 		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
400 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
401 		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
402 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
403 		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
404 	if (val & PSR_EVENT_GRAPHICS_RESET)
405 		drm_dbg_kms(display->drm, "\tGraphics reset\n");
406 	if (val & PSR_EVENT_PCH_INTERRUPT)
407 		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
408 	if (val & PSR_EVENT_MEMORY_UP)
409 		drm_dbg_kms(display->drm, "\tMemory up\n");
410 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
411 		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
412 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
413 		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
414 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
415 		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
416 	if (val & PSR_EVENT_REGISTER_UPDATE)
417 		drm_dbg_kms(display->drm, "\tRegister updated\n");
418 	if (val & PSR_EVENT_HDCP_ENABLE)
419 		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
420 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
421 		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
422 	if (val & PSR_EVENT_VBI_ENABLE)
423 		drm_dbg_kms(display->drm, "\tVBI enabled\n");
424 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
425 		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
426 	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
427 		drm_dbg_kms(display->drm, "\tPSR disabled\n");
428 }
429 
430 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
431 {
432 	struct intel_display *display = to_intel_display(intel_dp);
433 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
434 	ktime_t time_ns =  ktime_get();
435 
436 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
437 		intel_dp->psr.last_entry_attempt = time_ns;
438 		drm_dbg_kms(display->drm,
439 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
440 			    transcoder_name(cpu_transcoder));
441 	}
442 
443 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
444 		intel_dp->psr.last_exit = time_ns;
445 		drm_dbg_kms(display->drm,
446 			    "[transcoder %s] PSR exit completed\n",
447 			    transcoder_name(cpu_transcoder));
448 
449 		if (DISPLAY_VER(display) >= 9) {
450 			u32 val;
451 
452 			val = intel_de_rmw(display,
453 					   PSR_EVENT(display, cpu_transcoder),
454 					   0, 0);
455 
456 			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
457 		}
458 	}
459 
460 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
461 		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
462 			 transcoder_name(cpu_transcoder));
463 
464 		intel_dp->psr.irq_aux_error = true;
465 
466 		/*
467 		 * If this interrupt is not masked it will keep
468 		 * firing so fast that it prevents the scheduled
469 		 * work from running.
470 		 * Also after a PSR error, we don't want to arm PSR
471 		 * again so we don't care about unmasking the interrupt
472 		 * or unsetting irq_aux_error.
473 		 */
474 		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
475 			     0, psr_irq_psr_error_bit_get(intel_dp));
476 
477 		queue_work(display->wq.unordered, &intel_dp->psr.work);
478 	}
479 }
480 
481 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
482 {
483 	struct intel_display *display = to_intel_display(intel_dp);
484 	u8 val = 8; /* assume the worst if we can't read the value */
485 
486 	if (drm_dp_dpcd_readb(&intel_dp->aux,
487 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
488 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
489 	else
490 		drm_dbg_kms(display->drm,
491 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
492 	return val;
493 }
494 
495 static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
496 {
497 	u8 su_capability = 0;
498 
499 	if (intel_dp->psr.sink_panel_replay_su_support) {
500 		if (drm_dp_dpcd_read_byte(&intel_dp->aux,
501 					  DP_PANEL_REPLAY_CAP_CAPABILITY,
502 					  &su_capability) < 0)
503 			return 0;
504 	} else {
505 		su_capability = intel_dp->psr_dpcd[1];
506 	}
507 
508 	return su_capability;
509 }
510 
511 static unsigned int
512 intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
513 {
514 	return intel_dp->psr.sink_panel_replay_su_support ?
515 		DP_PANEL_REPLAY_CAP_X_GRANULARITY :
516 		DP_PSR2_SU_X_GRANULARITY;
517 }
518 
519 static unsigned int
520 intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
521 {
522 	return intel_dp->psr.sink_panel_replay_su_support ?
523 		DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
524 		DP_PSR2_SU_Y_GRANULARITY;
525 }
526 
527 /*
528  * Note: Bits related to granularity are the same in the panel replay and PSR
529  * registers. Rely on the PSR definitions for these "common" bits.
530  */
531 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
532 {
533 	struct intel_display *display = to_intel_display(intel_dp);
534 	ssize_t r;
535 	u16 w;
536 	u8 y;
537 
538 	/*
539 	 * TODO: Do we need to take into account panel supporting both PSR and
540 	 * Panel replay?
541 	 */
542 
543 	/*
544 	 * If the sink doesn't have specific granularity requirements, set the
545 	 * legacy ones.
546 	 */
547 	if (!(intel_dp_get_su_capability(intel_dp) &
548 	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
549 		/* As PSR2 HW sends full lines, we do not care about x granularity */
550 		w = 4;
551 		y = 4;
552 		goto exit;
553 	}
554 
555 	r = drm_dp_dpcd_read(&intel_dp->aux,
556 			     intel_dp_get_su_x_granularity_offset(intel_dp),
557 			     &w, 2);
558 	if (r != 2)
559 		drm_dbg_kms(display->drm,
560 			    "Unable to read selective update x granularity\n");
561 	/*
562 	 * Spec says that if the value read is 0 the default granularity should
563 	 * be used instead.
564 	 */
565 	if (r != 2 || w == 0)
566 		w = 4;
567 
568 	r = drm_dp_dpcd_read(&intel_dp->aux,
569 			     intel_dp_get_su_y_granularity_offset(intel_dp),
570 			     &y, 1);
571 	if (r != 1) {
572 		drm_dbg_kms(display->drm,
573 			    "Unable to read selective update y granularity\n");
574 		y = 4;
575 	}
576 	if (y == 0)
577 		y = 1;
578 
579 exit:
580 	intel_dp->psr.su_w_granularity = w;
581 	intel_dp->psr.su_y_granularity = y;
582 }
583 
584 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
585 {
586 	struct intel_display *display = to_intel_display(intel_dp);
587 	int ret;
588 
589 	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
590 				    &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
591 	if (ret < 0)
592 		return;
593 
594 	if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
595 	      DP_PANEL_REPLAY_SUPPORT))
596 		return;
597 
598 	if (intel_dp_is_edp(intel_dp)) {
599 		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
600 			drm_dbg_kms(display->drm,
601 				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
602 			return;
603 		}
604 
605 		if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
606 		      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
607 			drm_dbg_kms(display->drm,
608 				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
609 			return;
610 		}
611 	}
612 
613 	intel_dp->psr.sink_panel_replay_support = true;
614 
615 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
616 	    DP_PANEL_REPLAY_SU_SUPPORT)
617 		intel_dp->psr.sink_panel_replay_su_support = true;
618 
619 	drm_dbg_kms(display->drm,
620 		    "Panel replay %sis supported by panel\n",
621 		    intel_dp->psr.sink_panel_replay_su_support ?
622 		    "selective_update " : "");
623 }
624 
625 static void _psr_init_dpcd(struct intel_dp *intel_dp)
626 {
627 	struct intel_display *display = to_intel_display(intel_dp);
628 	int ret;
629 
630 	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
631 				    sizeof(intel_dp->psr_dpcd));
632 	if (ret < 0)
633 		return;
634 
635 	if (!intel_dp->psr_dpcd[0])
636 		return;
637 
638 	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
639 		    intel_dp->psr_dpcd[0]);
640 
641 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
642 		drm_dbg_kms(display->drm,
643 			    "PSR support not currently available for this panel\n");
644 		return;
645 	}
646 
647 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
648 		drm_dbg_kms(display->drm,
649 			    "Panel lacks power state control, PSR cannot be enabled\n");
650 		return;
651 	}
652 
653 	intel_dp->psr.sink_support = true;
654 	intel_dp->psr.sink_sync_latency =
655 		intel_dp_get_sink_sync_latency(intel_dp);
656 
657 	if (DISPLAY_VER(display) >= 9 &&
658 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
659 		bool y_req = intel_dp->psr_dpcd[1] &
660 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
661 
662 		/*
663 		 * All panels that support PSR version 03h (PSR2 +
664 		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
665 		 * are only sure that it is going to be used when required by
666 		 * the panel. This way the panel is capable of doing selective
667 		 * updates without an AUX frame sync.
668 		 *
669 		 * To support panels with PSR version 02h, or version 03h
670 		 * without the Y-coordinate requirement, we would need to
671 		 * enable GTC first.
672 		 */
673 		intel_dp->psr.sink_psr2_support = y_req &&
674 			intel_alpm_aux_wake_supported(intel_dp);
675 		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
676 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
677 	}
678 }
679 
680 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
681 {
682 	_psr_init_dpcd(intel_dp);
683 
684 	_panel_replay_init_dpcd(intel_dp);
685 
686 	if (intel_dp->psr.sink_psr2_support ||
687 	    intel_dp->psr.sink_panel_replay_su_support)
688 		intel_dp_get_su_granularity(intel_dp);
689 }
690 
691 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
692 {
693 	struct intel_display *display = to_intel_display(intel_dp);
694 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
695 	u32 aux_clock_divider, aux_ctl;
696 	/* write DP_SET_POWER=D0 */
697 	static const u8 aux_msg[] = {
698 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
699 		[1] = (DP_SET_POWER >> 8) & 0xff,
700 		[2] = DP_SET_POWER & 0xff,
701 		[3] = 1 - 1,
702 		[4] = DP_SET_POWER_D0,
703 	};
704 	int i;
705 
706 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
707 	for (i = 0; i < sizeof(aux_msg); i += 4)
708 		intel_de_write(display,
709 			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
710 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
711 
712 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
713 
714 	/* Start with bits set for DDI_AUX_CTL register */
715 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
716 					     aux_clock_divider);
717 
718 	/* Select only valid bits for SRD_AUX_CTL */
719 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
720 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
721 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
722 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
723 
724 	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
725 		       aux_ctl);
726 }
727 
728 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
729 {
730 	struct intel_display *display = to_intel_display(intel_dp);
731 
732 	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
733 	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
734 		return false;
735 
736 	return panel_replay ?
737 		intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
738 		DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
739 		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
740 }
741 
742 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
743 				      const struct intel_crtc_state *crtc_state)
744 {
745 	u8 val = DP_PANEL_REPLAY_ENABLE |
746 		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
747 		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
748 		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
749 		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
750 	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
751 
752 	if (crtc_state->has_sel_update)
753 		val |= DP_PANEL_REPLAY_SU_ENABLE;
754 
755 	if (crtc_state->enable_psr2_su_region_et)
756 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
757 
758 	if (crtc_state->req_psr2_sdp_prior_scanline)
759 		panel_replay_config2 |=
760 			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
761 
762 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
763 
764 	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
765 			   panel_replay_config2);
766 }
767 
768 static void _psr_enable_sink(struct intel_dp *intel_dp,
769 			     const struct intel_crtc_state *crtc_state)
770 {
771 	struct intel_display *display = to_intel_display(intel_dp);
772 	u8 val = 0;
773 
774 	if (crtc_state->has_sel_update) {
775 		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
776 	} else {
777 		if (intel_dp->psr.link_standby)
778 			val |= DP_PSR_MAIN_LINK_ACTIVE;
779 
780 		if (DISPLAY_VER(display) >= 8)
781 			val |= DP_PSR_CRC_VERIFICATION;
782 	}
783 
784 	if (crtc_state->req_psr2_sdp_prior_scanline)
785 		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
786 
787 	if (crtc_state->enable_psr2_su_region_et)
788 		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
789 
790 	if (intel_dp->psr.entry_setup_frames > 0)
791 		val |= DP_PSR_FRAME_CAPTURE;
792 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
793 
794 	val |= DP_PSR_ENABLE;
795 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
796 }
797 
798 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
799 				  const struct intel_crtc_state *crtc_state)
800 {
801 	intel_alpm_enable_sink(intel_dp, crtc_state);
802 
803 	crtc_state->has_panel_replay ?
804 		_panel_replay_enable_sink(intel_dp, crtc_state) :
805 		_psr_enable_sink(intel_dp, crtc_state);
806 
807 	if (intel_dp_is_edp(intel_dp))
808 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
809 }
810 
811 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
812 {
813 	if (CAN_PANEL_REPLAY(intel_dp))
814 		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
815 				   DP_PANEL_REPLAY_ENABLE);
816 }
817 
818 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
819 {
820 	struct intel_display *display = to_intel_display(intel_dp);
821 	struct intel_connector *connector = intel_dp->attached_connector;
822 	u32 val = 0;
823 
824 	if (DISPLAY_VER(display) >= 11)
825 		val |= EDP_PSR_TP4_TIME_0us;
826 
827 	if (display->params.psr_safest_params) {
828 		val |= EDP_PSR_TP1_TIME_2500us;
829 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
830 		goto check_tp3_sel;
831 	}
832 
833 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
834 		val |= EDP_PSR_TP1_TIME_0us;
835 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
836 		val |= EDP_PSR_TP1_TIME_100us;
837 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
838 		val |= EDP_PSR_TP1_TIME_500us;
839 	else
840 		val |= EDP_PSR_TP1_TIME_2500us;
841 
842 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
843 		val |= EDP_PSR_TP2_TP3_TIME_0us;
844 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
845 		val |= EDP_PSR_TP2_TP3_TIME_100us;
846 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
847 		val |= EDP_PSR_TP2_TP3_TIME_500us;
848 	else
849 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
850 
851 	/*
852 	 * WA 0479: hsw,bdw
853 	 * "Do not skip both TP1 and TP2/TP3"
854 	 */
855 	if (DISPLAY_VER(display) < 9 &&
856 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
857 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
858 		val |= EDP_PSR_TP2_TP3_TIME_100us;
859 
860 check_tp3_sel:
861 	if (intel_dp_source_supports_tps3(display) &&
862 	    drm_dp_tps3_supported(intel_dp->dpcd))
863 		val |= EDP_PSR_TP_TP1_TP3;
864 	else
865 		val |= EDP_PSR_TP_TP1_TP2;
866 
867 	return val;
868 }
869 
870 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
871 {
872 	struct intel_display *display = to_intel_display(intel_dp);
873 	struct intel_connector *connector = intel_dp->attached_connector;
874 	int idle_frames;
875 
876 	/* Let's use 6 as the minimum to cover all known cases including the
877 	 * off-by-one issue that HW has in some cases.
878 	 */
879 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
880 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
881 
882 	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
883 		idle_frames = 0xf;
884 
885 	return idle_frames;
886 }
887 
888 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
889 {
890 	struct intel_display *display = to_intel_display(intel_dp);
891 	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
892 	struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
893 
894 	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
895 		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
896 		intel_dp->psr.active_non_psr_pipes ||
897 		READ_ONCE(vblank->enabled);
898 }
899 
900 static void hsw_activate_psr1(struct intel_dp *intel_dp)
901 {
902 	struct intel_display *display = to_intel_display(intel_dp);
903 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
904 	u32 max_sleep_time = 0x1f;
905 	u32 val = EDP_PSR_ENABLE;
906 
907 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
908 
909 	if (DISPLAY_VER(display) < 20)
910 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
911 
912 	if (display->platform.haswell)
913 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
914 
915 	if (intel_dp->psr.link_standby)
916 		val |= EDP_PSR_LINK_STANDBY;
917 
918 	val |= intel_psr1_get_tp_time(intel_dp);
919 
920 	if (DISPLAY_VER(display) >= 8)
921 		val |= EDP_PSR_CRC_ENABLE;
922 
923 	if (DISPLAY_VER(display) >= 20)
924 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
925 
926 	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
927 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
928 
929 	/* Wa_16025596647 */
930 	if ((DISPLAY_VER(display) == 20 ||
931 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
932 	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
933 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
934 								       intel_dp->psr.pipe,
935 								       true);
936 }
937 
938 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
939 {
940 	struct intel_display *display = to_intel_display(intel_dp);
941 	struct intel_connector *connector = intel_dp->attached_connector;
942 	u32 val = 0;
943 
944 	if (display->params.psr_safest_params)
945 		return EDP_PSR2_TP2_TIME_2500us;
946 
947 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
948 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
949 		val |= EDP_PSR2_TP2_TIME_50us;
950 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
951 		val |= EDP_PSR2_TP2_TIME_100us;
952 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
953 		val |= EDP_PSR2_TP2_TIME_500us;
954 	else
955 		val |= EDP_PSR2_TP2_TIME_2500us;
956 
957 	return val;
958 }
959 
960 static int psr2_block_count_lines(struct intel_dp *intel_dp)
961 {
962 	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
963 		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
964 }
965 
966 static int psr2_block_count(struct intel_dp *intel_dp)
967 {
968 	return psr2_block_count_lines(intel_dp) / 4;
969 }
970 
971 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
972 {
973 	u8 frames_before_su_entry;
974 
975 	frames_before_su_entry = max_t(u8,
976 				       intel_dp->psr.sink_sync_latency + 1,
977 				       2);
978 
979 	/* Entry setup frames must be at least 1 less than frames before SU entry */
980 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
981 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
982 
983 	return frames_before_su_entry;
984 }
985 
986 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
987 {
988 	struct intel_display *display = to_intel_display(intel_dp);
989 	struct intel_psr *psr = &intel_dp->psr;
990 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
991 
992 	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
993 		u32 val = psr->su_region_et_enabled ?
994 			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
995 
996 		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
997 			val |= EDP_PSR2_SU_SDP_SCANLINE;
998 
999 		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
1000 			       val);
1001 	}
1002 
1003 	intel_de_rmw(display,
1004 		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1005 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1006 
1007 	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1008 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1009 }
1010 
1011 static void hsw_activate_psr2(struct intel_dp *intel_dp)
1012 {
1013 	struct intel_display *display = to_intel_display(intel_dp);
1014 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1015 	u32 val = EDP_PSR2_ENABLE;
1016 	u32 psr_val = 0;
1017 	u8 idle_frames;
1018 
1019 	/* Wa_16025596647 */
1020 	if ((DISPLAY_VER(display) == 20 ||
1021 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1022 	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
1023 		idle_frames = 0;
1024 	else
1025 		idle_frames = psr_compute_idle_frames(intel_dp);
1026 	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
1027 
1028 	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
1029 		val |= EDP_SU_TRACK_ENABLE;
1030 
1031 	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1032 		val |= EDP_Y_COORDINATE_ENABLE;
1033 
1034 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1035 
1036 	val |= intel_psr2_get_tp_time(intel_dp);
1037 
1038 	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1039 		if (psr2_block_count(intel_dp) > 2)
1040 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1041 		else
1042 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1043 	}
1044 
1045 	/* Wa_22012278275:adl-p */
1046 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1047 		static const u8 map[] = {
1048 			2, /* 5 lines */
1049 			1, /* 6 lines */
1050 			0, /* 7 lines */
1051 			3, /* 8 lines */
1052 			6, /* 9 lines */
1053 			5, /* 10 lines */
1054 			4, /* 11 lines */
1055 			7, /* 12 lines */
1056 		};
1057 		/*
1058 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1059 		 * comments below for more information
1060 		 */
1061 		int tmp;
1062 
1063 		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1064 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1065 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1066 
1067 		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1068 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1069 	} else if (DISPLAY_VER(display) >= 20) {
1070 		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1071 	} else if (DISPLAY_VER(display) >= 12) {
1072 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1073 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1074 	} else if (DISPLAY_VER(display) >= 9) {
1075 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1076 		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1077 	}
1078 
1079 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1080 		val |= EDP_PSR2_SU_SDP_SCANLINE;
1081 
1082 	if (DISPLAY_VER(display) >= 20)
1083 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1084 
1085 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1086 		u32 tmp;
1087 
1088 		tmp = intel_de_read(display,
1089 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1090 		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1091 	} else if (HAS_PSR2_SEL_FETCH(display)) {
1092 		intel_de_write(display,
1093 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1094 	}
1095 
1096 	if (intel_dp->psr.su_region_et_enabled)
1097 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1098 
1099 	/*
1100 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1101 	 * recommends keeping this bit unset while PSR2 is enabled.
1102 	 */
1103 	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1104 
1105 	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1106 }
1107 
1108 static bool
1109 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1110 {
1111 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1112 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1113 	else if (DISPLAY_VER(display) >= 12)
1114 		return cpu_transcoder == TRANSCODER_A;
1115 	else if (DISPLAY_VER(display) >= 9)
1116 		return cpu_transcoder == TRANSCODER_EDP;
1117 	else
1118 		return false;
1119 }
1120 
1121 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1122 {
1123 	if (!crtc_state->hw.active)
1124 		return 0;
1125 
1126 	return DIV_ROUND_UP(1000 * 1000,
1127 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1128 }
1129 
1130 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1131 				     u32 idle_frames)
1132 {
1133 	struct intel_display *display = to_intel_display(intel_dp);
1134 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1135 
1136 	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1137 		     EDP_PSR2_IDLE_FRAMES_MASK,
1138 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1139 }
1140 
1141 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1142 {
1143 	struct intel_display *display = to_intel_display(intel_dp);
1144 
1145 	psr2_program_idle_frames(intel_dp, 0);
1146 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1147 }
1148 
1149 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1150 {
1151 	struct intel_display *display = to_intel_display(intel_dp);
1152 
1153 	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1154 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1155 }
1156 
1157 static void tgl_dc3co_disable_work(struct work_struct *work)
1158 {
1159 	struct intel_dp *intel_dp =
1160 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1161 
1162 	mutex_lock(&intel_dp->psr.lock);
1163 	/* If delayed work is pending, it is not idle */
1164 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1165 		goto unlock;
1166 
1167 	tgl_psr2_disable_dc3co(intel_dp);
1168 unlock:
1169 	mutex_unlock(&intel_dp->psr.lock);
1170 }
1171 
1172 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1173 {
1174 	if (!intel_dp->psr.dc3co_exitline)
1175 		return;
1176 
1177 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1178 	/* Before PSR2 exit disallow dc3co */
1179 	tgl_psr2_disable_dc3co(intel_dp);
1180 }
1181 
1182 static bool
1183 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1184 			      struct intel_crtc_state *crtc_state)
1185 {
1186 	struct intel_display *display = to_intel_display(intel_dp);
1187 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1188 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1189 	enum port port = dig_port->base.port;
1190 
1191 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1192 		return pipe <= PIPE_B && port <= PORT_B;
1193 	else
1194 		return pipe == PIPE_A && port == PORT_A;
1195 }
1196 
1197 static void
1198 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1199 				  struct intel_crtc_state *crtc_state)
1200 {
1201 	struct intel_display *display = to_intel_display(intel_dp);
1202 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1203 	struct i915_power_domains *power_domains = &display->power.domains;
1204 	u32 exit_scanlines;
1205 
1206 	/*
1207 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1208 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1209 	 * is applied. B.Specs:49196
1210 	 */
1211 	return;
1212 
1213 	/*
1214 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
1215 	 * TODO: when the issue is addressed, this restriction should be removed.
1216 	 */
1217 	if (crtc_state->enable_psr2_sel_fetch)
1218 		return;
1219 
1220 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1221 		return;
1222 
1223 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1224 		return;
1225 
1226 	/* Wa_16011303918:adl-p */
1227 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1228 		return;
1229 
1230 	/*
1231 	 * DC3CO Exit time 200us B.Spec 49196
1232 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1233 	 */
1234 	exit_scanlines =
1235 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
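	/*
	 * Worked example (illustrative numbers only, not from the spec): for a
	 * typical 1920x1080@60 mode (148.5 MHz pixel clock, htotal 2200) the
	 * line time is ~14.8 us, so 200 us rounds up to 14 lines, giving
	 * exit_scanlines = 15 and dc3co_exitline = 1080 - 15 = 1065.
	 */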
1236 
1237 	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1238 		return;
1239 
1240 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1241 }
1242 
1243 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1244 					      struct intel_crtc_state *crtc_state)
1245 {
1246 	struct intel_display *display = to_intel_display(intel_dp);
1247 
1248 	if (!display->params.enable_psr2_sel_fetch &&
1249 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1250 		drm_dbg_kms(display->drm,
1251 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1252 		return false;
1253 	}
1254 
1255 	if (crtc_state->uapi.async_flip) {
1256 		drm_dbg_kms(display->drm,
1257 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1258 		return false;
1259 	}
1260 
1261 	return crtc_state->enable_psr2_sel_fetch = true;
1262 }
1263 
1264 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1265 				   struct intel_crtc_state *crtc_state)
1266 {
1267 	struct intel_display *display = to_intel_display(intel_dp);
1268 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1269 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1270 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1271 	u16 y_granularity = 0;
1272 
1273 	/* PSR2 HW only sends full lines so we only need to validate the width */
1274 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1275 		return false;
1276 
1277 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1278 		return false;
1279 
1280 	/* HW tracking is only aligned to 4 lines */
1281 	if (!crtc_state->enable_psr2_sel_fetch)
1282 		return intel_dp->psr.su_y_granularity == 4;
1283 
1284 	/*
1285 	 * adl_p and mtl platforms have 1 line granularity.
1286 	 * For other platforms with SW tracking we can adjust the y coordinates
1287 	 * to match the sink requirement if it is a multiple of 4.
1288 	 */
1289 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1290 		y_granularity = intel_dp->psr.su_y_granularity;
1291 	else if (intel_dp->psr.su_y_granularity <= 2)
1292 		y_granularity = 4;
1293 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1294 		y_granularity = intel_dp->psr.su_y_granularity;
1295 
1296 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1297 		return false;
1298 
1299 	if (crtc_state->dsc.compression_enable &&
1300 	    vdsc_cfg->slice_height % y_granularity)
1301 		return false;
1302 
1303 	crtc_state->su_y_granularity = y_granularity;
1304 	return true;
1305 }
1306 
1307 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1308 							struct intel_crtc_state *crtc_state)
1309 {
1310 	struct intel_display *display = to_intel_display(intel_dp);
1311 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1312 	u32 hblank_total, hblank_ns, req_ns;
1313 
1314 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1315 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1316 
1317 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1318 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
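	/*
	 * Worked example (illustrative numbers only): with 4 lanes on an HBR
	 * link (port_clock = 270000 kHz, i.e. a 270 MHz symbol clock) this is
	 * req_ns = ((60 / 4) + 11) * 1000 / 270 ~= 96 ns, so the indication
	 * fits whenever more than ~196 ns of hblank is available.
	 */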
1319 
1320 	if ((hblank_ns - req_ns) > 100)
1321 		return true;
1322 
1323 	/* Not supported <13 / Wa_22012279113:adl-p */
1324 	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1325 		return false;
1326 
1327 	crtc_state->req_psr2_sdp_prior_scanline = true;
1328 	return true;
1329 }
1330 
1331 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1332 					const struct drm_display_mode *adjusted_mode)
1333 {
1334 	struct intel_display *display = to_intel_display(intel_dp);
1335 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1336 	int entry_setup_frames = 0;
1337 
1338 	if (psr_setup_time < 0) {
1339 		drm_dbg_kms(display->drm,
1340 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1341 			    intel_dp->psr_dpcd[1]);
1342 		return -ETIME;
1343 	}
1344 
1345 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1346 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1347 		if (DISPLAY_VER(display) >= 20) {
1348 			/* setup entry frames can be up to 3 frames */
1349 			entry_setup_frames = 1;
1350 			drm_dbg_kms(display->drm,
1351 				    "PSR setup entry frames %d\n",
1352 				    entry_setup_frames);
1353 		} else {
1354 			drm_dbg_kms(display->drm,
1355 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1356 				    psr_setup_time);
1357 			return -ETIME;
1358 		}
1359 	}
1360 
1361 	return entry_setup_frames;
1362 }
1363 
1364 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1365 				       const struct intel_crtc_state *crtc_state,
1366 				       bool aux_less)
1367 {
1368 	struct intel_display *display = to_intel_display(intel_dp);
1369 	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1370 		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1371 	int wake_lines;
1372 
1373 	if (aux_less)
1374 		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1375 	else
1376 		wake_lines = DISPLAY_VER(display) < 20 ?
1377 			psr2_block_count_lines(intel_dp) :
1378 			intel_dp->alpm_parameters.io_wake_lines;
1379 
1380 	if (crtc_state->req_psr2_sdp_prior_scanline)
1381 		vblank -= 1;
1382 
1383 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1384 	if (vblank < wake_lines)
1385 		return false;
1386 
1387 	return true;
1388 }
1389 
1390 static bool alpm_config_valid(struct intel_dp *intel_dp,
1391 			      const struct intel_crtc_state *crtc_state,
1392 			      bool aux_less)
1393 {
1394 	struct intel_display *display = to_intel_display(intel_dp);
1395 
1396 	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1397 		drm_dbg_kms(display->drm,
1398 			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1399 		return false;
1400 	}
1401 
1402 	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1403 		drm_dbg_kms(display->drm,
1404 			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1405 		return false;
1406 	}
1407 
1408 	return true;
1409 }
1410 
1411 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1412 				    struct intel_crtc_state *crtc_state)
1413 {
1414 	struct intel_display *display = to_intel_display(intel_dp);
1415 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1416 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1417 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1418 
1419 	if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
1420 		return false;
1421 
1422 	/* JSL and EHL only support eDP 1.3 */
1423 	if (display->platform.jasperlake || display->platform.elkhartlake) {
1424 		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1425 		return false;
1426 	}
1427 
1428 	/* Wa_16011181250 */
1429 	if (display->platform.rocketlake || display->platform.alderlake_s ||
1430 	    display->platform.dg2) {
1431 		drm_dbg_kms(display->drm,
1432 			    "PSR2 is defeatured for this platform\n");
1433 		return false;
1434 	}
1435 
1436 	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1437 		drm_dbg_kms(display->drm,
1438 			    "PSR2 not completely functional in this stepping\n");
1439 		return false;
1440 	}
1441 
1442 	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1443 		drm_dbg_kms(display->drm,
1444 			    "PSR2 not supported in transcoder %s\n",
1445 			    transcoder_name(crtc_state->cpu_transcoder));
1446 		return false;
1447 	}
1448 
1449 	/*
1450 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1451 	 * resolution requires DSC to be enabled, priority is given to DSC
1452 	 * over PSR2.
1453 	 */
1454 	if (crtc_state->dsc.compression_enable &&
1455 	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
1456 		drm_dbg_kms(display->drm,
1457 			    "PSR2 cannot be enabled since DSC is enabled\n");
1458 		return false;
1459 	}
1460 
1461 	if (DISPLAY_VER(display) >= 20) {
1462 		psr_max_h = crtc_hdisplay;
1463 		psr_max_v = crtc_vdisplay;
1464 		max_bpp = crtc_state->pipe_bpp;
1465 	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1466 		psr_max_h = 5120;
1467 		psr_max_v = 3200;
1468 		max_bpp = 30;
1469 	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1470 		psr_max_h = 4096;
1471 		psr_max_v = 2304;
1472 		max_bpp = 24;
1473 	} else if (DISPLAY_VER(display) == 9) {
1474 		psr_max_h = 3640;
1475 		psr_max_v = 2304;
1476 		max_bpp = 24;
1477 	}
1478 
1479 	if (crtc_state->pipe_bpp > max_bpp) {
1480 		drm_dbg_kms(display->drm,
1481 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1482 			    crtc_state->pipe_bpp, max_bpp);
1483 		return false;
1484 	}
1485 
1486 	/* Wa_16011303918:adl-p */
1487 	if (crtc_state->vrr.enable &&
1488 	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1489 		drm_dbg_kms(display->drm,
1490 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1491 		return false;
1492 	}
1493 
1494 	if (!alpm_config_valid(intel_dp, crtc_state, false))
1495 		return false;
1496 
1497 	if (!crtc_state->enable_psr2_sel_fetch &&
1498 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1499 		drm_dbg_kms(display->drm,
1500 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1501 			    crtc_hdisplay, crtc_vdisplay,
1502 			    psr_max_h, psr_max_v);
1503 		return false;
1504 	}
1505 
1506 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1507 
1508 	return true;
1509 }
1510 
1511 static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1512 					  struct intel_crtc_state *crtc_state)
1513 {
1514 	struct intel_display *display = to_intel_display(intel_dp);
1515 
1516 	if (HAS_PSR2_SEL_FETCH(display) &&
1517 	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1518 	    !HAS_PSR_HW_TRACKING(display)) {
1519 		drm_dbg_kms(display->drm,
1520 			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1521 		goto unsupported;
1522 	}
1523 
1524 	if (!sel_update_global_enabled(intel_dp)) {
1525 		drm_dbg_kms(display->drm,
1526 			    "Selective update disabled by flag\n");
1527 		goto unsupported;
1528 	}
1529 
1530 	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1531 		goto unsupported;
1532 
1533 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1534 		drm_dbg_kms(display->drm,
1535 			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1536 		goto unsupported;
1537 	}
1538 
1539 	if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1540 					     !intel_dp->psr.sink_panel_replay_su_support))
1541 		goto unsupported;
1542 
1543 	if (crtc_state->crc_enabled) {
1544 		drm_dbg_kms(display->drm,
1545 			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1546 		goto unsupported;
1547 	}
1548 
1549 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1550 		drm_dbg_kms(display->drm,
1551 			    "Selective update not enabled, SU granularity not compatible\n");
1552 		goto unsupported;
1553 	}
1554 
1555 	crtc_state->enable_psr2_su_region_et =
1556 		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1557 
1558 	return true;
1559 
1560 unsupported:
1561 	crtc_state->enable_psr2_sel_fetch = false;
1562 	return false;
1563 }
1564 
1565 static bool _psr_compute_config(struct intel_dp *intel_dp,
1566 				struct intel_crtc_state *crtc_state)
1567 {
1568 	struct intel_display *display = to_intel_display(intel_dp);
1569 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1570 	int entry_setup_frames;
1571 
1572 	if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
1573 		return false;
1574 
1575 	/*
1576 	 * Currently PSR doesn't work reliably with VRR enabled.
1577 	 */
1578 	if (crtc_state->vrr.enable)
1579 		return false;
1580 
1581 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1582 
1583 	if (entry_setup_frames >= 0) {
1584 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1585 	} else {
1586 		drm_dbg_kms(display->drm,
1587 			    "PSR condition failed: PSR setup timing not met\n");
1588 		return false;
1589 	}
1590 
1591 	return true;
1592 }
1593 
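/*
 * Panel Replay counterpart of _psr_compute_config(): checks capability, the
 * global enable flag and CRC, plus the extra eDP-only restrictions (pipe A/B
 * only, no 128b/132b link, no HDCP, valid ALPM configuration).
 */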
1594 static bool
1595 _panel_replay_compute_config(struct intel_dp *intel_dp,
1596 			     const struct intel_crtc_state *crtc_state,
1597 			     const struct drm_connector_state *conn_state)
1598 {
1599 	struct intel_display *display = to_intel_display(intel_dp);
1600 	struct intel_connector *connector =
1601 		to_intel_connector(conn_state->connector);
1602 	struct intel_hdcp *hdcp = &connector->hdcp;
1603 
1604 	if (!CAN_PANEL_REPLAY(intel_dp))
1605 		return false;
1606 
1607 	if (!panel_replay_global_enabled(intel_dp)) {
1608 		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1609 		return false;
1610 	}
1611 
1612 	if (crtc_state->crc_enabled) {
1613 		drm_dbg_kms(display->drm,
1614 			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1615 		return false;
1616 	}
1617 
1618 	if (!intel_dp_is_edp(intel_dp))
1619 		return true;
1620 
1621 	/* Remaining checks are for eDP only */
1622 
1623 	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1624 	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1625 		return false;
1626 
1627 	/* 128b/132b Panel Replay is not supported on eDP */
1628 	if (intel_dp_is_uhbr(crtc_state)) {
1629 		drm_dbg_kms(display->drm,
1630 			    "Panel Replay is not supported with 128b/132b\n");
1631 		return false;
1632 	}
1633 
1634 	/* HW will not allow Panel Replay on eDP when HDCP enabled */
1635 	if (conn_state->content_protection ==
1636 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1637 	    (conn_state->content_protection ==
1638 	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1639 	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1640 		drm_dbg_kms(display->drm,
1641 			    "Panel Replay is not supported with HDCP\n");
1642 		return false;
1643 	}
1644 
1645 	if (!alpm_config_valid(intel_dp, crtc_state, true))
1646 		return false;
1647 
1648 	return true;
1649 }
1650 
1651 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1652 					   struct intel_crtc_state *crtc_state)
1653 {
1654 	struct intel_display *display = to_intel_display(intel_dp);
1655 
1656 	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1657 		!crtc_state->has_sel_update);
1658 }
1659 
1660 void intel_psr_compute_config(struct intel_dp *intel_dp,
1661 			      struct intel_crtc_state *crtc_state,
1662 			      struct drm_connector_state *conn_state)
1663 {
1664 	struct intel_display *display = to_intel_display(intel_dp);
1665 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1666 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1667 	struct intel_crtc *crtc;
1668 	u8 active_pipes = 0;
1669 
1670 	if (!psr_global_enabled(intel_dp)) {
1671 		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1672 		return;
1673 	}
1674 
1675 	if (intel_dp->psr.sink_not_reliable) {
1676 		drm_dbg_kms(display->drm,
1677 			    "PSR sink implementation is not reliable\n");
1678 		return;
1679 	}
1680 
1681 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1682 		drm_dbg_kms(display->drm,
1683 			    "PSR condition failed: Interlaced mode enabled\n");
1684 		return;
1685 	}
1686 
1687 	/*
1688 	 * FIXME figure out what is wrong with PSR+joiner and
1689 	 * fix it. Presumably something related to the fact that
1690 	 * PSR is a transcoder level feature.
1691 	 */
1692 	if (crtc_state->joiner_pipes) {
1693 		drm_dbg_kms(display->drm,
1694 			    "PSR disabled due to joiner\n");
1695 		return;
1696 	}
1697 
1698 	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1699 								    crtc_state,
1700 								    conn_state);
1701 
1702 	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1703 		_psr_compute_config(intel_dp, crtc_state);
1704 
1705 	if (!crtc_state->has_psr)
1706 		return;
1707 
1708 	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1709 
1710 	/* Wa_18037818876 */
1711 	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
1712 		crtc_state->has_psr = false;
1713 		drm_dbg_kms(display->drm,
1714 			    "PSR disabled to workaround PSR FSM hang issue\n");
1715 	}
1716 
1717 	/* Rest is for Wa_16025596647 */
1718 	if (DISPLAY_VER(display) != 20 &&
1719 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1720 		return;
1721 
1722 	/* Not needed by Panel Replay */
1723 	if (crtc_state->has_panel_replay)
1724 		return;
1725 
1726 	/* We ignore possible secondary PSR/Panel Replay capable eDP */
1727 	for_each_intel_crtc(display->drm, crtc)
1728 		active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1729 
1730 	active_pipes = intel_calc_active_pipes(state, active_pipes);
1731 
1732 	crtc_state->active_non_psr_pipes = active_pipes &
1733 		~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1734 }
1735 
1736 void intel_psr_get_config(struct intel_encoder *encoder,
1737 			  struct intel_crtc_state *pipe_config)
1738 {
1739 	struct intel_display *display = to_intel_display(encoder);
1740 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1741 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1742 	struct intel_dp *intel_dp;
1743 	u32 val;
1744 
1745 	if (!dig_port)
1746 		return;
1747 
1748 	intel_dp = &dig_port->dp;
1749 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1750 		return;
1751 
1752 	mutex_lock(&intel_dp->psr.lock);
1753 	if (!intel_dp->psr.enabled)
1754 		goto unlock;
1755 
1756 	if (intel_dp->psr.panel_replay_enabled) {
1757 		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1758 	} else {
1759 		/*
1760 		 * Not possible to read the EDP_PSR/PSR2_CTL registers here, as PSR
1761 		 * gets enabled/disabled at runtime by frontbuffer tracking and others.
1762 		 */
1763 		pipe_config->has_psr = true;
1764 	}
1765 
1766 	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1767 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1768 
1769 	if (!intel_dp->psr.sel_update_enabled)
1770 		goto unlock;
1771 
1772 	if (HAS_PSR2_SEL_FETCH(display)) {
1773 		val = intel_de_read(display,
1774 				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1775 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1776 			pipe_config->enable_psr2_sel_fetch = true;
1777 	}
1778 
1779 	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1780 
1781 	if (DISPLAY_VER(display) >= 12) {
1782 		val = intel_de_read(display,
1783 				    TRANS_EXITLINE(display, cpu_transcoder));
1784 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1785 	}
1786 unlock:
1787 	mutex_unlock(&intel_dp->psr.lock);
1788 }
1789 
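/*
 * Arm the source hardware for the already enabled PSR configuration. PSR1,
 * PSR2 and Panel Replay are mutually exclusive, so exactly one of them is
 * activated here.
 */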
1790 static void intel_psr_activate(struct intel_dp *intel_dp)
1791 {
1792 	struct intel_display *display = to_intel_display(intel_dp);
1793 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1794 
1795 	drm_WARN_ON(display->drm,
1796 		    transcoder_has_psr2(display, cpu_transcoder) &&
1797 		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1798 
1799 	drm_WARN_ON(display->drm,
1800 		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1801 
1802 	drm_WARN_ON(display->drm, intel_dp->psr.active);
1803 
1804 	drm_WARN_ON(display->drm, !intel_dp->psr.enabled);
1805 
1806 	lockdep_assert_held(&intel_dp->psr.lock);
1807 
1808 	/* PSR1, PSR2 and Panel Replay are mutually exclusive. */
1809 	if (intel_dp->psr.panel_replay_enabled)
1810 		dg2_activate_panel_replay(intel_dp);
1811 	else if (intel_dp->psr.sel_update_enabled)
1812 		hsw_activate_psr2(intel_dp);
1813 	else
1814 		hsw_activate_psr1(intel_dp);
1815 
1816 	intel_dp->psr.active = true;
1817 }
1818 
1819 /*
1820  * Wa_16013835468
1821  * Wa_14015648006
1822  */
1823 static void wm_optimization_wa(struct intel_dp *intel_dp,
1824 			       const struct intel_crtc_state *crtc_state)
1825 {
1826 	struct intel_display *display = to_intel_display(intel_dp);
1827 	enum pipe pipe = intel_dp->psr.pipe;
1828 	bool activate = false;
1829 
1830 	/* Wa_14015648006 */
1831 	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1832 		activate = true;
1833 
1834 	/* Wa_16013835468 */
1835 	if (DISPLAY_VER(display) == 12 &&
1836 	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1837 	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1838 		activate = true;
1839 
1840 	if (activate)
1841 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1842 			     0, LATENCY_REPORTING_REMOVED(pipe));
1843 	else
1844 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1845 			     LATENCY_REPORTING_REMOVED(pipe), 0);
1846 }
1847 
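/*
 * Program the source side of PSR/Panel Replay: AUX setup on HSW/BDW, debug
 * masks, PSR interrupts, the DC3CO exitline, HW tracking vs. selective fetch
 * selection and the workarounds that must be in place before activation.
 */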
1848 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1849 				    const struct intel_crtc_state *crtc_state)
1850 {
1851 	struct intel_display *display = to_intel_display(intel_dp);
1852 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1853 	u32 mask = 0;
1854 
1855 	/*
1856 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1857 	 * SKL+ use hardcoded values for PSR AUX transactions.
1858 	 */
1859 	if (DISPLAY_VER(display) < 9)
1860 		hsw_psr_setup_aux(intel_dp);
1861 
1862 	/*
1863 	 * Per spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1864 	 * mask LPSP to avoid a dependency on other drivers that might block
1865 	 * runtime_pm, besides preventing other HW tracking issues, now that we
1866 	 * can rely on frontbuffer tracking.
1867 	 *
1868 	 * From bspec prior to LunarLake:
1869 	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1870 	 * panel replay mode.
1871 	 *
1872 	 * From bspec beyond LunarLake:
1873 	 * Panel Replay on DP: No bits are applicable
1874 	 * Panel Replay on eDP: All bits are applicable
1875 	 */
1876 	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1877 		mask = EDP_PSR_DEBUG_MASK_HPD;
1878 
1879 	if (intel_dp_is_edp(intel_dp)) {
1880 		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1881 
1882 		/*
1883 		 * For some unknown reason on HSW non-ULT (or at least on
1884 		 * Dell Latitude E6540) external displays start to flicker
1885 		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1886 		 * higher than should be possible with an external display.
1887 		 * As a workaround leave LPSP unmasked to prevent PSR entry
1888 		 * when external displays are active.
1889 		 */
1890 		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
1891 			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1892 
1893 		if (DISPLAY_VER(display) < 20)
1894 			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1895 
1896 		/*
1897 		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1898 		 * registers in order to keep the CURSURFLIVE tricks working :(
1899 		 */
1900 		if (IS_DISPLAY_VER(display, 9, 10))
1901 			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1902 
1903 		/* allow PSR with sprite enabled */
1904 		if (display->platform.haswell)
1905 			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1906 	}
1907 
1908 	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1909 
1910 	psr_irq_control(intel_dp);
1911 
1912 	/*
1913 	 * TODO: if future platforms support DC3CO in more than one
1914 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1915 	 */
1916 	if (intel_dp->psr.dc3co_exitline)
1917 		intel_de_rmw(display,
1918 			     TRANS_EXITLINE(display, cpu_transcoder),
1919 			     EXITLINE_MASK,
1920 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1921 
1922 	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1923 		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1924 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1925 			     IGNORE_PSR2_HW_TRACKING : 0);
1926 
1927 	/*
1928 	 * Wa_16013835468
1929 	 * Wa_14015648006
1930 	 */
1931 	wm_optimization_wa(intel_dp, crtc_state);
1932 
1933 	if (intel_dp->psr.sel_update_enabled) {
1934 		if (DISPLAY_VER(display) == 9)
1935 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
1936 				     PSR2_VSC_ENABLE_PROG_HEADER |
1937 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1938 
1939 		/*
1940 		 * Wa_16014451276:adlp,mtl[a0,b0]
1941 		 * All supported adlp panels have 1-based X granularity; this may
1942 		 * cause issues if unsupported panels are used.
1943 		 */
1944 		if (!intel_dp->psr.panel_replay_enabled &&
1945 		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1946 		     display->platform.alderlake_p))
1947 			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
1948 				     0, ADLP_1_BASED_X_GRANULARITY);
1949 
1950 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1951 		if (!intel_dp->psr.panel_replay_enabled &&
1952 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1953 			intel_de_rmw(display,
1954 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1955 				     0,
1956 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1957 		else if (display->platform.alderlake_p)
1958 			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1959 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1960 	}
1961 
1962 	/* Wa_16025596647 */
1963 	if ((DISPLAY_VER(display) == 20 ||
1964 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1965 	    !intel_dp->psr.panel_replay_enabled)
1966 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
1967 
1968 	intel_alpm_configure(intel_dp, crtc_state);
1969 }
1970 
1971 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1972 {
1973 	struct intel_display *display = to_intel_display(intel_dp);
1974 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1975 	u32 val;
1976 
1977 	if (intel_dp->psr.panel_replay_enabled)
1978 		goto no_err;
1979 
1980 	/*
1981 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1982 	 * will still keep the error set even after the reset done in the
1983 	 * irq_preinstall and irq_uninstall hooks.
1984 	 * Enabling PSR in this situation causes the screen to freeze the
1985 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1986 	 * to avoid any rendering problems.
1987 	 */
1988 	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1989 	val &= psr_irq_psr_error_bit_get(intel_dp);
1990 	if (val) {
1991 		intel_dp->psr.sink_not_reliable = true;
1992 		drm_dbg_kms(display->drm,
1993 			    "PSR interruption error set, not enabling PSR\n");
1994 		return false;
1995 	}
1996 
1997 no_err:
1998 	return true;
1999 }
2000 
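/*
 * Latch the PSR/Panel Replay configuration from the CRTC state into
 * intel_dp->psr, enable the sink and the source side, and activate PSR.
 * Called with intel_dp->psr.lock held.
 */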
2001 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
2002 				    const struct intel_crtc_state *crtc_state)
2003 {
2004 	struct intel_display *display = to_intel_display(intel_dp);
2005 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2006 	u32 val;
2007 
2008 	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
2009 
2010 	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
2011 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
2012 	intel_dp->psr.busy_frontbuffer_bits = 0;
2013 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
2014 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
2015 	/* DC5/DC6 requires at least 6 idle frames */
2016 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
2017 	intel_dp->psr.dc3co_exit_delay = val;
2018 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
2019 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
2020 	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
2021 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2022 	intel_dp->psr.req_psr2_sdp_prior_scanline =
2023 		crtc_state->req_psr2_sdp_prior_scanline;
2024 	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
2025 	intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
2026 
2027 	if (!psr_interrupt_error_check(intel_dp))
2028 		return;
2029 
2030 	if (intel_dp->psr.panel_replay_enabled)
2031 		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2032 	else
2033 		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2034 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2035 
2036 	/*
2037 	 * The sink PSR/Panel Replay enable bit is written here only for PSR; for
2038 	 * Panel Replay it has already been written at this point. Sink ALPM is
2039 	 * enabled here for both PSR and Panel Replay, see
2040 	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2041 	 *  - Selective Update
2042 	 *  - Region Early Transport
2043 	 *  - Selective Update Region Scanline Capture
2044 	 *  - VSC_SDP_CRC
2045 	 *  - HPD on different Errors
2046 	 *  - CRC verification
2047 	 * are written for PSR and Panel Replay here.
2048 	 */
2049 	intel_psr_enable_sink(intel_dp, crtc_state);
2050 
2051 	if (intel_dp_is_edp(intel_dp))
2052 		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2053 
2054 	intel_psr_enable_source(intel_dp, crtc_state);
2055 	intel_dp->psr.enabled = true;
2056 	intel_dp->psr.pause_counter = 0;
2057 
2058 	/*
2059 	 * Link_ok is sticky and set here on PSR enable. We can assume link
2060 	 * training is complete as we never continue to PSR enable with an
2061 	 * untrained link. Link_ok is kept set until the first short pulse
2062 	 * interrupt. This is targeted at working around panels that report a
2063 	 * bad link after PSR is enabled.
2064 	 */
2065 	intel_dp->psr.link_ok = true;
2066 
2067 	intel_psr_activate(intel_dp);
2068 }
2069 
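/*
 * Deactivate PSR/Panel Replay on the source by clearing the relevant enable
 * bit. If PSR was never activated, only sanity-check that the hardware
 * enable bits are indeed clear.
 */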
2070 static void intel_psr_exit(struct intel_dp *intel_dp)
2071 {
2072 	struct intel_display *display = to_intel_display(intel_dp);
2073 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2074 	u32 val;
2075 
2076 	if (!intel_dp->psr.active) {
2077 		if (transcoder_has_psr2(display, cpu_transcoder)) {
2078 			val = intel_de_read(display,
2079 					    EDP_PSR2_CTL(display, cpu_transcoder));
2080 			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2081 		}
2082 
2083 		val = intel_de_read(display,
2084 				    psr_ctl_reg(display, cpu_transcoder));
2085 		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2086 
2087 		return;
2088 	}
2089 
2090 	if (intel_dp->psr.panel_replay_enabled) {
2091 		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2092 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2093 	} else if (intel_dp->psr.sel_update_enabled) {
2094 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2095 
2096 		val = intel_de_rmw(display,
2097 				   EDP_PSR2_CTL(display, cpu_transcoder),
2098 				   EDP_PSR2_ENABLE, 0);
2099 
2100 		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2101 	} else {
2102 		if ((DISPLAY_VER(display) == 20 ||
2103 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2104 			intel_dp->psr.pkg_c_latency_used)
2105 			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
2106 								       intel_dp->psr.pipe,
2107 								       false);
2108 
2109 		val = intel_de_rmw(display,
2110 				   psr_ctl_reg(display, cpu_transcoder),
2111 				   EDP_PSR_ENABLE, 0);
2112 
2113 		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2114 	}
2115 	intel_dp->psr.active = false;
2116 }
2117 
2118 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2119 {
2120 	struct intel_display *display = to_intel_display(intel_dp);
2121 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2122 	i915_reg_t psr_status;
2123 	u32 psr_status_mask;
2124 
2125 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2126 					  intel_dp->psr.panel_replay_enabled)) {
2127 		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2128 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2129 	} else {
2130 		psr_status = psr_status_reg(display, cpu_transcoder);
2131 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2132 	}
2133 
2134 	/* Wait till PSR is idle */
2135 	if (intel_de_wait_for_clear(display, psr_status,
2136 				    psr_status_mask, 2000))
2137 		drm_err(display->drm, "Timed out waiting PSR idle state\n");
2138 }
2139 
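/*
 * Counterpart of intel_psr_enable_locked(): deactivate the source, undo the
 * enable-time workarounds, disable PSR on the sink and reset the software
 * state. Called with intel_dp->psr.lock held (see the lockdep assert).
 */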
2140 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2141 {
2142 	struct intel_display *display = to_intel_display(intel_dp);
2143 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2144 
2145 	lockdep_assert_held(&intel_dp->psr.lock);
2146 
2147 	if (!intel_dp->psr.enabled)
2148 		return;
2149 
2150 	if (intel_dp->psr.panel_replay_enabled)
2151 		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2152 	else
2153 		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2154 			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2155 
2156 	intel_psr_exit(intel_dp);
2157 	intel_psr_wait_exit_locked(intel_dp);
2158 
2159 	/*
2160 	 * Wa_16013835468
2161 	 * Wa_14015648006
2162 	 */
2163 	if (DISPLAY_VER(display) >= 11)
2164 		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2165 			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2166 
2167 	if (intel_dp->psr.sel_update_enabled) {
2168 		/* Wa_16012604467:adlp,mtl[a0,b0] */
2169 		if (!intel_dp->psr.panel_replay_enabled &&
2170 		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2171 			intel_de_rmw(display,
2172 				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2173 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2174 		else if (display->platform.alderlake_p)
2175 			intel_de_rmw(display, CLKGATE_DIS_MISC,
2176 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2177 	}
2178 
2179 	if (intel_dp_is_edp(intel_dp))
2180 		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2181 
2182 	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
2183 		intel_alpm_disable(intel_dp);
2184 
2185 	/* Disable PSR on Sink */
2186 	if (!intel_dp->psr.panel_replay_enabled) {
2187 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2188 
2189 		if (intel_dp->psr.sel_update_enabled)
2190 			drm_dp_dpcd_writeb(&intel_dp->aux,
2191 					   DP_RECEIVER_ALPM_CONFIG, 0);
2192 	}
2193 
2194 	/* Wa_16025596647 */
2195 	if ((DISPLAY_VER(display) == 20 ||
2196 	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2197 	    !intel_dp->psr.panel_replay_enabled)
2198 		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
2199 
2200 	intel_dp->psr.enabled = false;
2201 	intel_dp->psr.panel_replay_enabled = false;
2202 	intel_dp->psr.sel_update_enabled = false;
2203 	intel_dp->psr.psr2_sel_fetch_enabled = false;
2204 	intel_dp->psr.su_region_et_enabled = false;
2205 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2206 	intel_dp->psr.active_non_psr_pipes = 0;
2207 	intel_dp->psr.pkg_c_latency_used = 0;
2208 }
2209 
2210 /**
2211  * intel_psr_disable - Disable PSR
2212  * @intel_dp: Intel DP
2213  * @old_crtc_state: old CRTC state
2214  *
2215  * This function needs to be called before disabling pipe.
2216  */
2217 void intel_psr_disable(struct intel_dp *intel_dp,
2218 		       const struct intel_crtc_state *old_crtc_state)
2219 {
2220 	struct intel_display *display = to_intel_display(intel_dp);
2221 
2222 	if (!old_crtc_state->has_psr)
2223 		return;
2224 
2225 	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2226 			!CAN_PANEL_REPLAY(intel_dp)))
2227 		return;
2228 
2229 	mutex_lock(&intel_dp->psr.lock);
2230 
2231 	intel_psr_disable_locked(intel_dp);
2232 
2233 	intel_dp->psr.link_ok = false;
2234 
2235 	mutex_unlock(&intel_dp->psr.lock);
2236 	cancel_work_sync(&intel_dp->psr.work);
2237 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2238 }
2239 
2240 /**
2241  * intel_psr_pause - Pause PSR
2242  * @intel_dp: Intel DP
2243  *
2244  * This function needs to be called after enabling PSR.
2245  */
2246 void intel_psr_pause(struct intel_dp *intel_dp)
2247 {
2248 	struct intel_psr *psr = &intel_dp->psr;
2249 
2250 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2251 		return;
2252 
2253 	mutex_lock(&psr->lock);
2254 
2255 	if (!psr->enabled) {
2256 		mutex_unlock(&psr->lock);
2257 		return;
2258 	}
2259 
2260 	if (intel_dp->psr.pause_counter++ == 0) {
2261 		intel_psr_exit(intel_dp);
2262 		intel_psr_wait_exit_locked(intel_dp);
2263 	}
2264 
2265 	mutex_unlock(&psr->lock);
2266 
2267 	cancel_work_sync(&psr->work);
2268 	cancel_delayed_work_sync(&psr->dc3co_work);
2269 }
2270 
2271 /**
2272  * intel_psr_resume - Resume PSR
2273  * @intel_dp: Intel DP
2274  *
2275  * This function needs to be called after pausing PSR.
2276  */
2277 void intel_psr_resume(struct intel_dp *intel_dp)
2278 {
2279 	struct intel_display *display = to_intel_display(intel_dp);
2280 	struct intel_psr *psr = &intel_dp->psr;
2281 
2282 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2283 		return;
2284 
2285 	mutex_lock(&psr->lock);
2286 
2287 	if (!psr->enabled)
2288 		goto out;
2289 
2290 	if (!psr->pause_counter) {
2291 		drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2292 		goto out;
2293 	}
2294 
2295 	if (--intel_dp->psr.pause_counter == 0)
2296 		intel_psr_activate(intel_dp);
2297 
2298 out:
2299 	mutex_unlock(&psr->lock);
2300 }
2301 
2302 /**
2303  * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
2304  * notification.
2305  * @crtc_state: CRTC state
2306  *
2307  * We need to block DC6 entry in case of Panel Replay, as enabling VBI doesn't
2308  * prevent it for Panel Replay. Panel Replay switches the main link off on
2309  * DC entry. This means vblank interrupts are not fired, which is a problem if
2310  * user-space is polling for vblank events. Wa_16025596647 also needs to know
2311  * when vblank is enabled/disabled.
2312  */
2313 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2314 {
2315 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2316 	struct intel_display *display = to_intel_display(crtc_state);
2317 	struct intel_encoder *encoder;
2318 
2319 	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2320 		struct intel_dp *intel_dp;
2321 
2322 		if (!intel_encoder_is_dp(encoder))
2323 			continue;
2324 
2325 		intel_dp = enc_to_intel_dp(encoder);
2326 
2327 		if (!intel_dp_is_edp(intel_dp))
2328 			continue;
2329 
2330 		if (CAN_PANEL_REPLAY(intel_dp))
2331 			return true;
2332 
2333 		if ((DISPLAY_VER(display) == 20 ||
2334 		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2335 		    CAN_PSR(intel_dp))
2336 			return true;
2337 	}
2338 
2339 	return false;
2340 }
2341 
2342 /**
2343  * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2344  * @dsb: DSB context
2345  * @state: the atomic state
2346  * @crtc: the CRTC
2347  *
2348  * Generate PSR "Frame Change" event.
2349  */
2350 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2351 					  struct intel_atomic_state *state,
2352 					  struct intel_crtc *crtc)
2353 {
2354 	const struct intel_crtc_state *crtc_state =
2355 		intel_pre_commit_crtc_state(state, crtc);
2356 	struct intel_display *display = to_intel_display(crtc);
2357 
2358 	if (crtc_state->has_psr)
2359 		intel_de_write_dsb(display, dsb,
2360 				   CURSURFLIVE(display, crtc->pipe), 0);
2361 }
2362 
2363 /**
2364  * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
2365  * @crtc_state: the crtc state
2366  *
2367  * Return minimum vblank delay needed by PSR.
2368  */
2369 int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
2370 {
2371 	struct intel_display *display = to_intel_display(crtc_state);
2372 
2373 	if (!crtc_state->has_psr)
2374 		return 0;
2375 
2376 	/* Wa_14015401596 */
2377 	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
2378 		return 1;
2379 
2380 	/* Rest is for SRD_STATUS needed on LunarLake and onwards */
2381 	if (DISPLAY_VER(display) < 20)
2382 		return 0;
2383 
2384 	/*
2385 	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
2386 	 *
2387 	 * To deterministically capture the transition of the state machine
2388 	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
2389 	 * one line after the non-delayed V. Blank.
2390 	 *
2391 	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
2392 	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
2393 	 * - TRANS_VTOTAL[ Vertical Active ])
2394 	 *
2395 	 * SRD_STATUS is used only by PSR1 on PantherLake.
2396 	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
2397 	 */
2398 
2399 	if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
2400 					   crtc_state->has_sel_update))
2401 		return 0;
2402 	else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
2403 					       intel_crtc_has_type(crtc_state,
2404 								   INTEL_OUTPUT_EDP)))
2405 		return 0;
2406 	else
2407 		return 1;
2408 }
2409 
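/*
 * The PSR2_MAN_TRK_CTL bit definitions differ on Alder Lake-P and display
 * version 14+; the helpers below return the bits matching the running
 * platform (no explicit enable bit is used on those platforms).
 */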
2410 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2411 {
2412 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2413 		PSR2_MAN_TRK_CTL_ENABLE;
2414 }
2415 
2416 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2417 {
2418 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2419 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2420 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2421 }
2422 
2423 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2424 {
2425 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2426 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2427 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2428 }
2429 
2430 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2431 {
2432 	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2433 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2434 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2435 }
2436 
2437 static void intel_psr_force_update(struct intel_dp *intel_dp)
2438 {
2439 	struct intel_display *display = to_intel_display(intel_dp);
2440 
2441 	/*
2442 	 * Display WA #0884: skl+
2443 	 * This documented WA for bxt can be safely applied
2444 	 * broadly so we can force HW tracking to exit PSR
2445 	 * instead of disabling and re-enabling.
2446 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2447 	 * but it makes more sense to write to the currently
2448 	 * active pipe.
2449 	 *
2450 	 * This workaround does not exist for platforms with display 10 or
2451 	 * newer, but testing proved that it works up to display 13; for
2452 	 * anything newer, testing will be needed.
2453 	 */
2454 	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2455 }
2456 
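/*
 * Write the precomputed manual tracking (SU region) value and, when early
 * transport is enabled, the early transport pipe source size to the hardware,
 * either directly or through a DSB.
 */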
2457 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
2458 					  const struct intel_crtc_state *crtc_state)
2459 {
2460 	struct intel_display *display = to_intel_display(crtc_state);
2461 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2462 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2463 	struct intel_encoder *encoder;
2464 
2465 	if (!crtc_state->enable_psr2_sel_fetch)
2466 		return;
2467 
2468 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2469 					     crtc_state->uapi.encoder_mask) {
2470 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2471 
2472 		if (!dsb)
2473 			lockdep_assert_held(&intel_dp->psr.lock);
2474 
2475 		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
2476 			return;
2477 		break;
2478 	}
2479 
2480 	intel_de_write_dsb(display, dsb,
2481 			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2482 			   crtc_state->psr2_man_track_ctl);
2483 
2484 	if (!crtc_state->enable_psr2_su_region_et)
2485 		return;
2486 
2487 	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2488 			   crtc_state->pipe_srcsz_early_tpt);
2489 }
2490 
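/*
 * Translate the computed SU area (or a full frame update) into the
 * PSR2_MAN_TRK_CTL value stored in the CRTC state. Note that the SU region
 * is addressed with a 4-line granularity on pre-ADLP hardware.
 */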
2491 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2492 				  bool full_update)
2493 {
2494 	struct intel_display *display = to_intel_display(crtc_state);
2495 	u32 val = man_trk_ctl_enable_bit_get(display);
2496 
2497 	/* SF partial frame enable has to be set even on full update */
2498 	val |= man_trk_ctl_partial_frame_bit_get(display);
2499 
2500 	if (full_update) {
2501 		val |= man_trk_ctl_continuos_full_frame(display);
2502 		goto exit;
2503 	}
2504 
2505 	if (crtc_state->psr2_su_area.y1 == -1)
2506 		goto exit;
2507 
2508 	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
2509 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2510 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2511 	} else {
2512 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2513 			    crtc_state->psr2_su_area.y1 % 4 ||
2514 			    crtc_state->psr2_su_area.y2 % 4);
2515 
2516 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2517 			crtc_state->psr2_su_area.y1 / 4 + 1);
2518 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2519 			crtc_state->psr2_su_area.y2 / 4 + 1);
2520 	}
2521 exit:
2522 	crtc_state->psr2_man_track_ctl = val;
2523 }
2524 
2525 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2526 					  bool full_update)
2527 {
2528 	int width, height;
2529 
2530 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2531 		return 0;
2532 
2533 	width = drm_rect_width(&crtc_state->psr2_su_area);
2534 	height = drm_rect_height(&crtc_state->psr2_su_area);
2535 
2536 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2537 }
2538 
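/*
 * Grow the overall damage area to include the given damaged rectangle,
 * clipped against the pipe source area. y1 == -1 marks an empty area.
 */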
2539 static void clip_area_update(struct drm_rect *overlap_damage_area,
2540 			     struct drm_rect *damage_area,
2541 			     struct drm_rect *pipe_src)
2542 {
2543 	if (!drm_rect_intersect(damage_area, pipe_src))
2544 		return;
2545 
2546 	if (overlap_damage_area->y1 == -1) {
2547 		overlap_damage_area->y1 = damage_area->y1;
2548 		overlap_damage_area->y2 = damage_area->y2;
2549 		return;
2550 	}
2551 
2552 	if (damage_area->y1 < overlap_damage_area->y1)
2553 		overlap_damage_area->y1 = damage_area->y1;
2554 
2555 	if (damage_area->y2 > overlap_damage_area->y2)
2556 		overlap_damage_area->y2 = damage_area->y2;
2557 }
2558 
2559 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2560 {
2561 	struct intel_display *display = to_intel_display(crtc_state);
2562 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2563 	u16 y_alignment;
2564 
2565 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2566 	if (crtc_state->dsc.compression_enable &&
2567 	    (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2568 		y_alignment = vdsc_cfg->slice_height;
2569 	else
2570 		y_alignment = crtc_state->su_y_granularity;
2571 
2572 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2573 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2574 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2575 						y_alignment) + 1) * y_alignment;
2576 }
2577 
2578 /*
2579  * When early transport is in use we need to extend the SU area to cover the
2580  * cursor fully when the cursor is in the SU area.
2581  */
2582 static void
2583 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2584 				  struct intel_crtc *crtc,
2585 				  bool *cursor_in_su_area)
2586 {
2587 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2588 	struct intel_plane_state *new_plane_state;
2589 	struct intel_plane *plane;
2590 	int i;
2591 
2592 	if (!crtc_state->enable_psr2_su_region_et)
2593 		return;
2594 
2595 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2596 		struct drm_rect inter;
2597 
2598 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2599 			continue;
2600 
2601 		if (plane->id != PLANE_CURSOR)
2602 			continue;
2603 
2604 		if (!new_plane_state->uapi.visible)
2605 			continue;
2606 
2607 		inter = crtc_state->psr2_su_area;
2608 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2609 			continue;
2610 
2611 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2612 				 &crtc_state->pipe_src);
2613 		*cursor_in_su_area = true;
2614 	}
2615 }
2616 
2617 /*
2618  * TODO: Not clear how to handle planes with a negative position; also,
2619  * planes are not updated if they have a negative X position, so for now
2620  * do a full update in those cases.
2621  *
2622  * Plane scaling and rotation are not supported by selective fetch, and both
2623  * properties can change without a modeset, so they need to be checked at
2624  * every atomic commit.
2625  */
2626 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2627 {
2628 	if (plane_state->uapi.dst.y1 < 0 ||
2629 	    plane_state->uapi.dst.x1 < 0 ||
2630 	    plane_state->scaler_id >= 0 ||
2631 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2632 		return false;
2633 
2634 	return true;
2635 }
2636 
2637 /*
2638  * Check for pipe properties that are not supported by selective fetch.
2639  *
2640  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2641  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2642  * enabled and going to the full update path.
2643  */
2644 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2645 {
2646 	if (crtc_state->scaler_state.scaler_id >= 0)
2647 		return false;
2648 
2649 	return true;
2650 }
2651 
2652 /* Wa 14019834836 */
2653 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2654 {
2655 	struct intel_display *display = to_intel_display(crtc_state);
2656 	struct intel_encoder *encoder;
2657 	int hactive_limit;
2658 
2659 	if (crtc_state->psr2_su_area.y1 != 0 ||
2660 	    crtc_state->psr2_su_area.y2 != 0)
2661 		return;
2662 
2663 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2664 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2665 	else
2666 		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2667 
2668 	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2669 		return;
2670 
2671 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2672 					     crtc_state->uapi.encoder_mask) {
2673 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2674 
2675 		if (!intel_dp_is_edp(intel_dp) &&
2676 		    intel_dp->psr.panel_replay_enabled &&
2677 		    intel_dp->psr.sel_update_enabled) {
2678 			crtc_state->psr2_su_area.y2++;
2679 			return;
2680 		}
2681 	}
2682 }
2683 
2684 static void
2685 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2686 {
2687 	struct intel_display *display = to_intel_display(crtc_state);
2688 
2689 	/* Wa_14014971492 */
2690 	if (!crtc_state->has_panel_replay &&
2691 	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2692 	      display->platform.alderlake_p || display->platform.tigerlake)) &&
2693 	    crtc_state->splitter.enable)
2694 		crtc_state->psr2_su_area.y1 = 0;
2695 
2696 	/* Wa 14019834836 */
2697 	if (DISPLAY_VER(display) == 30)
2698 		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2699 }
2700 
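/*
 * Compute the selective fetch state for this commit: accumulate per-plane
 * damage into a pipe-level SU area, fall back to a full frame update when
 * something unsupported is found, and set each plane's selective fetch area.
 */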
2701 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2702 				struct intel_crtc *crtc)
2703 {
2704 	struct intel_display *display = to_intel_display(state);
2705 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2706 	struct intel_plane_state *new_plane_state, *old_plane_state;
2707 	struct intel_plane *plane;
2708 	bool full_update = false, cursor_in_su_area = false;
2709 	int i, ret;
2710 
2711 	if (!crtc_state->enable_psr2_sel_fetch)
2712 		return 0;
2713 
2714 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2715 		full_update = true;
2716 		goto skip_sel_fetch_set_loop;
2717 	}
2718 
2719 	crtc_state->psr2_su_area.x1 = 0;
2720 	crtc_state->psr2_su_area.y1 = -1;
2721 	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2722 	crtc_state->psr2_su_area.y2 = -1;
2723 
2724 	/*
2725 	 * Calculate minimal selective fetch area of each plane and calculate
2726 	 * the pipe damaged area.
2727 	 * In the next loop the plane selective fetch area will actually be set
2728 	 * using whole pipe damaged area.
2729 	 */
2730 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2731 					     new_plane_state, i) {
2732 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2733 						      .x2 = INT_MAX };
2734 
2735 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2736 			continue;
2737 
2738 		if (!new_plane_state->uapi.visible &&
2739 		    !old_plane_state->uapi.visible)
2740 			continue;
2741 
2742 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2743 			full_update = true;
2744 			break;
2745 		}
2746 
2747 		/*
2748 		 * If the visibility changed or the plane moved, mark the whole
2749 		 * plane area as damaged, as it needs a complete redraw in both
2750 		 * the new and the old position.
2751 		 */
2752 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2753 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2754 				     &old_plane_state->uapi.dst)) {
2755 			if (old_plane_state->uapi.visible) {
2756 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2757 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2758 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2759 						 &crtc_state->pipe_src);
2760 			}
2761 
2762 			if (new_plane_state->uapi.visible) {
2763 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2764 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2765 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2766 						 &crtc_state->pipe_src);
2767 			}
2768 			continue;
2769 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2770 			/* If alpha changed mark the whole plane area as damaged */
2771 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2772 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2773 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2774 					 &crtc_state->pipe_src);
2775 			continue;
2776 		}
2777 
2778 		src = drm_plane_state_src(&new_plane_state->uapi);
2779 		drm_rect_fp_to_int(&src, &src);
2780 
2781 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2782 						     &new_plane_state->uapi, &damaged_area))
2783 			continue;
2784 
2785 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2786 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2787 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2788 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2789 
2790 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2791 	}
2792 
2793 	/*
2794 	 * TODO: For now we are just using full update in case
2795 	 * selective fetch area calculation fails. To optimize this we
2796 	 * should identify cases where this happens and fix the area
2797 	 * calculation for those.
2798 	 */
2799 	if (crtc_state->psr2_su_area.y1 == -1) {
2800 		drm_info_once(display->drm,
2801 			      "Selective fetch area calculation failed in pipe %c\n",
2802 			      pipe_name(crtc->pipe));
2803 		full_update = true;
2804 	}
2805 
2806 	if (full_update)
2807 		goto skip_sel_fetch_set_loop;
2808 
2809 	intel_psr_apply_su_area_workarounds(crtc_state);
2810 
2811 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2812 	if (ret)
2813 		return ret;
2814 
2815 	/*
2816 	 * Adjust the SU area to cover the cursor fully as necessary (early
2817 	 * transport). This needs to be done after
2818 	 * drm_atomic_add_affected_planes() to ensure a visible cursor is added
2819 	 * to the affected planes even when the cursor itself is not updated.
2820 	 */
2821 	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2822 
2823 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2824 
2825 	/*
2826 	 * Now that we have the pipe damaged area, check if it intersects with
2827 	 * each plane; if it does, set the plane's selective fetch area.
2828 	 */
2829 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2830 					     new_plane_state, i) {
2831 		struct drm_rect *sel_fetch_area, inter;
2832 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2833 
2834 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2835 		    !new_plane_state->uapi.visible)
2836 			continue;
2837 
2838 		inter = crtc_state->psr2_su_area;
2839 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2840 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2841 			sel_fetch_area->y1 = -1;
2842 			sel_fetch_area->y2 = -1;
2843 			/*
2844 			 * if plane sel fetch was previously enabled ->
2845 			 * disable it
2846 			 */
2847 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2848 				crtc_state->update_planes |= BIT(plane->id);
2849 
2850 			continue;
2851 		}
2852 
2853 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2854 			full_update = true;
2855 			break;
2856 		}
2857 
2858 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2859 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2860 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2861 		crtc_state->update_planes |= BIT(plane->id);
2862 
2863 		/*
2864 		 * Sel_fetch_area is calculated for the UV plane. Use the
2865 		 * same area for the Y plane as well.
2866 		 */
2867 		if (linked) {
2868 			struct intel_plane_state *linked_new_plane_state;
2869 			struct drm_rect *linked_sel_fetch_area;
2870 
2871 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2872 			if (IS_ERR(linked_new_plane_state))
2873 				return PTR_ERR(linked_new_plane_state);
2874 
2875 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2876 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2877 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2878 			crtc_state->update_planes |= BIT(linked->id);
2879 		}
2880 	}
2881 
2882 skip_sel_fetch_set_loop:
2883 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2884 	crtc_state->pipe_srcsz_early_tpt =
2885 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2886 	return 0;
2887 }
2888 
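/*
 * Force a continuous full frame update using direct register writes, for use
 * from the panic handling path.
 */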
2889 void intel_psr2_panic_force_full_update(struct intel_display *display,
2890 					struct intel_crtc_state *crtc_state)
2891 {
2892 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2893 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2894 	u32 val = man_trk_ctl_enable_bit_get(display);
2895 
2896 	/* SF partial frame enable has to be set even on full update */
2897 	val |= man_trk_ctl_partial_frame_bit_get(display);
2898 	val |= man_trk_ctl_continuos_full_frame(display);
2899 
2900 	/* Directly write the register */
2901 	intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);
2902 
2903 	if (!crtc_state->enable_psr2_su_region_et)
2904 		return;
2905 
2906 	intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
2907 }
2908 
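/*
 * Called before the plane update: disable PSR if the new state no longer
 * supports it or changes the PSR flavour, otherwise only re-evaluate the
 * watermark workaround (Wa_14015648006).
 */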
2909 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2910 				struct intel_crtc *crtc)
2911 {
2912 	struct intel_display *display = to_intel_display(state);
2913 	const struct intel_crtc_state *old_crtc_state =
2914 		intel_atomic_get_old_crtc_state(state, crtc);
2915 	const struct intel_crtc_state *new_crtc_state =
2916 		intel_atomic_get_new_crtc_state(state, crtc);
2917 	struct intel_encoder *encoder;
2918 
2919 	if (!HAS_PSR(display))
2920 		return;
2921 
2922 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2923 					     old_crtc_state->uapi.encoder_mask) {
2924 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2925 		struct intel_psr *psr = &intel_dp->psr;
2926 
2927 		mutex_lock(&psr->lock);
2928 
2929 		if (psr->enabled) {
2930 			/*
2931 			 * Reasons to disable:
2932 			 * - PSR disabled in new state
2933 			 * - All planes will go inactive
2934 			 * - Changing between PSR versions
2935 			 * - Region Early Transport changing
2936 			 * - Display WA #1136: skl, bxt
2937 			 */
2938 			if (intel_crtc_needs_modeset(new_crtc_state) ||
2939 			    !new_crtc_state->has_psr ||
2940 			    !new_crtc_state->active_planes ||
2941 			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
2942 			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
2943 			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
2944 			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
2945 				intel_psr_disable_locked(intel_dp);
2946 			else if (new_crtc_state->wm_level_disabled)
2947 				/* Wa_14015648006 */
2948 				wm_optimization_wa(intel_dp, new_crtc_state);
2949 		}
2950 
2951 		mutex_unlock(&psr->lock);
2952 	}
2953 }
2954 
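/*
 * Called after the plane update: (re)enable PSR for the new state unless the
 * sink is unreliable or no planes are active, and force a single PSR exit
 * when CRC computation is enabled to avoid CRC timeouts.
 */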
2955 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2956 				 struct intel_crtc *crtc)
2957 {
2958 	struct intel_display *display = to_intel_display(state);
2959 	const struct intel_crtc_state *crtc_state =
2960 		intel_atomic_get_new_crtc_state(state, crtc);
2961 	struct intel_encoder *encoder;
2962 
2963 	if (!crtc_state->has_psr)
2964 		return;
2965 
2966 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2967 					     crtc_state->uapi.encoder_mask) {
2968 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2969 		struct intel_psr *psr = &intel_dp->psr;
2970 		bool keep_disabled = false;
2971 
2972 		mutex_lock(&psr->lock);
2973 
2974 		drm_WARN_ON(display->drm,
2975 			    psr->enabled && !crtc_state->active_planes);
2976 
2977 		keep_disabled |= psr->sink_not_reliable;
2978 		keep_disabled |= !crtc_state->active_planes;
2979 
2980 		/* Display WA #1136: skl, bxt */
2981 		keep_disabled |= DISPLAY_VER(display) < 11 &&
2982 			crtc_state->wm_level_disabled;
2983 
2984 		if (!psr->enabled && !keep_disabled)
2985 			intel_psr_enable_locked(intel_dp, crtc_state);
2986 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2987 			/* Wa_14015648006 */
2988 			wm_optimization_wa(intel_dp, crtc_state);
2989 
2990 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2991 		if (crtc_state->crc_enabled && psr->enabled)
2992 			intel_psr_force_update(intel_dp);
2993 
2994 		/*
2995 		 * Clear possible busy bits in case we have an
2996 		 * invalidate -> flip -> flush sequence.
2997 		 */
2998 		intel_dp->psr.busy_frontbuffer_bits = 0;
2999 
3000 		mutex_unlock(&psr->lock);
3001 	}
3002 }
3003 
3004 /*
3005  * From bspec: Panel Self Refresh (BDW+)
3006  * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3007  * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3008  * defensive enough to cover everything.
3009  */
3010 #define PSR_IDLE_TIMEOUT_MS 50
3011 
3012 static int
3013 _psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3014 				   struct intel_dsb *dsb)
3015 {
3016 	struct intel_display *display = to_intel_display(new_crtc_state);
3017 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3018 
3019 	/*
3020 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
3021 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
3022 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
3023 	 */
3024 	if (dsb) {
3025 		intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
3026 			       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
3027 			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3028 		return true;
3029 	}
3030 
3031 	return intel_de_wait_for_clear(display,
3032 				       EDP_PSR2_STATUS(display, cpu_transcoder),
3033 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
3034 				       PSR_IDLE_TIMEOUT_MS);
3035 }
3036 
3037 static int
3038 _psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3039 				   struct intel_dsb *dsb)
3040 {
3041 	struct intel_display *display = to_intel_display(new_crtc_state);
3042 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3043 
3044 	if (dsb) {
3045 		intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
3046 			       EDP_PSR_STATUS_STATE_MASK, 0, 200,
3047 			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3048 		return true;
3049 	}
3050 
3051 	return intel_de_wait_for_clear(display,
3052 				       psr_status_reg(display, cpu_transcoder),
3053 				       EDP_PSR_STATUS_STATE_MASK,
3054 				       PSR_IDLE_TIMEOUT_MS);
3055 }
3056 
3057 /**
3058  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
3059  * @new_crtc_state: new CRTC state
3060  *
3061  * This function is expected to be called from pipe_update_start() where it is
3062  * not expected to race with PSR enable or disable.
3063  */
3064 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
3065 {
3066 	struct intel_display *display = to_intel_display(new_crtc_state);
3067 	struct intel_encoder *encoder;
3068 
3069 	if (!new_crtc_state->has_psr)
3070 		return;
3071 
3072 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3073 					     new_crtc_state->uapi.encoder_mask) {
3074 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3075 		int ret;
3076 
3077 		lockdep_assert_held(&intel_dp->psr.lock);
3078 
3079 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3080 			continue;
3081 
3082 		if (intel_dp->psr.sel_update_enabled)
3083 			ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
3084 								 NULL);
3085 		else
3086 			ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
3087 								 NULL);
3088 
3089 		if (ret)
3090 			drm_err(display->drm,
3091 				"PSR wait timed out, atomic update may fail\n");
3092 	}
3093 }
3094 
3095 void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
3096 				 const struct intel_crtc_state *new_crtc_state)
3097 {
3098 	if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
3099 		return;
3100 
3101 	if (new_crtc_state->has_sel_update)
3102 		_psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
3103 	else
3104 		_psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
3105 }
3106 
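/*
 * Wait for the PSR status to go idle, dropping the PSR lock for the duration
 * of the wait. Returns true only if PSR is still enabled and not paused once
 * the lock has been re-acquired.
 */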
3107 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
3108 {
3109 	struct intel_display *display = to_intel_display(intel_dp);
3110 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3111 	i915_reg_t reg;
3112 	u32 mask;
3113 	int err;
3114 
3115 	if (!intel_dp->psr.enabled)
3116 		return false;
3117 
3118 	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3119 					  intel_dp->psr.panel_replay_enabled)) {
3120 		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
3121 		mask = EDP_PSR2_STATUS_STATE_MASK;
3122 	} else {
3123 		reg = psr_status_reg(display, cpu_transcoder);
3124 		mask = EDP_PSR_STATUS_STATE_MASK;
3125 	}
3126 
3127 	mutex_unlock(&intel_dp->psr.lock);
3128 
3129 	err = intel_de_wait_for_clear(display, reg, mask, 50);
3130 	if (err)
3131 		drm_err(display->drm,
3132 			"Timed out waiting for PSR Idle for re-enable\n");
3133 
3134 	/* After the unlocked wait, verify that PSR is still wanted! */
3135 	mutex_lock(&intel_dp->psr.lock);
3136 	return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
3137 }
3138 
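/*
 * Force a modeset/fastset on every eDP connector by marking its CRTC mode as
 * changed and committing the state, so that a changed PSR debug configuration
 * takes effect.
 */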
3139 static int intel_psr_fastset_force(struct intel_display *display)
3140 {
3141 	struct drm_connector_list_iter conn_iter;
3142 	struct drm_modeset_acquire_ctx ctx;
3143 	struct drm_atomic_state *state;
3144 	struct drm_connector *conn;
3145 	int err = 0;
3146 
3147 	state = drm_atomic_state_alloc(display->drm);
3148 	if (!state)
3149 		return -ENOMEM;
3150 
3151 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3152 
3153 	state->acquire_ctx = &ctx;
3154 	to_intel_atomic_state(state)->internal = true;
3155 
3156 retry:
3157 	drm_connector_list_iter_begin(display->drm, &conn_iter);
3158 	drm_for_each_connector_iter(conn, &conn_iter) {
3159 		struct drm_connector_state *conn_state;
3160 		struct drm_crtc_state *crtc_state;
3161 
3162 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3163 			continue;
3164 
3165 		conn_state = drm_atomic_get_connector_state(state, conn);
3166 		if (IS_ERR(conn_state)) {
3167 			err = PTR_ERR(conn_state);
3168 			break;
3169 		}
3170 
3171 		if (!conn_state->crtc)
3172 			continue;
3173 
3174 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3175 		if (IS_ERR(crtc_state)) {
3176 			err = PTR_ERR(crtc_state);
3177 			break;
3178 		}
3179 
3180 		/* Mark mode as changed to trigger a pipe->update() */
3181 		crtc_state->mode_changed = true;
3182 	}
3183 	drm_connector_list_iter_end(&conn_iter);
3184 
3185 	if (err == 0)
3186 		err = drm_atomic_commit(state);
3187 
3188 	if (err == -EDEADLK) {
3189 		drm_atomic_state_clear(state);
3190 		err = drm_modeset_backoff(&ctx);
3191 		if (!err)
3192 			goto retry;
3193 	}
3194 
3195 	drm_modeset_drop_locks(&ctx);
3196 	drm_modeset_acquire_fini(&ctx);
3197 	drm_atomic_state_put(state);
3198 
3199 	return err;
3200 }
3201 
3202 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3203 {
3204 	struct intel_display *display = to_intel_display(intel_dp);
3205 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3206 	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3207 					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3208 	u32 old_mode, old_disable_bits;
3209 	int ret;
3210 
3211 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3212 		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3213 		    I915_PSR_DEBUG_MODE_MASK) ||
3214 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3215 		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3216 		return -EINVAL;
3217 	}
3218 
3219 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3220 	if (ret)
3221 		return ret;
3222 
3223 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3224 	old_disable_bits = intel_dp->psr.debug &
3225 		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3226 		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3227 
3228 	intel_dp->psr.debug = val;
3229 
3230 	/*
3231 	 * Do it right away if it's already enabled, otherwise it will be done
3232 	 * when enabling the source.
3233 	 */
3234 	if (intel_dp->psr.enabled)
3235 		psr_irq_control(intel_dp);
3236 
3237 	mutex_unlock(&intel_dp->psr.lock);
3238 
3239 	if (old_mode != mode || old_disable_bits != disable_bits)
3240 		ret = intel_psr_fastset_force(display);
3241 
3242 	return ret;
3243 }
3244 
3245 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3246 {
3247 	struct intel_psr *psr = &intel_dp->psr;
3248 
3249 	intel_psr_disable_locked(intel_dp);
3250 	psr->sink_not_reliable = true;
3251 	/* let's make sure that sink is awaken */
3252 	/* make sure that the sink is awake */
3253 }
3254 
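/*
 * Deferred PSR re-activation: handle a pending AUX error, otherwise
 * re-activate PSR once it is not paused, the hardware has fully idled and
 * no frontbuffer bits are still busy.
 */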
3255 static void intel_psr_work(struct work_struct *work)
3256 {
3257 	struct intel_dp *intel_dp =
3258 		container_of(work, typeof(*intel_dp), psr.work);
3259 
3260 	mutex_lock(&intel_dp->psr.lock);
3261 
3262 	if (!intel_dp->psr.enabled)
3263 		goto unlock;
3264 
3265 	if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
3266 		intel_psr_handle_irq(intel_dp);
3267 		goto unlock;
3268 	}
3269 
3270 	if (intel_dp->psr.pause_counter)
3271 		goto unlock;
3272 
3273 	/*
3274 	 * We have to make sure PSR is ready for re-enable,
3275 	 * otherwise it stays disabled until the next full enable/disable cycle.
3276 	 * PSR might take some time to get fully disabled
3277 	 * and be ready for re-enable.
3278 	 */
3279 	if (!__psr_wait_for_idle_locked(intel_dp))
3280 		goto unlock;
3281 
3282 	/*
3283 	 * The delayed work can race with an invalidate hence we need to
3284 	 * recheck. Since psr_flush first clears this and then reschedules we
3285 	 * won't ever miss a flush when bailing out here.
3286 	 */
3287 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3288 		goto unlock;
3289 
3290 	intel_psr_activate(intel_dp);
3291 unlock:
3292 	mutex_unlock(&intel_dp->psr.lock);
3293 }
3294 
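/*
 * Ask the selective fetch hardware for a full frame update: LNL+ has a
 * dedicated single full frame (SFF) register, earlier platforms use the
 * PSR2 manual tracking control register instead.
 */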
3295 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3296 {
3297 	struct intel_display *display = to_intel_display(intel_dp);
3298 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3299 
3300 	if (!intel_dp->psr.psr2_sel_fetch_enabled)
3301 		return;
3302 
3303 	if (DISPLAY_VER(display) >= 20)
3304 		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3305 			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3306 	else
3307 		intel_de_write(display,
3308 			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3309 			       man_trk_ctl_enable_bit_get(display) |
3310 			       man_trk_ctl_partial_frame_bit_get(display) |
3311 			       man_trk_ctl_single_full_frame_bit_get(display) |
3312 			       man_trk_ctl_continuos_full_frame(display));
3313 }
3314 
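/*
 * Invalidate handling: with selective fetch before LNL, switch to continuous
 * full frame updates; otherwise simply exit PSR.
 */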
3315 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3316 {
3317 	struct intel_display *display = to_intel_display(intel_dp);
3318 
3319 	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3320 		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3321 			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3322 			intel_psr_configure_full_frame_update(intel_dp);
3323 		}
3324 
3325 		intel_psr_force_update(intel_dp);
3326 	} else {
3327 		intel_psr_exit(intel_dp);
3328 	}
3329 }
3330 
3331 /**
3332  * intel_psr_invalidate - Invalidate PSR
3333  * @display: display device
3334  * @frontbuffer_bits: frontbuffer plane tracking bits
3335  * @origin: which operation caused the invalidate
3336  *
3337  * Since the hardware frontbuffer tracking has gaps we need to integrate
3338  * with the software frontbuffer tracking. This function gets called every
3339  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3340  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3341  *
3342  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3343  */
3344 void intel_psr_invalidate(struct intel_display *display,
3345 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3346 {
3347 	struct intel_encoder *encoder;
3348 
3349 	if (origin == ORIGIN_FLIP)
3350 		return;
3351 
3352 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3353 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3354 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3355 
3356 		mutex_lock(&intel_dp->psr.lock);
3357 		if (!intel_dp->psr.enabled) {
3358 			mutex_unlock(&intel_dp->psr.lock);
3359 			continue;
3360 		}
3361 
3362 		pipe_frontbuffer_bits &=
3363 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3364 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3365 
3366 		if (pipe_frontbuffer_bits)
3367 			_psr_invalidate_handle(intel_dp);
3368 
3369 		mutex_unlock(&intel_dp->psr.lock);
3370 	}
3371 }
3372 /*
3373  * Once we completely rely on PSR2 software tracking, intel_psr_flush()
3374  * will also invalidate and flush PSR for ORIGIN_FLIP events, so
3375  * tgl_dc3co_flush_locked() will need to be changed accordingly at that
3376  * point.
3377  */
3378 static void
3379 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3380 		       enum fb_op_origin origin)
3381 {
3382 	struct intel_display *display = to_intel_display(intel_dp);
3383 
3384 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3385 	    !intel_dp->psr.active)
3386 		return;
3387 
3388 	/*
3389 	 * Every frontbuffer flush/flip event pushes back the delayed work; when
3390 	 * the delayed work finally runs it means the display has been idle.
3391 	 */
3392 	if (!(frontbuffer_bits &
3393 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3394 		return;
3395 
3396 	tgl_psr2_enable_dc3co(intel_dp);
3397 	mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
3398 			 intel_dp->psr.dc3co_exit_delay);
3399 }
3400 
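/*
 * Flush handling: make the hardware pick up the new frontbuffer contents
 * (via a full frame update or a PSR exit, depending on platform and mode).
 * If PSR is no longer active and no frontbuffer bits are busy, schedule the
 * work to re-activate it.
 */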
3401 static void _psr_flush_handle(struct intel_dp *intel_dp)
3402 {
3403 	struct intel_display *display = to_intel_display(intel_dp);
3404 
3405 	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3406 		/* Selective fetch prior to LNL */
3407 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3408 			/* can we turn CFF off? */
3409 			if (intel_dp->psr.busy_frontbuffer_bits == 0)
3410 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3411 		}
3412 
3413 		/*
3414 		 * Keep the CFF bit enabled as we don't have a proper SU
3415 		 * configuration in case an update is sent for any reason after
3416 		 * the SFF bit gets cleared by the HW on the next vblank.
3417 		 *
3418 		 * NOTE: Setting the CFF bit is not needed from LunarLake onwards
3419 		 * as we have a dedicated register for the SFF bit and we are not
3420 		 * overwriting the existing SU configuration.
3421 		 */
3422 		intel_psr_configure_full_frame_update(intel_dp);
3423 
3424 		intel_psr_force_update(intel_dp);
3425 	} else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
3426 		/*
3427 		 * PSR1 on all platforms
3428 		 * PSR2 HW tracking
3429 		 * Panel Replay Full frame update
3430 		 */
3431 		intel_psr_force_update(intel_dp);
3432 	} else {
3433 		/* Selective update LNL onwards */
3434 		intel_psr_exit(intel_dp);
3435 	}
3436 
3437 	if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3438 		queue_work(display->wq.unordered, &intel_dp->psr.work);
3439 }
3440 
3441 /**
3442  * intel_psr_flush - Flush PSR
3443  * @display: display device
3444  * @frontbuffer_bits: frontbuffer plane tracking bits
3445  * @origin: which operation caused the flush
3446  *
3447  * Since the hardware frontbuffer tracking has gaps we need to integrate
3448  * with the software frontbuffer tracking. This function gets called every
3449  * time frontbuffer rendering has completed and flushed out to memory. PSR
3450  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3451  *
3452  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3453  */
3454 void intel_psr_flush(struct intel_display *display,
3455 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3456 {
3457 	struct intel_encoder *encoder;
3458 
3459 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3460 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3461 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3462 
3463 		mutex_lock(&intel_dp->psr.lock);
3464 		if (!intel_dp->psr.enabled) {
3465 			mutex_unlock(&intel_dp->psr.lock);
3466 			continue;
3467 		}
3468 
3469 		pipe_frontbuffer_bits &=
3470 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3471 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3472 
3473 		/*
3474 		 * If PSR is paused by an explicit intel_psr_pause() call,
3475 		 * we have to ensure that PSR is not activated until
3476 		 * intel_psr_resume() is called.
3477 		 */
3478 		if (intel_dp->psr.pause_counter)
3479 			goto unlock;
3480 
3481 		if (origin == ORIGIN_FLIP ||
3482 		    (origin == ORIGIN_CURSOR_UPDATE &&
3483 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3484 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3485 			goto unlock;
3486 		}
3487 
3488 		if (pipe_frontbuffer_bits == 0)
3489 			goto unlock;
3490 
3491 		/* By definition flush = invalidate + flush */
3492 		_psr_flush_handle(intel_dp);
3493 unlock:
3494 		mutex_unlock(&intel_dp->psr.lock);
3495 	}
3496 }
3497 
3498 /**
3499  * intel_psr_init - Init basic PSR work and mutex.
3500  * @intel_dp: Intel DP
3501  *
3502  * This function is called after connector initialization (which handles the
3503  * connector capabilities) and initializes the basic PSR state, work and lock
3504  * for each DP encoder.
3505  */
3506 void intel_psr_init(struct intel_dp *intel_dp)
3507 {
3508 	struct intel_display *display = to_intel_display(intel_dp);
3509 	struct intel_connector *connector = intel_dp->attached_connector;
3510 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3511 
3512 	if (!(HAS_PSR(display) || HAS_DP20(display)))
3513 		return;
3514 
3515 	/*
3516 	 * The HSW spec explicitly says PSR is tied to port A.
3517 	 * BDW+ platforms have an instance of the PSR registers per transcoder,
3518 	 * but on BDW, GEN9 and GEN11 the HW team has only validated the eDP
3519 	 * transcoder.
3520 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3521 	 * so let's keep it hardcoded to PORT_A there.
3522 	 * GEN12 onwards supports an instance of the PSR registers per transcoder.
3523 	 */
3524 	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3525 		drm_dbg_kms(display->drm,
3526 			    "PSR condition failed: Port not supported\n");
3527 		return;
3528 	}
3529 
3530 	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3531 	    DISPLAY_VER(display) >= 20)
3532 		intel_dp->psr.source_panel_replay_support = true;
3533 
3534 	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3535 		intel_dp->psr.source_support = true;
3536 
3537 	/* Set the link_standby vs. link_off default */
3538 	if (DISPLAY_VER(display) < 12)
3539 		/* For platforms before TGL let's respect the VBT again */
3540 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3541 
3542 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3543 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3544 	mutex_init(&intel_dp->psr.lock);
3545 }
3546 
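/*
 * Read the sink status and error status DPCD registers, using the PSR or the
 * Panel Replay offsets depending on which mode is enabled. Returns 0 on
 * success, with the status masked down to DP_PSR_SINK_STATE_MASK.
 */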
3547 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3548 					   u8 *status, u8 *error_status)
3549 {
3550 	struct drm_dp_aux *aux = &intel_dp->aux;
3551 	int ret;
3552 	unsigned int offset;
3553 
3554 	offset = intel_dp->psr.panel_replay_enabled ?
3555 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3556 
3557 	ret = drm_dp_dpcd_readb(aux, offset, status);
3558 	if (ret != 1)
3559 		return ret;
3560 
3561 	offset = intel_dp->psr.panel_replay_enabled ?
3562 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3563 
3564 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3565 	if (ret != 1)
3566 		return ret;
3567 
3568 	*status = *status & DP_PSR_SINK_STATE_MASK;
3569 
3570 	return 0;
3571 }
3572 
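/*
 * ALPM errors are only relevant with selective update: on an error, disable
 * PSR and mark the sink as not reliable.
 */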
3573 static void psr_alpm_check(struct intel_dp *intel_dp)
3574 {
3575 	struct intel_psr *psr = &intel_dp->psr;
3576 
3577 	if (!psr->sel_update_enabled)
3578 		return;
3579 
3580 	if (intel_alpm_get_error(intel_dp)) {
3581 		intel_psr_disable_locked(intel_dp);
3582 		psr->sink_not_reliable = true;
3583 	}
3584 }
3585 
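/*
 * Check DP_PSR_ESI for a sink PSR capability change; if one is signalled,
 * disable PSR, mark the sink as not reliable and clear the indication.
 */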
3586 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3587 {
3588 	struct intel_display *display = to_intel_display(intel_dp);
3589 	struct intel_psr *psr = &intel_dp->psr;
3590 	u8 val;
3591 	int r;
3592 
3593 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3594 	if (r != 1) {
3595 		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3596 		return;
3597 	}
3598 
3599 	if (val & DP_PSR_CAPS_CHANGE) {
3600 		intel_psr_disable_locked(intel_dp);
3601 		psr->sink_not_reliable = true;
3602 		drm_dbg_kms(display->drm,
3603 			    "Sink PSR capability changed, disabling PSR\n");
3604 
3605 		/* Clear the capability change indication */
3606 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3607 	}
3608 }
3609 
3610 /*
3611  * The following error bits are common between PSR and Panel Replay:
3612  * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3613  * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3614  * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3615  * so this function relies on the PSR definitions for both.
3616  */
3617 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3618 {
3619 	struct intel_display *display = to_intel_display(intel_dp);
3620 	struct intel_psr *psr = &intel_dp->psr;
3621 	u8 status, error_status;
3622 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3623 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3624 			  DP_PSR_LINK_CRC_ERROR;
3625 
3626 	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3627 		return;
3628 
3629 	mutex_lock(&psr->lock);
3630 
3631 	psr->link_ok = false;
3632 
3633 	if (!psr->enabled)
3634 		goto exit;
3635 
3636 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3637 		drm_err(display->drm,
3638 			"Error reading PSR status or error status\n");
3639 		goto exit;
3640 	}
3641 
3642 	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3643 	    (error_status & errors)) {
3644 		intel_psr_disable_locked(intel_dp);
3645 		psr->sink_not_reliable = true;
3646 	}
3647 
3648 	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3649 	    !error_status)
3650 		drm_dbg_kms(display->drm,
3651 			    "PSR sink internal error, disabling PSR\n");
3652 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3653 		drm_dbg_kms(display->drm,
3654 			    "PSR RFB storage error, disabling PSR\n");
3655 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3656 		drm_dbg_kms(display->drm,
3657 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3658 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3659 		drm_dbg_kms(display->drm,
3660 			    "PSR Link CRC error, disabling PSR\n");
3661 
3662 	if (error_status & ~errors)
3663 		drm_err(display->drm,
3664 			"PSR_ERROR_STATUS unhandled errors %x\n",
3665 			error_status & ~errors);
3666 	/* clear status register */
3667 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3668 
3669 	if (!psr->panel_replay_enabled) {
3670 		psr_alpm_check(intel_dp);
3671 		psr_capability_changed_check(intel_dp);
3672 	}
3673 
3674 exit:
3675 	mutex_unlock(&psr->lock);
3676 }
3677 
3678 bool intel_psr_enabled(struct intel_dp *intel_dp)
3679 {
3680 	bool ret;
3681 
3682 	if (!CAN_PSR(intel_dp))
3683 		return false;
3684 
3685 	mutex_lock(&intel_dp->psr.lock);
3686 	ret = intel_dp->psr.enabled;
3687 	mutex_unlock(&intel_dp->psr.lock);
3688 
3689 	return ret;
3690 }
3691 
3692 /**
3693  * intel_psr_link_ok - return psr->link_ok
3694  * @intel_dp: struct intel_dp
3695  *
3696  * We are seeing unexpected link re-trainings with some panels, caused by the
3697  * panel reporting a bad link status after PSR has been enabled. Code checking
3698  * the link status can call this to know when it is safe to ignore a bad link
3699  * status reported by the panel, i.e. if the panel reports a bad link but
3700  * intel_psr_link_ok() says the link is ok, the caller should rely on the latter.
3701  *
3702  * Return value of link_ok
3703  * Returns: the value of psr->link_ok
3704 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3705 {
3706 	bool ret;
3707 
3708 	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3709 	    !intel_dp_is_edp(intel_dp))
3710 		return false;
3711 
3712 	mutex_lock(&intel_dp->psr.lock);
3713 	ret = intel_dp->psr.link_ok;
3714 	mutex_unlock(&intel_dp->psr.lock);
3715 
3716 	return ret;
3717 }
3718 
3719 /**
3720  * intel_psr_lock - grab PSR lock
3721  * @crtc_state: the crtc state
3722  *
3723  * This is initially meant to be used around the CRTC update, when
3724  * vblank sensitive registers are updated and we need to grab the lock
3725  * before that to avoid vblank evasion.
3726  */
3727 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3728 {
3729 	struct intel_display *display = to_intel_display(crtc_state);
3730 	struct intel_encoder *encoder;
3731 
3732 	if (!crtc_state->has_psr)
3733 		return;
3734 
3735 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3736 					     crtc_state->uapi.encoder_mask) {
3737 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3738 
3739 		mutex_lock(&intel_dp->psr.lock);
3740 		break;
3741 	}
3742 }
3743 
3744 /**
3745  * intel_psr_unlock - release PSR lock
3746  * @crtc_state: the crtc state
3747  *
3748  * Release the PSR lock that was held during pipe update.
3749  */
3750 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3751 {
3752 	struct intel_display *display = to_intel_display(crtc_state);
3753 	struct intel_encoder *encoder;
3754 
3755 	if (!crtc_state->has_psr)
3756 		return;
3757 
3758 	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3759 					     crtc_state->uapi.encoder_mask) {
3760 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3761 
3762 		mutex_unlock(&intel_dp->psr.lock);
3763 		break;
3764 	}
3765 }
3766 
3767 /* Wa_16025596647 */
3768 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
3769 {
3770 	struct intel_display *display = to_intel_display(intel_dp);
3771 	bool dc5_dc6_blocked;
3772 
3773 	if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
3774 		return;
3775 
3776 	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
3777 
3778 	if (intel_dp->psr.sel_update_enabled)
3779 		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
3780 					 psr_compute_idle_frames(intel_dp));
3781 	else
3782 		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
3783 								       intel_dp->psr.pipe,
3784 								       dc5_dc6_blocked);
3785 }
3786 
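/*
 * Worker scheduled from intel_psr_notify_dc5_dc6() to re-evaluate the
 * underrun on idle workaround (Wa_16025596647) after a DC5/DC6 change.
 */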
3787 static void psr_dc5_dc6_wa_work(struct work_struct *work)
3788 {
3789 	struct intel_display *display = container_of(work, typeof(*display),
3790 						     psr_dc5_dc6_wa_work);
3791 	struct intel_encoder *encoder;
3792 
3793 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3794 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3795 
3796 		mutex_lock(&intel_dp->psr.lock);
3797 
3798 		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
3799 		    !intel_dp->psr.pkg_c_latency_used)
3800 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3801 
3802 		mutex_unlock(&intel_dp->psr.lock);
3803 	}
3804 }
3805 
3806 /**
3807  * intel_psr_notify_dc5_dc6 - Notify PSR about enabling/disabling of dc5/dc6
3808  * @display: intel display struct
3809  *
3810  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
3811  * psr_dc5_dc6_wa_work used for applying/removing the workaround.
3812  */
3813 void intel_psr_notify_dc5_dc6(struct intel_display *display)
3814 {
3815 	if (DISPLAY_VER(display) != 20 &&
3816 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3817 		return;
3818 
3819 	schedule_work(&display->psr_dc5_dc6_wa_work);
3820 }
3821 
3822 /**
3823  * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
3824  * @display: intel display struct
3825  *
3826  * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
3827  * psr_dc5_dc6_wa_work used for applying the workaround.
3828  */
3829 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
3830 {
3831 	if (DISPLAY_VER(display) != 20 &&
3832 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3833 		return;
3834 
3835 	INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
3836 }
3837 
3838 /**
3839  * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
3840  * @state: intel atomic state
3841  * @crtc: intel crtc
3842  * @enable: enable/disable
3843  *
3844  * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3845  * apply/remove the workaround when a pipe is getting enabled/disabled.
3846  */
3847 void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
3848 				  struct intel_crtc *crtc, bool enable)
3849 {
3850 	struct intel_display *display = to_intel_display(state);
3851 	struct intel_encoder *encoder;
3852 
3853 	if (DISPLAY_VER(display) != 20 &&
3854 	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3855 		return;
3856 
3857 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3858 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3859 		u8 active_non_psr_pipes;
3860 
3861 		mutex_lock(&intel_dp->psr.lock);
3862 
3863 		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3864 			goto unlock;
3865 
3866 		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
3867 
3868 		if (enable)
3869 			active_non_psr_pipes |= BIT(crtc->pipe);
3870 		else
3871 			active_non_psr_pipes &= ~BIT(crtc->pipe);
3872 
3873 		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
3874 			goto unlock;
3875 
3876 		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
3877 		    (!enable && !intel_dp->psr.active_non_psr_pipes) ||
3878 		    !intel_dp->psr.pkg_c_latency_used) {
3879 			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3880 			goto unlock;
3881 		}
3882 
3883 		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3884 
3885 		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3886 unlock:
3887 		mutex_unlock(&intel_dp->psr.lock);
3888 	}
3889 }
3890 
3891 /**
3892  * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
3893  * @display: intel display struct
3894  * @enable: enable/disable
3895  *
3896  * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3897  * apply/remove the workaround when vblank is getting enabled/disabled.
3898  */
3899 void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
3900 					    bool enable)
3901 {
3902 	struct intel_encoder *encoder;
3903 
3904 	for_each_intel_encoder_with_psr(display->drm, encoder) {
3905 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3906 
3907 		mutex_lock(&intel_dp->psr.lock);
3908 		if (intel_dp->psr.panel_replay_enabled) {
3909 			mutex_unlock(&intel_dp->psr.lock);
3910 			break;
3911 		}
3912 
3913 		if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
3914 			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3915 
3916 		mutex_unlock(&intel_dp->psr.lock);
3917 		return;
3918 	}
3919 
3920 	/*
3921 	 * NOTE: intel_display_power_set_target_dc_state is used
3922 	 * only by the PSR code for DC3CO handling. The DC3CO target
3923 	 * state is currently disabled in the PSR code. If DC3CO
3924 	 * is taken into use, we need to take that into account here
3925 	 * as well.
3926 	 */
3927 	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
3928 						DC_STATE_EN_UPTO_DC6);
3929 }
3930 
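/* Decode and print the source PSR/Panel Replay live status for debugfs. */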
3931 static void
3932 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3933 {
3934 	struct intel_display *display = to_intel_display(intel_dp);
3935 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3936 	const char *status = "unknown";
3937 	u32 val, status_val;
3938 
3939 	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
3940 	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
3941 		static const char * const live_status[] = {
3942 			"IDLE",
3943 			"CAPTURE",
3944 			"CAPTURE_FS",
3945 			"SLEEP",
3946 			"BUFON_FW",
3947 			"ML_UP",
3948 			"SU_STANDBY",
3949 			"FAST_SLEEP",
3950 			"DEEP_SLEEP",
3951 			"BUF_ON",
3952 			"TG_ON"
3953 		};
3954 		val = intel_de_read(display,
3955 				    EDP_PSR2_STATUS(display, cpu_transcoder));
3956 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3957 		if (status_val < ARRAY_SIZE(live_status))
3958 			status = live_status[status_val];
3959 	} else {
3960 		static const char * const live_status[] = {
3961 			"IDLE",
3962 			"SRDONACK",
3963 			"SRDENT",
3964 			"BUFOFF",
3965 			"BUFON",
3966 			"AUXACK",
3967 			"SRDOFFACK",
3968 			"SRDENT_ON",
3969 		};
3970 		val = intel_de_read(display,
3971 				    psr_status_reg(display, cpu_transcoder));
3972 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3973 		if (status_val < ARRAY_SIZE(live_status))
3974 			status = live_status[status_val];
3975 	}
3976 
3977 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3978 }
3979 
3980 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3981 				      struct seq_file *m)
3982 {
3983 	struct intel_psr *psr = &intel_dp->psr;
3984 
3985 	seq_printf(m, "Sink support: PSR = %s",
3986 		   str_yes_no(psr->sink_support));
3987 
3988 	if (psr->sink_support)
3989 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3990 	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3991 		seq_printf(m, " (Early Transport)");
3992 	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3993 	seq_printf(m, ", Panel Replay Selective Update = %s",
3994 		   str_yes_no(psr->sink_panel_replay_su_support));
3995 	if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
3996 	    DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3997 		seq_printf(m, " (Early Transport)");
3998 	seq_printf(m, "\n");
3999 }
4000 
4001 static void intel_psr_print_mode(struct intel_dp *intel_dp,
4002 				 struct seq_file *m)
4003 {
4004 	struct intel_psr *psr = &intel_dp->psr;
4005 	const char *status, *mode, *region_et;
4006 
4007 	if (psr->enabled)
4008 		status = " enabled";
4009 	else
4010 		status = "disabled";
4011 
4012 	if (psr->panel_replay_enabled && psr->sel_update_enabled)
4013 		mode = "Panel Replay Selective Update";
4014 	else if (psr->panel_replay_enabled)
4015 		mode = "Panel Replay";
4016 	else if (psr->sel_update_enabled)
4017 		mode = "PSR2";
4018 	else if (psr->enabled)
4019 		mode = "PSR1";
4020 	else
4021 		mode = "";
4022 
4023 	if (psr->su_region_et_enabled)
4024 		region_et = " (Early Transport)";
4025 	else
4026 		region_et = "";
4027 
4028 	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
4029 }
4030 
4031 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
4032 {
4033 	struct intel_display *display = to_intel_display(intel_dp);
4034 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
4035 	struct intel_psr *psr = &intel_dp->psr;
4036 	struct ref_tracker *wakeref;
4037 	bool enabled;
4038 	u32 val, psr2_ctl;
4039 
4040 	intel_psr_sink_capability(intel_dp, m);
4041 
4042 	if (!(psr->sink_support || psr->sink_panel_replay_support))
4043 		return 0;
4044 
4045 	wakeref = intel_display_rpm_get(display);
4046 	mutex_lock(&psr->lock);
4047 
4048 	intel_psr_print_mode(intel_dp, m);
4049 
4050 	if (!psr->enabled) {
4051 		seq_printf(m, "PSR sink not reliable: %s\n",
4052 			   str_yes_no(psr->sink_not_reliable));
4053 
4054 		goto unlock;
4055 	}
4056 
4057 	if (psr->panel_replay_enabled) {
4058 		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
4059 
4060 		if (intel_dp_is_edp(intel_dp))
4061 			psr2_ctl = intel_de_read(display,
4062 						 EDP_PSR2_CTL(display,
4063 							      cpu_transcoder));
4064 
4065 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
4066 	} else if (psr->sel_update_enabled) {
4067 		val = intel_de_read(display,
4068 				    EDP_PSR2_CTL(display, cpu_transcoder));
4069 		enabled = val & EDP_PSR2_ENABLE;
4070 	} else {
4071 		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
4072 		enabled = val & EDP_PSR_ENABLE;
4073 	}
4074 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
4075 		   str_enabled_disabled(enabled), val);
4076 	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
4077 		seq_printf(m, "PSR2_CTL: 0x%08x\n",
4078 			   psr2_ctl);
4079 	psr_source_status(intel_dp, m);
4080 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
4081 		   psr->busy_frontbuffer_bits);
4082 
4083 	/*
4084 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
4085 	 */
4086 	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
4087 	seq_printf(m, "Performance counter: %u\n",
4088 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
4089 
4090 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
4091 		seq_printf(m, "Last attempted entry at: %lld\n",
4092 			   psr->last_entry_attempt);
4093 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
4094 	}
4095 
4096 	if (psr->sel_update_enabled) {
4097 		u32 su_frames_val[3];
4098 		int frame;
4099 
4100 		/*
4101 		 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
4102 		 * (it returns zeros only) and it has been removed on Xe2_LPD.
4103 		 */
4104 		if (DISPLAY_VER(display) < 13) {
4105 			/*
4106 			 * Read all 3 registers beforehand to minimize the chance of
4107 			 * crossing a frame boundary between the register reads.
4108 			 */
4109 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
4110 				val = intel_de_read(display,
4111 						    PSR2_SU_STATUS(display, cpu_transcoder, frame));
4112 				su_frames_val[frame / 3] = val;
4113 			}
4114 
4115 			seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
4116 
4117 			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
4118 				u32 su_blocks;
4119 
4120 				su_blocks = su_frames_val[frame / 3] &
4121 					PSR2_SU_STATUS_MASK(frame);
4122 				su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
4123 				seq_printf(m, "%d\t%d\n", frame, su_blocks);
4124 			}
4125 		}
4126 
4127 		seq_printf(m, "PSR2 selective fetch: %s\n",
4128 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
4129 	}
4130 
4131 unlock:
4132 	mutex_unlock(&psr->lock);
4133 	intel_display_rpm_put(display, wakeref);
4134 
4135 	return 0;
4136 }
4137 
4138 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4139 {
4140 	struct intel_display *display = m->private;
4141 	struct intel_dp *intel_dp = NULL;
4142 	struct intel_encoder *encoder;
4143 
4144 	if (!HAS_PSR(display))
4145 		return -ENODEV;
4146 
4147 	/* Find the first eDP which supports PSR */
4148 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4149 		intel_dp = enc_to_intel_dp(encoder);
4150 		break;
4151 	}
4152 
4153 	if (!intel_dp)
4154 		return -ENODEV;
4155 
4156 	return intel_psr_status(m, intel_dp);
4157 }
4158 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4159 
4160 static int
4161 i915_edp_psr_debug_set(void *data, u64 val)
4162 {
4163 	struct intel_display *display = data;
4164 	struct intel_encoder *encoder;
4165 	int ret = -ENODEV;
4166 
4167 	if (!HAS_PSR(display))
4168 		return ret;
4169 
4170 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4171 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4172 
4173 		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4174 
4175 		// TODO: split to each transcoder's PSR debug state
4176 		with_intel_display_rpm(display)
4177 			ret = intel_psr_debug_set(intel_dp, val);
4178 	}
4179 
4180 	return ret;
4181 }
4182 
4183 static int
4184 i915_edp_psr_debug_get(void *data, u64 *val)
4185 {
4186 	struct intel_display *display = data;
4187 	struct intel_encoder *encoder;
4188 
4189 	if (!HAS_PSR(display))
4190 		return -ENODEV;
4191 
4192 	for_each_intel_encoder_with_psr(display->drm, encoder) {
4193 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4194 
4195 		// TODO: split to each transcoder's PSR debug state
4196 		*val = READ_ONCE(intel_dp->psr.debug);
4197 		return 0;
4198 	}
4199 
4200 	return -ENODEV;
4201 }
4202 
4203 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4204 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4205 			"%llu\n");
4206 
4207 void intel_psr_debugfs_register(struct intel_display *display)
4208 {
4209 	struct dentry *debugfs_root = display->drm->debugfs_root;
4210 
4211 	debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
4212 			    display, &i915_edp_psr_debug_fops);
4213 
4214 	debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
4215 			    display, &i915_edp_psr_status_fops);
4216 }
4217 
4218 static const char *psr_mode_str(struct intel_dp *intel_dp)
4219 {
4220 	if (intel_dp->psr.panel_replay_enabled)
4221 		return "PANEL-REPLAY";
4222 	else if (intel_dp->psr.enabled)
4223 		return "PSR";
4224 
4225 	return "unknown";
4226 }
4227 
4228 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4229 {
4230 	struct intel_connector *connector = m->private;
4231 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4232 	static const char * const sink_status[] = {
4233 		"inactive",
4234 		"transition to active, capture and display",
4235 		"active, display from RFB",
4236 		"active, capture and display on sink device timings",
4237 		"transition to inactive, capture and display, timing re-sync",
4238 		"reserved",
4239 		"reserved",
4240 		"sink internal error",
4241 	};
4242 	const char *str;
4243 	int ret;
4244 	u8 status, error_status;
4245 
4246 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4247 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4248 		return -ENODEV;
4249 	}
4250 
4251 	if (connector->base.status != connector_status_connected)
4252 		return -ENODEV;
4253 
4254 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4255 	if (ret)
4256 		return ret;
4257 
4258 	status &= DP_PSR_SINK_STATE_MASK;
4259 	if (status < ARRAY_SIZE(sink_status))
4260 		str = sink_status[status];
4261 	else
4262 		str = "unknown";
4263 
4264 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4265 
4266 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4267 
4268 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4269 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4270 			    DP_PSR_LINK_CRC_ERROR))
4271 		seq_puts(m, ":\n");
4272 	else
4273 		seq_puts(m, "\n");
4274 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4275 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4276 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4277 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4278 	if (error_status & DP_PSR_LINK_CRC_ERROR)
4279 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4280 
4281 	return ret;
4282 }
4283 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4284 
4285 static int i915_psr_status_show(struct seq_file *m, void *data)
4286 {
4287 	struct intel_connector *connector = m->private;
4288 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4289 
4290 	return intel_psr_status(m, intel_dp);
4291 }
4292 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4293 
4294 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4295 {
4296 	struct intel_display *display = to_intel_display(connector);
4297 	struct dentry *root = connector->base.debugfs_entry;
4298 
4299 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4300 	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4301 		return;
4302 
4303 	debugfs_create_file("i915_psr_sink_status", 0444, root,
4304 			    connector, &i915_psr_sink_status_fops);
4305 
4306 	if (HAS_PSR(display) || HAS_DP20(display))
4307 		debugfs_create_file("i915_psr_status", 0444, root,
4308 				    connector, &i915_psr_status_fops);
4309 }
4310 
4311 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4312 {
4313 	/*
4314 	 * eDP Panel Replay always uses ALPM.
4315 	 * PSR2 uses ALPM, but PSR1 doesn't.
4316 	 */
4317 	return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4318 					     crtc_state->has_panel_replay);
4319 }
4320 
4321 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
4322 				   const struct intel_crtc_state *crtc_state)
4323 {
4324 	return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
4325 }
4326