xref: /linux/drivers/gpu/drm/i915/display/intel_psr.c (revision ea89a742daf4317038fbab6776d36726dd7a1e2a)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27 
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
33 #include "intel_de.h"
34 #include "intel_display_types.h"
35 #include "intel_dp.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
43 
44 /**
45  * DOC: Panel Self Refresh (PSR/SRD)
46  *
47  * Since Haswell the Display controller supports Panel Self-Refresh on display
48  * panels which have a remote frame buffer (RFB) implemented according to the
49  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
50  * standby states when the system is idle but the display is on, as it
51  * completely eliminates display refresh requests to DDR memory as long as the
52  * frame buffer for that display is unchanged.
53  *
54  * Panel Self Refresh must be supported by both Hardware (source) and
55  * Panel (sink).
56  *
57  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58  * to power down the link and memory controller. For DSI panels the same idea
59  * is called "manual mode".
60  *
61  * The implementation uses the hardware-based PSR support which automatically
62  * enters/exits self-refresh mode. The hardware takes care of sending the
63  * required DP aux message and could even retrain the link (that part isn't
64  * enabled yet though). The hardware also keeps track of any frontbuffer
65  * changes to know when to exit self-refresh mode again. Unfortunately that
66  * part doesn't work too well, hence why the i915 PSR support uses the
67  * software frontbuffer tracking to make sure it doesn't miss a screen
68  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69  * get called by the frontbuffer tracking code. Note that because of locking
70  * issues the self-refresh re-enable code is done from a work queue, which
71  * must be correctly synchronized/cancelled when shutting down the pipe.
72  *
73  * DC3CO (DC3 clock off)
74  *
75  * On top of PSR2, GEN12 adds an intermediate power saving state that turns
76  * the clock off automatically during the PSR2 idle state.
77  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
78  * entry/exit allows the HW to enter a low-power state even when page flipping
79  * periodically (for instance a 30fps video playback scenario).
80  *
81  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
82  * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
83  * after 6 frames. If no other flip occurs and that work function runs, DC3CO
84  * is disabled and PSR2 is configured to enter deep sleep, restarting the
85  * cycle in case of another flip.
86  * Front buffer modifications do not trigger DC3CO activation on purpose, as
87  * it would bring a lot of complexity and most modern systems will only use
88  * page flips.
89  */
90 
91 /*
92  * Description of PSR mask bits:
93  *
94  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
95  *
96  *  When unmasked (nearly) all display register writes (e.g. even
97  *  SWF) trigger a PSR exit. Some registers are excluded from this
98  *  and they have a more specific mask (described below). On icl+
99  *  this bit no longer exists and is effectively always set.
100  *
101  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
102  *
103  *  When unmasked (nearly) all pipe/plane register writes
104  *  trigger a PSR exit. Some plane registers are excluded from this
105  *  and they have a more specific mask (described below).
106  *
107  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
110  *
111  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112  *  SPR_SURF/CURBASE are not included in this and instead are
113  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
115  *
116  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
118  *
119  *  When unmasked PSR is blocked as long as the sprite
120  *  plane is enabled. skl+ with their universal planes no
121  *  longer have a mask bit like this, and no plane being
122  *  enabled blocks PSR.
123  *
124  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
126  *
127  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
128  *  this doesn't exist but CURPOS is included in the
129  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
130  *
131  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
133  *
134  *  When unmasked PSR is blocked as long as vblank and/or vsync
135  *  interrupt is unmasked in IMR *and* enabled in IER.
136  *
137  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
139  *
140  *  Selects whether PSR exit generates an extra vblank before
141  *  the first frame is transmitted. Also note the opposite polarity
142  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143  *  unmasked==do not generate the extra vblank).
144  *
145  *  With DC states enabled the extra vblank happens after link training,
146  *  with DC states disabled it happens immediately upon PSR exit trigger.
147  *  No idea as of now why there is a difference. HSW/BDW (which don't
148  *  even have DMC) always generate it after link training. Go figure.
149  *
150  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
151  *  and thus won't latch until the first vblank. So with DC states
152  *  enabled the register effectively uses the reset value during DC5
153  *  exit+PSR exit sequence, and thus the bit does nothing until
154  *  latched by the vblank that it was trying to prevent from being
155  *  generated in the first place. So we should probably call this
156  *  one a chicken/egg bit instead on skl+.
157  *
158  *  In standby mode (as opposed to link-off) this makes no difference
159  *  as the timing generator keeps running the whole time generating
160  *  normal periodic vblanks.
161  *
162  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163  *  and doing so makes the behaviour match the skl+ reset value.
164  *
165  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
167  *
168  *  On BDW without this bit no vblanks whatsoever are
169  *  generated after PSR exit. On HSW this has no apparent effect.
170  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
171  *
172  * The rest of the bits are more self-explanatory and/or
173  * irrelevant for normal operation.
174  */
175 
176 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
177 			   (intel_dp)->psr.source_support)
178 
179 #define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
180 				    (intel_dp)->psr.source_panel_replay_support)
181 
182 bool intel_encoder_can_psr(struct intel_encoder *encoder)
183 {
184 	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
185 		return CAN_PSR(enc_to_intel_dp(encoder)) ||
186 		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
187 	else
188 		return false;
189 }
190 
191 static bool psr_global_enabled(struct intel_dp *intel_dp)
192 {
193 	struct intel_connector *connector = intel_dp->attached_connector;
194 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
195 
196 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
197 	case I915_PSR_DEBUG_DEFAULT:
198 		if (i915->display.params.enable_psr == -1)
199 			return connector->panel.vbt.psr.enable;
200 		return i915->display.params.enable_psr;
201 	case I915_PSR_DEBUG_DISABLE:
202 		return false;
203 	default:
204 		return true;
205 	}
206 }
207 
208 static bool psr2_global_enabled(struct intel_dp *intel_dp)
209 {
210 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
211 
212 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
213 	case I915_PSR_DEBUG_DISABLE:
214 	case I915_PSR_DEBUG_FORCE_PSR1:
215 		return false;
216 	default:
217 		if (i915->display.params.enable_psr == 1)
218 			return false;
219 		return true;
220 	}
221 }
222 
223 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
224 {
225 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
226 
227 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
228 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
229 }
230 
231 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
232 {
233 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
234 
235 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
236 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
237 }
238 
239 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
240 {
241 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
242 
243 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
244 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
245 }
246 
247 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
248 {
249 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
250 
251 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
252 		EDP_PSR_MASK(intel_dp->psr.transcoder);
253 }
254 
255 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
256 			      enum transcoder cpu_transcoder)
257 {
258 	if (DISPLAY_VER(dev_priv) >= 8)
259 		return EDP_PSR_CTL(cpu_transcoder);
260 	else
261 		return HSW_SRD_CTL;
262 }
263 
264 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
265 				enum transcoder cpu_transcoder)
266 {
267 	if (DISPLAY_VER(dev_priv) >= 8)
268 		return EDP_PSR_DEBUG(cpu_transcoder);
269 	else
270 		return HSW_SRD_DEBUG;
271 }
272 
273 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
274 				   enum transcoder cpu_transcoder)
275 {
276 	if (DISPLAY_VER(dev_priv) >= 8)
277 		return EDP_PSR_PERF_CNT(cpu_transcoder);
278 	else
279 		return HSW_SRD_PERF_CNT;
280 }
281 
282 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
283 				 enum transcoder cpu_transcoder)
284 {
285 	if (DISPLAY_VER(dev_priv) >= 8)
286 		return EDP_PSR_STATUS(cpu_transcoder);
287 	else
288 		return HSW_SRD_STATUS;
289 }
290 
291 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
292 			      enum transcoder cpu_transcoder)
293 {
294 	if (DISPLAY_VER(dev_priv) >= 12)
295 		return TRANS_PSR_IMR(cpu_transcoder);
296 	else
297 		return EDP_PSR_IMR;
298 }
299 
300 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
301 			      enum transcoder cpu_transcoder)
302 {
303 	if (DISPLAY_VER(dev_priv) >= 12)
304 		return TRANS_PSR_IIR(cpu_transcoder);
305 	else
306 		return EDP_PSR_IIR;
307 }
308 
309 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
310 				  enum transcoder cpu_transcoder)
311 {
312 	if (DISPLAY_VER(dev_priv) >= 8)
313 		return EDP_PSR_AUX_CTL(cpu_transcoder);
314 	else
315 		return HSW_SRD_AUX_CTL;
316 }
317 
318 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
319 				   enum transcoder cpu_transcoder, int i)
320 {
321 	if (DISPLAY_VER(dev_priv) >= 8)
322 		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
323 	else
324 		return HSW_SRD_AUX_DATA(i);
325 }
326 
327 static void psr_irq_control(struct intel_dp *intel_dp)
328 {
329 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
330 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
331 	u32 mask;
332 
333 	mask = psr_irq_psr_error_bit_get(intel_dp);
334 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
335 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
336 			psr_irq_pre_entry_bit_get(intel_dp);
337 
338 	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
339 		     psr_irq_mask_get(intel_dp), ~mask);
340 }
341 
342 static void psr_event_print(struct drm_i915_private *i915,
343 			    u32 val, bool psr2_enabled)
344 {
345 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
346 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
347 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
348 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
349 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
350 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
351 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
352 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
353 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
354 	if (val & PSR_EVENT_GRAPHICS_RESET)
355 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
356 	if (val & PSR_EVENT_PCH_INTERRUPT)
357 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
358 	if (val & PSR_EVENT_MEMORY_UP)
359 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
360 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
361 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
362 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
363 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
364 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
365 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
366 	if (val & PSR_EVENT_REGISTER_UPDATE)
367 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
368 	if (val & PSR_EVENT_HDCP_ENABLE)
369 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
370 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
371 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
372 	if (val & PSR_EVENT_VBI_ENABLE)
373 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
374 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
375 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
376 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
377 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
378 }
379 
380 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
381 {
382 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
383 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
384 	ktime_t time_ns =  ktime_get();
385 
386 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
387 		intel_dp->psr.last_entry_attempt = time_ns;
388 		drm_dbg_kms(&dev_priv->drm,
389 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
390 			    transcoder_name(cpu_transcoder));
391 	}
392 
393 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
394 		intel_dp->psr.last_exit = time_ns;
395 		drm_dbg_kms(&dev_priv->drm,
396 			    "[transcoder %s] PSR exit completed\n",
397 			    transcoder_name(cpu_transcoder));
398 
399 		if (DISPLAY_VER(dev_priv) >= 9) {
400 			u32 val;
401 
402 			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
403 
404 			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
405 		}
406 	}
407 
408 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
409 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
410 			 transcoder_name(cpu_transcoder));
411 
412 		intel_dp->psr.irq_aux_error = true;
413 
414 		/*
415 		 * If this interrupt is not masked it will keep
416 		 * firing so fast that it prevents the scheduled
417 		 * work from running.
418 		 * Also, after a PSR error we don't want to arm PSR
419 		 * again, so we don't care about unmasking the interrupt
420 		 * or clearing irq_aux_error.
421 		 */
422 		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
423 			     0, psr_irq_psr_error_bit_get(intel_dp));
424 
425 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
426 	}
427 }
428 
429 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
430 {
431 	u8 alpm_caps = 0;
432 
433 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
434 			      &alpm_caps) != 1)
435 		return false;
436 	return alpm_caps & DP_ALPM_CAP;
437 }
438 
439 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
440 {
441 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
442 	u8 val = 8; /* assume the worst if we can't read the value */
443 
444 	if (drm_dp_dpcd_readb(&intel_dp->aux,
445 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
446 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
447 	else
448 		drm_dbg_kms(&i915->drm,
449 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
450 	return val;
451 }
452 
453 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
454 {
455 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
456 	ssize_t r;
457 	u16 w;
458 	u8 y;
459 
460 	/* If the sink doesn't have specific granularity requirements, set legacy ones */
461 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
462 		/* As PSR2 HW sends full lines, we do not care about x granularity */
463 		w = 4;
464 		y = 4;
465 		goto exit;
466 	}
467 
468 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
469 	if (r != 2)
470 		drm_dbg_kms(&i915->drm,
471 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
472 	/*
473 	 * Spec says that if the value read is 0 the default granularity should
474 	 * be used instead.
475 	 */
476 	if (r != 2 || w == 0)
477 		w = 4;
478 
479 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
480 	if (r != 1) {
481 		drm_dbg_kms(&i915->drm,
482 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
483 		y = 4;
484 	}
485 	if (y == 0)
486 		y = 1;
487 
488 exit:
489 	intel_dp->psr.su_w_granularity = w;
490 	intel_dp->psr.su_y_granularity = y;
491 }
492 
493 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
494 {
495 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
496 	u8 pr_dpcd = 0;
497 
498 	intel_dp->psr.sink_panel_replay_support = false;
499 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
500 
501 	if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
502 		drm_dbg_kms(&i915->drm,
503 			    "Panel replay is not supported by panel\n");
504 		return;
505 	}
506 
507 	drm_dbg_kms(&i915->drm,
508 		    "Panel replay is supported by panel\n");
509 	intel_dp->psr.sink_panel_replay_support = true;
510 }
511 
512 static void _psr_init_dpcd(struct intel_dp *intel_dp)
513 {
514 	struct drm_i915_private *i915 =
515 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
516 
517 	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
518 		    intel_dp->psr_dpcd[0]);
519 
520 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
521 		drm_dbg_kms(&i915->drm,
522 			    "PSR support not currently available for this panel\n");
523 		return;
524 	}
525 
526 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
527 		drm_dbg_kms(&i915->drm,
528 			    "Panel lacks power state control, PSR cannot be enabled\n");
529 		return;
530 	}
531 
532 	intel_dp->psr.sink_support = true;
533 	intel_dp->psr.sink_sync_latency =
534 		intel_dp_get_sink_sync_latency(intel_dp);
535 
536 	if (DISPLAY_VER(i915) >= 9 &&
537 	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
538 		bool y_req = intel_dp->psr_dpcd[1] &
539 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
540 		bool alpm = intel_dp_get_alpm_status(intel_dp);
541 
542 		/*
543 		 * All panels that support PSR version 03h (PSR2 +
544 		 * Y-coordinate) can handle Y-coordinates in the VSC, but we are
545 		 * only sure that it is going to be used when required by the
546 		 * panel. This way the panel is capable of doing selective updates
547 		 * without an aux frame sync.
548 		 *
549 		 * To support panels with PSR version 02h, or PSR version 03h
550 		 * without the Y-coordinate requirement, we would need to enable
551 		 * GTC first.
552 		 */
553 		intel_dp->psr.sink_psr2_support = y_req && alpm;
554 		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
555 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
556 	}
557 }
558 
559 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
560 {
561 	_panel_replay_init_dpcd(intel_dp);
562 
563 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
564 			 sizeof(intel_dp->psr_dpcd));
565 
566 	if (intel_dp->psr_dpcd[0])
567 		_psr_init_dpcd(intel_dp);
568 
569 	if (intel_dp->psr.sink_psr2_support)
570 		intel_dp_get_su_granularity(intel_dp);
571 }
572 
573 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
574 {
575 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
576 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
577 	u32 aux_clock_divider, aux_ctl;
578 	/* write DP_SET_POWER=D0 */
579 	static const u8 aux_msg[] = {
580 		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
581 		[1] = (DP_SET_POWER >> 8) & 0xff,
582 		[2] = DP_SET_POWER & 0xff,
583 		[3] = 1 - 1,
584 		[4] = DP_SET_POWER_D0,
585 	};
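
	/*
	 * Illustrative sketch (not taken from Bspec): assuming the standard
	 * DPCD address DP_SET_POWER = 0x600 and DP_SET_POWER_D0 = 0x1, the
	 * message above packs to the bytes 0x80 0x06 0x00 0x00 0x01, i.e. a
	 * native AUX write header (command 0x8, address 0x600, length field
	 * 1 - 1 = 0) followed by the single D0 payload byte.
	 */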
586 	int i;
587 
588 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
589 	for (i = 0; i < sizeof(aux_msg); i += 4)
590 		intel_de_write(dev_priv,
591 			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
592 			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
593 
594 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
595 
596 	/* Start with bits set for DDI_AUX_CTL register */
597 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
598 					     aux_clock_divider);
599 
600 	/* Select only valid bits for SRD_AUX_CTL */
601 	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
602 		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
603 		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
604 		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
605 
606 	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
607 		       aux_ctl);
608 }
609 
610 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
611 {
612 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
613 
614 	if (DISPLAY_VER(i915) >= 20 &&
615 	    intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
616 	    !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
617 		return true;
618 
619 	return false;
620 }
621 
622 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
623 {
624 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
625 	u8 dpcd_val = DP_PSR_ENABLE;
626 
627 	if (intel_dp->psr.panel_replay_enabled)
628 		return;
629 
630 	if (intel_dp->psr.psr2_enabled) {
631 		/* Enable ALPM at sink for psr2 */
632 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
633 				   DP_ALPM_ENABLE |
634 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
635 
636 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
637 		if (psr2_su_region_et_valid(intel_dp))
638 			dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
639 	} else {
640 		if (intel_dp->psr.link_standby)
641 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
642 
643 		if (DISPLAY_VER(dev_priv) >= 8)
644 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
645 	}
646 
647 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
648 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
649 
650 	if (intel_dp->psr.entry_setup_frames > 0)
651 		dpcd_val |= DP_PSR_FRAME_CAPTURE;
652 
653 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
654 
655 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
656 }
657 
658 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
659 {
660 	struct intel_connector *connector = intel_dp->attached_connector;
661 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
662 	u32 val = 0;
663 
664 	if (DISPLAY_VER(dev_priv) >= 11)
665 		val |= EDP_PSR_TP4_TIME_0us;
666 
667 	if (dev_priv->display.params.psr_safest_params) {
668 		val |= EDP_PSR_TP1_TIME_2500us;
669 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
670 		goto check_tp3_sel;
671 	}
672 
673 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
674 		val |= EDP_PSR_TP1_TIME_0us;
675 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
676 		val |= EDP_PSR_TP1_TIME_100us;
677 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
678 		val |= EDP_PSR_TP1_TIME_500us;
679 	else
680 		val |= EDP_PSR_TP1_TIME_2500us;
681 
682 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
683 		val |= EDP_PSR_TP2_TP3_TIME_0us;
684 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
685 		val |= EDP_PSR_TP2_TP3_TIME_100us;
686 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
687 		val |= EDP_PSR_TP2_TP3_TIME_500us;
688 	else
689 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
690 
691 	/*
692 	 * WA 0479: hsw,bdw
693 	 * "Do not skip both TP1 and TP2/TP3"
694 	 */
695 	if (DISPLAY_VER(dev_priv) < 9 &&
696 	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
697 	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
698 		val |= EDP_PSR_TP2_TP3_TIME_100us;
699 
700 check_tp3_sel:
701 	if (intel_dp_source_supports_tps3(dev_priv) &&
702 	    drm_dp_tps3_supported(intel_dp->dpcd))
703 		val |= EDP_PSR_TP_TP1_TP3;
704 	else
705 		val |= EDP_PSR_TP_TP1_TP2;
706 
707 	return val;
708 }
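
/*
 * Worked example (hypothetical VBT values): with tp1_wakeup_time_us = 200 and
 * tp2_tp3_wakeup_time_us = 50, the buckets above select EDP_PSR_TP1_TIME_500us
 * (200 is above 100 but at most 500) and EDP_PSR_TP2_TP3_TIME_100us (50 is at
 * most 100); the hsw/bdw WA 0479 adjustment does not apply since TP1 is not
 * skipped.
 */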
709 
710 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
711 {
712 	struct intel_connector *connector = intel_dp->attached_connector;
713 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
714 	int idle_frames;
715 
716 	/* Let's use 6 as the minimum to cover all known cases including the
717 	 * off-by-one issue that HW has in some cases.
718 	 */
719 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
720 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
721 
722 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
723 		idle_frames = 0xf;
724 
725 	return idle_frames;
726 }
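
/*
 * Worked example (hypothetical values): with a VBT idle_frames of 2 and a sink
 * sync latency of 8 frames, the result is max(max(6, 2), 8 + 1) = 9 idle
 * frames; anything above 0xf would be clamped (with a WARN) to 0xf.
 */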
727 
728 static void hsw_activate_psr1(struct intel_dp *intel_dp)
729 {
730 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
732 	u32 max_sleep_time = 0x1f;
733 	u32 val = EDP_PSR_ENABLE;
734 
735 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
736 
737 	if (DISPLAY_VER(dev_priv) < 20)
738 		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
739 
740 	if (IS_HASWELL(dev_priv))
741 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
742 
743 	if (intel_dp->psr.link_standby)
744 		val |= EDP_PSR_LINK_STANDBY;
745 
746 	val |= intel_psr1_get_tp_time(intel_dp);
747 
748 	if (DISPLAY_VER(dev_priv) >= 8)
749 		val |= EDP_PSR_CRC_ENABLE;
750 
751 	if (DISPLAY_VER(dev_priv) >= 20)
752 		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
753 
754 	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
755 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
756 }
757 
758 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
759 {
760 	struct intel_connector *connector = intel_dp->attached_connector;
761 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
762 	u32 val = 0;
763 
764 	if (dev_priv->display.params.psr_safest_params)
765 		return EDP_PSR2_TP2_TIME_2500us;
766 
767 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
768 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
769 		val |= EDP_PSR2_TP2_TIME_50us;
770 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
771 		val |= EDP_PSR2_TP2_TIME_100us;
772 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
773 		val |= EDP_PSR2_TP2_TIME_500us;
774 	else
775 		val |= EDP_PSR2_TP2_TIME_2500us;
776 
777 	return val;
778 }
779 
780 static int psr2_block_count_lines(struct intel_dp *intel_dp)
781 {
782 	return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
783 		intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
784 }
785 
786 static int psr2_block_count(struct intel_dp *intel_dp)
787 {
788 	return psr2_block_count_lines(intel_dp) / 4;
789 }
790 
791 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
792 {
793 	u8 frames_before_su_entry;
794 
795 	frames_before_su_entry = max_t(u8,
796 				       intel_dp->psr.sink_sync_latency + 1,
797 				       2);
798 
799 	/* Entry setup frames must be at least 1 less than frames before SU entry */
800 	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
801 		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
802 
803 	return frames_before_su_entry;
804 }
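
/*
 * Worked example (hypothetical values): sink_sync_latency = 1 gives
 * max(1 + 1, 2) = 2 frames before SU entry; if entry_setup_frames is 3, the
 * result is bumped to 3 + 1 = 4 so the setup frames stay at least one below it.
 */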
805 
806 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
807 {
808 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
809 
810 	intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
811 		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
812 
813 	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
814 		     TRANS_DP2_PANEL_REPLAY_ENABLE);
815 }
816 
817 static void hsw_activate_psr2(struct intel_dp *intel_dp)
818 {
819 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
820 	struct intel_psr *psr = &intel_dp->psr;
821 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
822 	u32 val = EDP_PSR2_ENABLE;
823 	u32 psr_val = 0;
824 
825 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
826 
827 	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
828 		val |= EDP_SU_TRACK_ENABLE;
829 
830 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
831 		val |= EDP_Y_COORDINATE_ENABLE;
832 
833 	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
834 
835 	val |= intel_psr2_get_tp_time(intel_dp);
836 
837 	if (DISPLAY_VER(dev_priv) >= 12) {
838 		if (psr2_block_count(intel_dp) > 2)
839 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
840 		else
841 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
842 	}
843 
844 	/* Wa_22012278275:adl-p */
845 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
846 		static const u8 map[] = {
847 			2, /* 5 lines */
848 			1, /* 6 lines */
849 			0, /* 7 lines */
850 			3, /* 8 lines */
851 			6, /* 9 lines */
852 			5, /* 10 lines */
853 			4, /* 11 lines */
854 			7, /* 12 lines */
855 		};
856 		/*
857 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
858 		 * comments below for more information
859 		 */
860 		int tmp;
861 
862 		tmp = map[psr->alpm_parameters.io_wake_lines -
863 			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
864 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
865 
866 		tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
867 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
868 	} else if (DISPLAY_VER(dev_priv) >= 12) {
869 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
870 		val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
871 	} else if (DISPLAY_VER(dev_priv) >= 9) {
872 		val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
873 		val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
874 	}
875 
876 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
877 		val |= EDP_PSR2_SU_SDP_SCANLINE;
878 
879 	if (DISPLAY_VER(dev_priv) >= 20)
880 		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
881 
882 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
883 		u32 tmp;
884 
885 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
886 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
887 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
888 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
889 	}
890 
891 	if (psr2_su_region_et_valid(intel_dp))
892 		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
893 
894 	/*
895 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
896 	 * recommending to keep this bit unset while PSR2 is enabled.
897 	 */
898 	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
899 
900 	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
901 }
902 
903 static bool
904 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
905 {
906 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
907 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
908 	else if (DISPLAY_VER(dev_priv) >= 12)
909 		return cpu_transcoder == TRANSCODER_A;
910 	else if (DISPLAY_VER(dev_priv) >= 9)
911 		return cpu_transcoder == TRANSCODER_EDP;
912 	else
913 		return false;
914 }
915 
916 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
917 {
918 	if (!crtc_state->hw.active)
919 		return 0;
920 
921 	return DIV_ROUND_UP(1000 * 1000,
922 			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
923 }
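
/*
 * Worked example: for a 60 Hz mode this returns DIV_ROUND_UP(1000 * 1000, 60)
 * = 16667 us, i.e. roughly one frame period; an inactive crtc returns 0.
 */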
924 
925 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
926 				     u32 idle_frames)
927 {
928 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
929 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
930 
931 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
932 		     EDP_PSR2_IDLE_FRAMES_MASK,
933 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
934 }
935 
936 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
937 {
938 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
939 
940 	psr2_program_idle_frames(intel_dp, 0);
941 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
942 }
943 
944 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
945 {
946 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
947 
948 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
949 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
950 }
951 
952 static void tgl_dc3co_disable_work(struct work_struct *work)
953 {
954 	struct intel_dp *intel_dp =
955 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
956 
957 	mutex_lock(&intel_dp->psr.lock);
958 	/* If delayed work is pending, it is not idle */
959 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
960 		goto unlock;
961 
962 	tgl_psr2_disable_dc3co(intel_dp);
963 unlock:
964 	mutex_unlock(&intel_dp->psr.lock);
965 }
966 
967 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
968 {
969 	if (!intel_dp->psr.dc3co_exitline)
970 		return;
971 
972 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
973 	/* Before PSR2 exit disallow dc3co */
974 	tgl_psr2_disable_dc3co(intel_dp);
975 }
976 
977 static bool
978 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
979 			      struct intel_crtc_state *crtc_state)
980 {
981 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
982 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
983 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
984 	enum port port = dig_port->base.port;
985 
986 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
987 		return pipe <= PIPE_B && port <= PORT_B;
988 	else
989 		return pipe == PIPE_A && port == PORT_A;
990 }
991 
992 static void
993 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
994 				  struct intel_crtc_state *crtc_state)
995 {
996 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
997 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
998 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
999 	u32 exit_scanlines;
1000 
1001 	/*
1002 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1003 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1004 	 * is applied. B.Specs:49196
1005 	 */
1006 	return;
1007 
1008 	/*
1009 	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
1010 	 * TODO: when the issue is addressed, this restriction should be removed.
1011 	 */
1012 	if (crtc_state->enable_psr2_sel_fetch)
1013 		return;
1014 
1015 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1016 		return;
1017 
1018 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1019 		return;
1020 
1021 	/* Wa_16011303918:adl-p */
1022 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1023 		return;
1024 
1025 	/*
1026 	 * DC3CO Exit time 200us B.Spec 49196
1027 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1028 	 */
1029 	exit_scanlines =
1030 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1031 
1032 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1033 		return;
1034 
1035 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1036 }
1037 
1038 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1039 					      struct intel_crtc_state *crtc_state)
1040 {
1041 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1042 
1043 	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1044 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1045 		drm_dbg_kms(&dev_priv->drm,
1046 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1047 		return false;
1048 	}
1049 
1050 	if (crtc_state->uapi.async_flip) {
1051 		drm_dbg_kms(&dev_priv->drm,
1052 			    "PSR2 sel fetch not enabled, async flip enabled\n");
1053 		return false;
1054 	}
1055 
1056 	if (psr2_su_region_et_valid(intel_dp))
1057 		crtc_state->enable_psr2_su_region_et = true;
1058 
1059 	return crtc_state->enable_psr2_sel_fetch = true;
1060 }
1061 
1062 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1063 				   struct intel_crtc_state *crtc_state)
1064 {
1065 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1066 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1067 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1068 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1069 	u16 y_granularity = 0;
1070 
1071 	/* PSR2 HW only sends full lines, so we only need to validate the width */
1072 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1073 		return false;
1074 
1075 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1076 		return false;
1077 
1078 	/* HW tracking is only aligned to 4 lines */
1079 	if (!crtc_state->enable_psr2_sel_fetch)
1080 		return intel_dp->psr.su_y_granularity == 4;
1081 
1082 	/*
1083 	 * adl_p and mtl platforms have 1 line granularity.
1084 	 * For other platforms with SW tracking we can adjust the y coordinates
1085 	 * to match the sink requirement if it is a multiple of 4.
1086 	 */
1087 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1088 		y_granularity = intel_dp->psr.su_y_granularity;
1089 	else if (intel_dp->psr.su_y_granularity <= 2)
1090 		y_granularity = 4;
1091 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1092 		y_granularity = intel_dp->psr.su_y_granularity;
1093 
1094 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1095 		return false;
1096 
1097 	if (crtc_state->dsc.compression_enable &&
1098 	    vdsc_cfg->slice_height % y_granularity)
1099 		return false;
1100 
1101 	crtc_state->su_y_granularity = y_granularity;
1102 	return true;
1103 }
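
/*
 * Worked example (hypothetical sink values): with selective fetch enabled on a
 * pre-adl_p platform and a sink su_y_granularity of 2, the code above picks a
 * y_granularity of 4, which must then also divide crtc_vdisplay and, when DSC
 * is enabled, the DSC slice height.
 */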
1104 
1105 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1106 							struct intel_crtc_state *crtc_state)
1107 {
1108 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1109 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1110 	u32 hblank_total, hblank_ns, req_ns;
1111 
1112 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1113 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1114 
1115 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1116 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1117 
1118 	if ((hblank_ns - req_ns) > 100)
1119 		return true;
1120 
1121 	/* Not supported <13 / Wa_22012279113:adl-p */
1122 	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1123 		return false;
1124 
1125 	crtc_state->req_psr2_sdp_prior_scanline = true;
1126 	return true;
1127 }
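
/*
 * Worked example (hypothetical timings): with a 160 pixel hblank at a 300 MHz
 * pixel clock, hblank_ns = 1000000 * 160 / 300000 = 533 ns; with 4 lanes at an
 * HBR2 port_clock of 540000 kHz, req_ns = ((60 / 4) + 11) * 1000 / 540 = 48 ns,
 * so hblank_ns - req_ns = 485 > 100 and the SDP-prior-scanline indication is
 * not needed.
 */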
1128 
1129 static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
1130 				     struct intel_crtc_state *crtc_state)
1131 {
1132 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1133 	int check_entry_lines;
1134 
1135 	if (DISPLAY_VER(i915) < 20)
1136 		return true;
1137 
1138 	/* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
1139 	check_entry_lines = 2 +
1140 		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
1141 
1142 	if (check_entry_lines > 15)
1143 		return false;
1144 
1145 	if (i915->display.params.psr_safest_params)
1146 		check_entry_lines = 15;
1147 
1148 	intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
1149 
1150 	return true;
1151 }
1152 
1153 static bool _compute_alpm_params(struct intel_dp *intel_dp,
1154 				 struct intel_crtc_state *crtc_state)
1155 {
1156 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1157 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1158 	u8 max_wake_lines;
1159 
1160 	if (DISPLAY_VER(i915) >= 12) {
1161 		io_wake_time = 42;
1162 		/*
1163 		 * According to Bspec the fast wake time is 42us, but based on testing
1164 		 * it is not enough -> use 45 us.
1165 		 */
1166 		fast_wake_time = 45;
1167 
1168 		/* TODO: Check how we can use ALPM_CTL fast wake extended field */
1169 		max_wake_lines = 12;
1170 	} else {
1171 		io_wake_time = 50;
1172 		fast_wake_time = 32;
1173 		max_wake_lines = 8;
1174 	}
1175 
1176 	io_wake_lines = intel_usecs_to_scanlines(
1177 		&crtc_state->hw.adjusted_mode, io_wake_time);
1178 	fast_wake_lines = intel_usecs_to_scanlines(
1179 		&crtc_state->hw.adjusted_mode, fast_wake_time);
1180 
1181 	if (io_wake_lines > max_wake_lines ||
1182 	    fast_wake_lines > max_wake_lines)
1183 		return false;
1184 
1185 	if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
1186 		return false;
1187 
1188 	if (i915->display.params.psr_safest_params)
1189 		io_wake_lines = fast_wake_lines = max_wake_lines;
1190 
1191 	/* According to Bspec lower limit should be set as 7 lines. */
1192 	intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
1193 	intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
1194 
1195 	return true;
1196 }
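
/*
 * Worked example (hypothetical 1080p mode with a ~14.8 us line time): on
 * DISPLAY_VER >= 12 the 42 us / 45 us budgets convert to roughly 3 and 4
 * scanlines, both under the 12 line maximum, and are then raised to the Bspec
 * lower limit of 7 lines before being stored.
 */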
1197 
1198 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1199 					const struct drm_display_mode *adjusted_mode)
1200 {
1201 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1202 	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1203 	int entry_setup_frames = 0;
1204 
1205 	if (psr_setup_time < 0) {
1206 		drm_dbg_kms(&i915->drm,
1207 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1208 			    intel_dp->psr_dpcd[1]);
1209 		return -ETIME;
1210 	}
1211 
1212 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1213 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1214 		if (DISPLAY_VER(i915) >= 20) {
1215 			/* setup entry frames can be up to 3 frames */
1216 			entry_setup_frames = 1;
1217 			drm_dbg_kms(&i915->drm,
1218 				    "PSR setup entry frames %d\n",
1219 				    entry_setup_frames);
1220 		} else {
1221 			drm_dbg_kms(&i915->drm,
1222 				    "PSR condition failed: PSR setup time (%d us) too long\n",
1223 				    psr_setup_time);
1224 			return -ETIME;
1225 		}
1226 	}
1227 
1228 	return entry_setup_frames;
1229 }
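
/*
 * Worked example (hypothetical panel): a DPCD PSR setup time of 330 us on a
 * 1080p mode with a ~14.8 us line time needs about 23 scanlines; with
 * crtc_vtotal = 1125 that fits in the 1125 - 1080 - 1 = 44 line vblank, so no
 * extra entry setup frames are required and 0 is returned.
 */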
1230 
1231 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1232 				    struct intel_crtc_state *crtc_state)
1233 {
1234 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1235 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1236 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1237 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1238 
1239 	if (!intel_dp->psr.sink_psr2_support)
1240 		return false;
1241 
1242 	/* JSL and EHL only support eDP 1.3 */
1243 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1244 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1245 		return false;
1246 	}
1247 
1248 	/* Wa_16011181250 */
1249 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1250 	    IS_DG2(dev_priv)) {
1251 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1252 		return false;
1253 	}
1254 
1255 	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1256 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1257 		return false;
1258 	}
1259 
1260 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1261 		drm_dbg_kms(&dev_priv->drm,
1262 			    "PSR2 not supported in transcoder %s\n",
1263 			    transcoder_name(crtc_state->cpu_transcoder));
1264 		return false;
1265 	}
1266 
1267 	if (!psr2_global_enabled(intel_dp)) {
1268 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1269 		return false;
1270 	}
1271 
1272 	/*
1273 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1274 	 * resolution requires DSC to be enabled, priority is given to DSC
1275 	 * over PSR2.
1276 	 */
1277 	if (crtc_state->dsc.compression_enable &&
1278 	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1279 		drm_dbg_kms(&dev_priv->drm,
1280 			    "PSR2 cannot be enabled since DSC is enabled\n");
1281 		return false;
1282 	}
1283 
1284 	if (crtc_state->crc_enabled) {
1285 		drm_dbg_kms(&dev_priv->drm,
1286 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1287 		return false;
1288 	}
1289 
1290 	if (DISPLAY_VER(dev_priv) >= 12) {
1291 		psr_max_h = 5120;
1292 		psr_max_v = 3200;
1293 		max_bpp = 30;
1294 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1295 		psr_max_h = 4096;
1296 		psr_max_v = 2304;
1297 		max_bpp = 24;
1298 	} else if (DISPLAY_VER(dev_priv) == 9) {
1299 		psr_max_h = 3640;
1300 		psr_max_v = 2304;
1301 		max_bpp = 24;
1302 	}
1303 
1304 	if (crtc_state->pipe_bpp > max_bpp) {
1305 		drm_dbg_kms(&dev_priv->drm,
1306 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1307 			    crtc_state->pipe_bpp, max_bpp);
1308 		return false;
1309 	}
1310 
1311 	/* Wa_16011303918:adl-p */
1312 	if (crtc_state->vrr.enable &&
1313 	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1314 		drm_dbg_kms(&dev_priv->drm,
1315 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1316 		return false;
1317 	}
1318 
1319 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1320 		drm_dbg_kms(&dev_priv->drm,
1321 			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1322 		return false;
1323 	}
1324 
1325 	if (!_compute_alpm_params(intel_dp, crtc_state)) {
1326 		drm_dbg_kms(&dev_priv->drm,
1327 			    "PSR2 not enabled, Unable to use long enough wake times\n");
1328 		return false;
1329 	}
1330 
1331 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1332 	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1333 	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
1334 	    psr2_block_count_lines(intel_dp)) {
1335 		drm_dbg_kms(&dev_priv->drm,
1336 			    "PSR2 not enabled, too short vblank time\n");
1337 		return false;
1338 	}
1339 
1340 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1341 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1342 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
1343 			drm_dbg_kms(&dev_priv->drm,
1344 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1345 			return false;
1346 		}
1347 	}
1348 
1349 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1350 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1351 		goto unsupported;
1352 	}
1353 
1354 	if (!crtc_state->enable_psr2_sel_fetch &&
1355 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1356 		drm_dbg_kms(&dev_priv->drm,
1357 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1358 			    crtc_hdisplay, crtc_vdisplay,
1359 			    psr_max_h, psr_max_v);
1360 		goto unsupported;
1361 	}
1362 
1363 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1364 	return true;
1365 
1366 unsupported:
1367 	crtc_state->enable_psr2_sel_fetch = false;
1368 	return false;
1369 }
1370 
1371 static bool _psr_compute_config(struct intel_dp *intel_dp,
1372 				struct intel_crtc_state *crtc_state)
1373 {
1374 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1375 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1376 	int entry_setup_frames;
1377 
1378 	/*
1379 	 * Current PSR panels don't work reliably with VRR enabled,
1380 	 * so if VRR is enabled, do not enable PSR.
1381 	 */
1382 	if (crtc_state->vrr.enable)
1383 		return false;
1384 
1385 	if (!CAN_PSR(intel_dp))
1386 		return false;
1387 
1388 	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1389 
1390 	if (entry_setup_frames >= 0) {
1391 		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1392 	} else {
1393 		drm_dbg_kms(&dev_priv->drm,
1394 			    "PSR condition failed: PSR setup timing not met\n");
1395 		return false;
1396 	}
1397 
1398 	return true;
1399 }
1400 
1401 void intel_psr_compute_config(struct intel_dp *intel_dp,
1402 			      struct intel_crtc_state *crtc_state,
1403 			      struct drm_connector_state *conn_state)
1404 {
1405 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1406 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1407 
1408 	if (!psr_global_enabled(intel_dp)) {
1409 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1410 		return;
1411 	}
1412 
1413 	if (intel_dp->psr.sink_not_reliable) {
1414 		drm_dbg_kms(&dev_priv->drm,
1415 			    "PSR sink implementation is not reliable\n");
1416 		return;
1417 	}
1418 
1419 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1420 		drm_dbg_kms(&dev_priv->drm,
1421 			    "PSR condition failed: Interlaced mode enabled\n");
1422 		return;
1423 	}
1424 
1425 	if (CAN_PANEL_REPLAY(intel_dp))
1426 		crtc_state->has_panel_replay = true;
1427 	else
1428 		crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1429 
1430 	if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1431 		return;
1432 
1433 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1434 }
1435 
1436 void intel_psr_get_config(struct intel_encoder *encoder,
1437 			  struct intel_crtc_state *pipe_config)
1438 {
1439 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1440 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1441 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1442 	struct intel_dp *intel_dp;
1443 	u32 val;
1444 
1445 	if (!dig_port)
1446 		return;
1447 
1448 	intel_dp = &dig_port->dp;
1449 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1450 		return;
1451 
1452 	mutex_lock(&intel_dp->psr.lock);
1453 	if (!intel_dp->psr.enabled)
1454 		goto unlock;
1455 
1456 	if (intel_dp->psr.panel_replay_enabled) {
1457 		pipe_config->has_panel_replay = true;
1458 	} else {
1459 		/*
1460 		 * Not possible to read the EDP_PSR/PSR2_CTL registers reliably here,
1461 		 * as PSR gets enabled/disabled at runtime by frontbuffer tracking and others.
1462 		 */
1463 		pipe_config->has_psr = true;
1464 	}
1465 
1466 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1467 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1468 
1469 	if (!intel_dp->psr.psr2_enabled)
1470 		goto unlock;
1471 
1472 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1473 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1474 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1475 			pipe_config->enable_psr2_sel_fetch = true;
1476 	}
1477 
1478 	if (DISPLAY_VER(dev_priv) >= 12) {
1479 		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1480 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1481 	}
1482 unlock:
1483 	mutex_unlock(&intel_dp->psr.lock);
1484 }
1485 
1486 static void intel_psr_activate(struct intel_dp *intel_dp)
1487 {
1488 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1489 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1490 
1491 	drm_WARN_ON(&dev_priv->drm,
1492 		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1493 		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1494 
1495 	drm_WARN_ON(&dev_priv->drm,
1496 		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1497 
1498 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1499 
1500 	lockdep_assert_held(&intel_dp->psr.lock);
1501 
1502 	/* psr1, psr2 and panel-replay are mutually exclusive. */
1503 	if (intel_dp->psr.panel_replay_enabled)
1504 		dg2_activate_panel_replay(intel_dp);
1505 	else if (intel_dp->psr.psr2_enabled)
1506 		hsw_activate_psr2(intel_dp);
1507 	else
1508 		hsw_activate_psr1(intel_dp);
1509 
1510 	intel_dp->psr.active = true;
1511 }
1512 
1513 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1514 {
1515 	switch (intel_dp->psr.pipe) {
1516 	case PIPE_A:
1517 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1518 	case PIPE_B:
1519 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1520 	case PIPE_C:
1521 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1522 	case PIPE_D:
1523 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1524 	default:
1525 		MISSING_CASE(intel_dp->psr.pipe);
1526 		return 0;
1527 	}
1528 }
1529 
1530 /*
1531  * Wa_16013835468
1532  * Wa_14015648006
1533  */
1534 static void wm_optimization_wa(struct intel_dp *intel_dp,
1535 			       const struct intel_crtc_state *crtc_state)
1536 {
1537 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1538 	bool set_wa_bit = false;
1539 
1540 	/* Wa_14015648006 */
1541 	if (IS_DISPLAY_VER(dev_priv, 11, 14))
1542 		set_wa_bit |= crtc_state->wm_level_disabled;
1543 
1544 	/* Wa_16013835468 */
1545 	if (DISPLAY_VER(dev_priv) == 12)
1546 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1547 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1548 
1549 	if (set_wa_bit)
1550 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1551 			     0, wa_16013835468_bit_get(intel_dp));
1552 	else
1553 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1554 			     wa_16013835468_bit_get(intel_dp), 0);
1555 }
1556 
1557 static void lnl_alpm_configure(struct intel_dp *intel_dp)
1558 {
1559 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1560 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1561 	struct intel_psr *psr = &intel_dp->psr;
1562 
1563 	if (DISPLAY_VER(dev_priv) < 20)
1564 		return;
1565 
1566 	intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
1567 		       ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
1568 		       ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
1569 		       ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
1570 }
1571 
1572 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1573 				    const struct intel_crtc_state *crtc_state)
1574 {
1575 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1576 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1577 	u32 mask;
1578 
1579 	/*
1580 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1581 	 * SKL+ use hardcoded values for PSR AUX transactions.
1582 	 */
1583 	if (DISPLAY_VER(dev_priv) < 9)
1584 		hsw_psr_setup_aux(intel_dp);
1585 
1586 	/*
1587 	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1588 	 * mask LPSP to avoid a dependency on other drivers that might block
1589 	 * runtime_pm, besides preventing other hw tracking issues, now that we
1590 	 * can rely on frontbuffer tracking.
1591 	 */
1592 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1593 	       EDP_PSR_DEBUG_MASK_HPD;
1594 
1595 	/*
1596 	 * For some unknown reason on HSW non-ULT (or at least on
1597 	 * Dell Latitude E6540) external displays start to flicker
1598 	 * when PSR is enabled on the eDP. SR/PC6 residency is much
1599 	 * higher than should be possible with an external display.
1600 	 * As a workaround leave LPSP unmasked to prevent PSR entry
1601 	 * when external displays are active.
1602 	 */
1603 	if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1604 		mask |= EDP_PSR_DEBUG_MASK_LPSP;
1605 
1606 	if (DISPLAY_VER(dev_priv) < 20)
1607 		mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1608 
1609 	/*
1610 	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1611 	 * registers in order to keep the CURSURFLIVE tricks working :(
1612 	 */
1613 	if (IS_DISPLAY_VER(dev_priv, 9, 10))
1614 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1615 
1616 	/* allow PSR with sprite enabled */
1617 	if (IS_HASWELL(dev_priv))
1618 		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1619 
1620 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1621 
1622 	psr_irq_control(intel_dp);
1623 
1624 	/*
1625 	 * TODO: if future platforms support DC3CO in more than one
1626 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1627 	 */
1628 	if (intel_dp->psr.dc3co_exitline)
1629 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1630 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1631 
1632 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1633 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1634 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1635 			     IGNORE_PSR2_HW_TRACKING : 0);
1636 
1637 	lnl_alpm_configure(intel_dp);
1638 
1639 	/*
1640 	 * Wa_16013835468
1641 	 * Wa_14015648006
1642 	 */
1643 	wm_optimization_wa(intel_dp, crtc_state);
1644 
1645 	if (intel_dp->psr.psr2_enabled) {
1646 		if (DISPLAY_VER(dev_priv) == 9)
1647 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1648 				     PSR2_VSC_ENABLE_PROG_HEADER |
1649 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1650 
1651 		/*
1652 		 * Wa_16014451276:adlp,mtl[a0,b0]
1653 		 * All supported adlp panels have 1-based X granularity; this may
1654 		 * cause issues if unsupported panels are used.
1655 		 */
1656 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1657 		    IS_ALDERLAKE_P(dev_priv))
1658 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1659 				     0, ADLP_1_BASED_X_GRANULARITY);
1660 
1661 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1662 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1663 			intel_de_rmw(dev_priv,
1664 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1665 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1666 		else if (IS_ALDERLAKE_P(dev_priv))
1667 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1668 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1669 	}
1670 }
1671 
1672 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1673 {
1674 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1675 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1676 	u32 val;
1677 
1678 	/*
1679 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1680 	 * will still keep the error set even after the reset done in the
1681 	 * irq_preinstall and irq_uninstall hooks.
1682 	 * Enabling PSR in this situation causes the screen to freeze the
1683 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1684 	 * to avoid any rendering problems.
1685 	 */
1686 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1687 	val &= psr_irq_psr_error_bit_get(intel_dp);
1688 	if (val) {
1689 		intel_dp->psr.sink_not_reliable = true;
1690 		drm_dbg_kms(&dev_priv->drm,
1691 			    "PSR interruption error set, not enabling PSR\n");
1692 		return false;
1693 	}
1694 
1695 	return true;
1696 }
1697 
1698 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1699 				    const struct intel_crtc_state *crtc_state)
1700 {
1701 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1702 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1703 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1704 	u32 val;
1705 
1706 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1707 
1708 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1709 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1710 	intel_dp->psr.busy_frontbuffer_bits = 0;
1711 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1712 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1713 	/* DC5/DC6 requires at least 6 idle frames */
1714 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1715 	intel_dp->psr.dc3co_exit_delay = val;
1716 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1717 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1718 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1719 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1720 		crtc_state->req_psr2_sdp_prior_scanline;
1721 
1722 	if (!psr_interrupt_error_check(intel_dp))
1723 		return;
1724 
1725 	if (intel_dp->psr.panel_replay_enabled)
1726 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1727 	else
1728 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1729 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1730 
1731 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1732 	intel_psr_enable_sink(intel_dp);
1733 	intel_psr_enable_source(intel_dp, crtc_state);
1734 	intel_dp->psr.enabled = true;
1735 	intel_dp->psr.paused = false;
1736 
1737 	intel_psr_activate(intel_dp);
1738 }
1739 
1740 static void intel_psr_exit(struct intel_dp *intel_dp)
1741 {
1742 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1743 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1744 	u32 val;
1745 
1746 	if (!intel_dp->psr.active) {
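		/* Sanity check: HW enable bits must be clear when we consider PSR inactive. */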
1747 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1748 			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1749 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1750 		}
1751 
1752 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1753 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1754 
1755 		return;
1756 	}
1757 
1758 	if (intel_dp->psr.panel_replay_enabled) {
1759 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1760 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1761 	} else if (intel_dp->psr.psr2_enabled) {
1762 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1763 
1764 		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1765 				   EDP_PSR2_ENABLE, 0);
1766 
1767 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1768 	} else {
1769 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1770 				   EDP_PSR_ENABLE, 0);
1771 
1772 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1773 	}
1774 	intel_dp->psr.active = false;
1775 }
1776 
1777 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1778 {
1779 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1780 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1781 	i915_reg_t psr_status;
1782 	u32 psr_status_mask;
1783 
1784 	if (intel_dp->psr.psr2_enabled) {
1785 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1786 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1787 	} else {
1788 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1789 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1790 	}
1791 
1792 	/* Wait till PSR is idle */
1793 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1794 				    psr_status_mask, 2000))
1795 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1796 }
1797 
1798 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1799 {
1800 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1801 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1802 	enum phy phy = intel_port_to_phy(dev_priv,
1803 					 dp_to_dig_port(intel_dp)->base.port);
1804 
1805 	lockdep_assert_held(&intel_dp->psr.lock);
1806 
1807 	if (!intel_dp->psr.enabled)
1808 		return;
1809 
1810 	if (intel_dp->psr.panel_replay_enabled)
1811 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1812 	else
1813 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1814 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1815 
1816 	intel_psr_exit(intel_dp);
1817 	intel_psr_wait_exit_locked(intel_dp);
1818 
1819 	/*
1820 	 * Wa_16013835468
1821 	 * Wa_14015648006
1822 	 */
1823 	if (DISPLAY_VER(dev_priv) >= 11)
1824 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1825 			     wa_16013835468_bit_get(intel_dp), 0);
1826 
1827 	if (intel_dp->psr.psr2_enabled) {
1828 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1829 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1830 			intel_de_rmw(dev_priv,
1831 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1832 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1833 		else if (IS_ALDERLAKE_P(dev_priv))
1834 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1835 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1836 	}
1837 
1838 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1839 
1840 	/* Disable PSR on Sink */
1841 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1842 
1843 	if (intel_dp->psr.psr2_enabled)
1844 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1845 
1846 	intel_dp->psr.enabled = false;
1847 	intel_dp->psr.panel_replay_enabled = false;
1848 	intel_dp->psr.psr2_enabled = false;
1849 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1850 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1851 }
1852 
1853 /**
1854  * intel_psr_disable - Disable PSR
1855  * @intel_dp: Intel DP
1856  * @old_crtc_state: old CRTC state
1857  *
1858  * This function needs to be called before disabling the pipe.
1859  */
1860 void intel_psr_disable(struct intel_dp *intel_dp,
1861 		       const struct intel_crtc_state *old_crtc_state)
1862 {
1863 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1864 
1865 	if (!old_crtc_state->has_psr)
1866 		return;
1867 
1868 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1869 		return;
1870 
1871 	mutex_lock(&intel_dp->psr.lock);
1872 
1873 	intel_psr_disable_locked(intel_dp);
1874 
1875 	mutex_unlock(&intel_dp->psr.lock);
1876 	cancel_work_sync(&intel_dp->psr.work);
1877 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1878 }
1879 
1880 /**
1881  * intel_psr_pause - Pause PSR
1882  * @intel_dp: Intel DP
1883  *
1884  * This function needs to be called after enabling PSR.
1885  */
1886 void intel_psr_pause(struct intel_dp *intel_dp)
1887 {
1888 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1889 	struct intel_psr *psr = &intel_dp->psr;
1890 
1891 	if (!CAN_PSR(intel_dp))
1892 		return;
1893 
1894 	mutex_lock(&psr->lock);
1895 
1896 	if (!psr->enabled) {
1897 		mutex_unlock(&psr->lock);
1898 		return;
1899 	}
1900 
1901 	/* If we ever hit this, we will need to add refcount to pause/resume */
1902 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1903 
1904 	intel_psr_exit(intel_dp);
1905 	intel_psr_wait_exit_locked(intel_dp);
1906 	psr->paused = true;
1907 
1908 	mutex_unlock(&psr->lock);
1909 
1910 	cancel_work_sync(&psr->work);
1911 	cancel_delayed_work_sync(&psr->dc3co_work);
1912 }
1913 
1914 /**
1915  * intel_psr_resume - Resume PSR
1916  * @intel_dp: Intel DP
1917  *
1918  * This function needs to be called after pausing PSR.
1919  */
1920 void intel_psr_resume(struct intel_dp *intel_dp)
1921 {
1922 	struct intel_psr *psr = &intel_dp->psr;
1923 
1924 	if (!CAN_PSR(intel_dp))
1925 		return;
1926 
1927 	mutex_lock(&psr->lock);
1928 
1929 	if (!psr->paused)
1930 		goto unlock;
1931 
1932 	psr->paused = false;
1933 	intel_psr_activate(intel_dp);
1934 
1935 unlock:
1936 	mutex_unlock(&psr->lock);
1937 }
1938 
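/*
 * PSR2_MAN_TRK_CTL helpers: ADLP and display version 14+ use a different bit
 * layout for the manual tracking register, and on those platforms no explicit
 * enable bit is programmed (the helper below returns 0).
 */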
1939 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1940 {
1941 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1942 		PSR2_MAN_TRK_CTL_ENABLE;
1943 }
1944 
1945 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1946 {
1947 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1948 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1949 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1950 }
1951 
1952 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1953 {
1954 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1955 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1956 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1957 }
1958 
1959 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1960 {
1961 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1962 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1963 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1964 }
1965 
1966 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1967 {
1968 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1969 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1970 
1971 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1972 		intel_de_write(dev_priv,
1973 			       PSR2_MAN_TRK_CTL(cpu_transcoder),
1974 			       man_trk_ctl_enable_bit_get(dev_priv) |
1975 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1976 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1977 			       man_trk_ctl_continuos_full_frame(dev_priv));
1978 
1979 	/*
1980 	 * Display WA #0884: skl+
1981 	 * This documented WA for bxt can be safely applied
1982 	 * broadly so we can force HW tracking to exit PSR
1983 	 * instead of disabling and re-enabling.
1984 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1985 	 * but it makes more sense to write to the currently active
1986 	 * pipe.
1987 	 *
1988 	 * This workaround is not documented for platforms with display
1989 	 * version 10 or newer, but testing proved that it works up to
1990 	 * display version 13; anything newer will need testing.
1991 	 */
1992 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1993 }
1994 
1995 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1996 {
1997 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1998 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1999 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2000 	struct intel_encoder *encoder;
2001 
2002 	if (!crtc_state->enable_psr2_sel_fetch)
2003 		return;
2004 
2005 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2006 					     crtc_state->uapi.encoder_mask) {
2007 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2008 
2009 		lockdep_assert_held(&intel_dp->psr.lock);
2010 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2011 			return;
2012 		break;
2013 	}
2014 
2015 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2016 		       crtc_state->psr2_man_track_ctl);
2017 
2018 	if (!crtc_state->enable_psr2_su_region_et)
2019 		return;
2020 
2021 	intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2022 		       crtc_state->pipe_srcsz_early_tpt);
2023 }
2024 
2025 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2026 				  bool full_update)
2027 {
2028 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2029 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2030 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2031 
2032 	/* SF partial frame enable has to be set even on full update */
2033 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2034 
2035 	if (full_update) {
2036 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2037 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
2038 		goto exit;
2039 	}
2040 
2041 	if (crtc_state->psr2_su_area.y1 == -1)
2042 		goto exit;
2043 
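	/*
	 * ADLP+ programs the SU region start/end in scanlines (inclusive end,
	 * hence y2 - 1); older platforms program it in 4-line SU blocks with
	 * what appears to be 1-based block addressing (hence the / 4 + 1).
	 */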
2044 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2045 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2046 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2047 	} else {
2048 		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2049 			    crtc_state->psr2_su_area.y1 % 4 ||
2050 			    crtc_state->psr2_su_area.y2 % 4);
2051 
2052 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2053 			crtc_state->psr2_su_area.y1 / 4 + 1);
2054 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2055 			crtc_state->psr2_su_area.y2 / 4 + 1);
2056 	}
2057 exit:
2058 	crtc_state->psr2_man_track_ctl = val;
2059 }
2060 
2061 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2062 					  bool full_update)
2063 {
2064 	int width, height;
2065 
2066 	if (!crtc_state->enable_psr2_su_region_et || full_update)
2067 		return 0;
2068 
2069 	width = drm_rect_width(&crtc_state->psr2_su_area);
2070 	height = drm_rect_height(&crtc_state->psr2_su_area);
2071 
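	/* The register uses a size-minus-one encoding, like PIPESRC. */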
2072 	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2073 }
2074 
2075 static void clip_area_update(struct drm_rect *overlap_damage_area,
2076 			     struct drm_rect *damage_area,
2077 			     struct drm_rect *pipe_src)
2078 {
2079 	if (!drm_rect_intersect(damage_area, pipe_src))
2080 		return;
2081 
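	/* y1 == -1 means no damage area has been accumulated yet. */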
2082 	if (overlap_damage_area->y1 == -1) {
2083 		overlap_damage_area->y1 = damage_area->y1;
2084 		overlap_damage_area->y2 = damage_area->y2;
2085 		return;
2086 	}
2087 
2088 	if (damage_area->y1 < overlap_damage_area->y1)
2089 		overlap_damage_area->y1 = damage_area->y1;
2090 
2091 	if (damage_area->y2 > overlap_damage_area->y2)
2092 		overlap_damage_area->y2 = damage_area->y2;
2093 }
2094 
2095 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2096 {
2097 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2098 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2099 	u16 y_alignment;
2100 
2101 	/* ADLP aligns the SU region to the VDSC slice height when DSC is enabled */
2102 	if (crtc_state->dsc.compression_enable &&
2103 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2104 		y_alignment = vdsc_cfg->slice_height;
2105 	else
2106 		y_alignment = crtc_state->su_y_granularity;
2107 
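	/* Round y1 down and y2 up to the required alignment. */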
2108 	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2109 	if (crtc_state->psr2_su_area.y2 % y_alignment)
2110 		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2111 						y_alignment) + 1) * y_alignment;
2112 }
2113 
2114 /*
2115  * When early transport is in use we need to extend the SU area to cover
2116  * the cursor fully when the cursor is in the SU area.
2117  */
2118 static void
2119 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2120 				  struct intel_crtc *crtc)
2121 {
2122 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2123 	struct intel_plane_state *new_plane_state;
2124 	struct intel_plane *plane;
2125 	int i;
2126 
2127 	if (!crtc_state->enable_psr2_su_region_et)
2128 		return;
2129 
2130 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2131 		struct drm_rect inter;
2132 
2133 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2134 			continue;
2135 
2136 		if (plane->id != PLANE_CURSOR)
2137 			continue;
2138 
2139 		if (!new_plane_state->uapi.visible)
2140 			continue;
2141 
2142 		inter = crtc_state->psr2_su_area;
2143 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2144 			continue;
2145 
2146 		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2147 				 &crtc_state->pipe_src);
2148 	}
2149 }
2150 
2151 /*
2152  * TODO: Not clear how to handle planes with a negative position;
2153  * also, planes are not updated if they have a negative X
2154  * position, so for now do a full update in these cases.
2155  *
2156  * Plane scaling and rotation are not supported by selective fetch and both
2157  * properties can change without a modeset, so they need to be checked at every
2158  * atomic commit.
2159  */
2160 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2161 {
2162 	if (plane_state->uapi.dst.y1 < 0 ||
2163 	    plane_state->uapi.dst.x1 < 0 ||
2164 	    plane_state->scaler_id >= 0 ||
2165 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2166 		return false;
2167 
2168 	return true;
2169 }
2170 
2171 /*
2172  * Check for pipe properties that are not supported by selective fetch.
2173  *
2174  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2175  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2176  * enabled and going to the full update path.
2177  */
2178 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2179 {
2180 	if (crtc_state->scaler_state.scaler_id >= 0)
2181 		return false;
2182 
2183 	return true;
2184 }
2185 
2186 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2187 				struct intel_crtc *crtc)
2188 {
2189 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2190 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2191 	struct intel_plane_state *new_plane_state, *old_plane_state;
2192 	struct intel_plane *plane;
2193 	bool full_update = false;
2194 	int i, ret;
2195 
2196 	if (!crtc_state->enable_psr2_sel_fetch)
2197 		return 0;
2198 
2199 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2200 		full_update = true;
2201 		goto skip_sel_fetch_set_loop;
2202 	}
2203 
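	/* Start with an empty SU area (y1/y2 == -1) spanning the full pipe width. */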
2204 	crtc_state->psr2_su_area.x1 = 0;
2205 	crtc_state->psr2_su_area.y1 = -1;
2206 	crtc_state->psr2_su_area.x2 = INT_MAX;
2207 	crtc_state->psr2_su_area.y2 = -1;
2208 
2209 	/*
2210 	 * Calculate minimal selective fetch area of each plane and calculate
2211 	 * the pipe damaged area.
2212 	 * In the next loop the plane selective fetch area will actually be set
2213 	 * using the whole pipe damaged area.
2214 	 */
2215 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2216 					     new_plane_state, i) {
2217 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2218 						      .x2 = INT_MAX };
2219 
2220 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2221 			continue;
2222 
2223 		if (!new_plane_state->uapi.visible &&
2224 		    !old_plane_state->uapi.visible)
2225 			continue;
2226 
2227 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2228 			full_update = true;
2229 			break;
2230 		}
2231 
2232 		/*
2233 		 * If visibility changed or the plane moved, mark the whole plane
2234 		 * area as damaged as it needs a complete redraw in the new and
2235 		 * old positions.
2236 		 */
2237 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2238 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2239 				     &old_plane_state->uapi.dst)) {
2240 			if (old_plane_state->uapi.visible) {
2241 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2242 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2243 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2244 						 &crtc_state->pipe_src);
2245 			}
2246 
2247 			if (new_plane_state->uapi.visible) {
2248 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2249 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2250 				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2251 						 &crtc_state->pipe_src);
2252 			}
2253 			continue;
2254 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2255 			/* If alpha changed mark the whole plane area as damaged */
2256 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2257 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2258 			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2259 					 &crtc_state->pipe_src);
2260 			continue;
2261 		}
2262 
2263 		src = drm_plane_state_src(&new_plane_state->uapi);
2264 		drm_rect_fp_to_int(&src, &src);
2265 
2266 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2267 						     &new_plane_state->uapi, &damaged_area))
2268 			continue;
2269 
2270 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2271 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2272 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2273 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2274 
2275 		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2276 	}
2277 
2278 	/*
2279 	 * TODO: For now we are just using full update in case
2280 	 * selective fetch area calculation fails. To optimize this we
2281 	 * should identify cases where this happens and fix the area
2282 	 * calculation for those.
2283 	 */
2284 	if (crtc_state->psr2_su_area.y1 == -1) {
2285 		drm_info_once(&dev_priv->drm,
2286 			      "Selective fetch area calculation failed in pipe %c\n",
2287 			      pipe_name(crtc->pipe));
2288 		full_update = true;
2289 	}
2290 
2291 	if (full_update)
2292 		goto skip_sel_fetch_set_loop;
2293 
2294 	/* Wa_14014971492 */
2295 	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2296 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2297 	    crtc_state->splitter.enable)
2298 		crtc_state->psr2_su_area.y1 = 0;
2299 
2300 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2301 	if (ret)
2302 		return ret;
2303 
2304 	/*
2305 	 * Adjust the SU area to cover the cursor fully as necessary (early
2306 	 * transport). This needs to be done after
2307 	 * drm_atomic_add_affected_planes to ensure the visible cursor is added
2308 	 * to the affected planes even when the cursor itself is not updated.
2309 	 */
2310 	intel_psr2_sel_fetch_et_alignment(state, crtc);
2311 
2312 	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2313 
2314 	/*
2315 	 * Now that we have the pipe damaged area, check if it intersects with
2316 	 * each plane; if it does, set the plane's selective fetch area.
2317 	 */
2318 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2319 					     new_plane_state, i) {
2320 		struct drm_rect *sel_fetch_area, inter;
2321 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2322 
2323 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2324 		    !new_plane_state->uapi.visible)
2325 			continue;
2326 
2327 		inter = crtc_state->psr2_su_area;
2328 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2329 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2330 			sel_fetch_area->y1 = -1;
2331 			sel_fetch_area->y2 = -1;
2332 			/*
2333 			 * if plane sel fetch was previously enabled ->
2334 			 * disable it
2335 			 */
2336 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2337 				crtc_state->update_planes |= BIT(plane->id);
2338 
2339 			continue;
2340 		}
2341 
2342 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2343 			full_update = true;
2344 			break;
2345 		}
2346 
2347 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2348 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2349 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2350 		crtc_state->update_planes |= BIT(plane->id);
2351 
2352 		/*
2353 		 * Sel_fetch_area is calculated for the UV plane. Use the
2354 		 * same area for the Y plane as well.
2355 		 */
2356 		if (linked) {
2357 			struct intel_plane_state *linked_new_plane_state;
2358 			struct drm_rect *linked_sel_fetch_area;
2359 
2360 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2361 			if (IS_ERR(linked_new_plane_state))
2362 				return PTR_ERR(linked_new_plane_state);
2363 
2364 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2365 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2366 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2367 			crtc_state->update_planes |= BIT(linked->id);
2368 		}
2369 	}
2370 
2371 skip_sel_fetch_set_loop:
2372 	psr2_man_trk_ctl_calc(crtc_state, full_update);
2373 	crtc_state->pipe_srcsz_early_tpt =
2374 		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2375 	return 0;
2376 }
2377 
2378 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2379 				struct intel_crtc *crtc)
2380 {
2381 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2382 	const struct intel_crtc_state *old_crtc_state =
2383 		intel_atomic_get_old_crtc_state(state, crtc);
2384 	const struct intel_crtc_state *new_crtc_state =
2385 		intel_atomic_get_new_crtc_state(state, crtc);
2386 	struct intel_encoder *encoder;
2387 
2388 	if (!HAS_PSR(i915))
2389 		return;
2390 
2391 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2392 					     old_crtc_state->uapi.encoder_mask) {
2393 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2394 		struct intel_psr *psr = &intel_dp->psr;
2395 		bool needs_to_disable = false;
2396 
2397 		mutex_lock(&psr->lock);
2398 
2399 		/*
2400 		 * Reasons to disable:
2401 		 * - PSR disabled in new state
2402 		 * - All planes will go inactive
2403 		 * - Changing between PSR versions
2404 		 * - Display WA #1136: skl, bxt
2405 		 */
2406 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2407 		needs_to_disable |= !new_crtc_state->has_psr;
2408 		needs_to_disable |= !new_crtc_state->active_planes;
2409 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2410 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2411 			new_crtc_state->wm_level_disabled;
2412 
2413 		if (psr->enabled && needs_to_disable)
2414 			intel_psr_disable_locked(intel_dp);
2415 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2416 			/* Wa_14015648006 */
2417 			wm_optimization_wa(intel_dp, new_crtc_state);
2418 
2419 		mutex_unlock(&psr->lock);
2420 	}
2421 }
2422 
2423 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2424 				 struct intel_crtc *crtc)
2425 {
2426 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2427 	const struct intel_crtc_state *crtc_state =
2428 		intel_atomic_get_new_crtc_state(state, crtc);
2429 	struct intel_encoder *encoder;
2430 
2431 	if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2432 		return;
2433 
2434 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2435 					     crtc_state->uapi.encoder_mask) {
2436 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2437 		struct intel_psr *psr = &intel_dp->psr;
2438 		bool keep_disabled = false;
2439 
2440 		mutex_lock(&psr->lock);
2441 
2442 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2443 
2444 		keep_disabled |= psr->sink_not_reliable;
2445 		keep_disabled |= !crtc_state->active_planes;
2446 
2447 		/* Display WA #1136: skl, bxt */
2448 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2449 			crtc_state->wm_level_disabled;
2450 
2451 		if (!psr->enabled && !keep_disabled)
2452 			intel_psr_enable_locked(intel_dp, crtc_state);
2453 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2454 			/* Wa_14015648006 */
2455 			wm_optimization_wa(intel_dp, crtc_state);
2456 
2457 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2458 		if (crtc_state->crc_enabled && psr->enabled)
2459 			psr_force_hw_tracking_exit(intel_dp);
2460 
2461 		/*
2462 		 * Clear possible busy bits in case we have an
2463 		 * invalidate -> flip -> flush sequence.
2464 		 */
2465 		intel_dp->psr.busy_frontbuffer_bits = 0;
2466 
2467 		mutex_unlock(&psr->lock);
2468 	}
2469 }
2470 
2471 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2472 {
2473 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2474 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2475 
2476 	/*
2477 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2478 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2479 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2480 	 */
2481 	return intel_de_wait_for_clear(dev_priv,
2482 				       EDP_PSR2_STATUS(cpu_transcoder),
2483 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2484 }
2485 
2486 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2487 {
2488 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2489 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2490 
2491 	/*
2492 	 * From bspec: Panel Self Refresh (BDW+)
2493 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2494 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2495 	 * defensive enough to cover everything.
2496 	 */
2497 	return intel_de_wait_for_clear(dev_priv,
2498 				       psr_status_reg(dev_priv, cpu_transcoder),
2499 				       EDP_PSR_STATUS_STATE_MASK, 50);
2500 }
2501 
2502 /**
2503  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2504  * @new_crtc_state: new CRTC state
2505  *
2506  * This function is expected to be called from pipe_update_start() where it is
2507  * not expected to race with PSR enable or disable.
2508  */
2509 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2510 {
2511 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2512 	struct intel_encoder *encoder;
2513 
2514 	if (!new_crtc_state->has_psr)
2515 		return;
2516 
2517 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2518 					     new_crtc_state->uapi.encoder_mask) {
2519 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2520 		int ret;
2521 
2522 		lockdep_assert_held(&intel_dp->psr.lock);
2523 
2524 		if (!intel_dp->psr.enabled)
2525 			continue;
2526 
2527 		if (intel_dp->psr.psr2_enabled)
2528 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2529 		else
2530 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2531 
2532 		if (ret)
2533 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2534 	}
2535 }
2536 
2537 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2538 {
2539 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2540 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2541 	i915_reg_t reg;
2542 	u32 mask;
2543 	int err;
2544 
2545 	if (!intel_dp->psr.enabled)
2546 		return false;
2547 
2548 	if (intel_dp->psr.psr2_enabled) {
2549 		reg = EDP_PSR2_STATUS(cpu_transcoder);
2550 		mask = EDP_PSR2_STATUS_STATE_MASK;
2551 	} else {
2552 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2553 		mask = EDP_PSR_STATUS_STATE_MASK;
2554 	}
2555 
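	/* Wait without holding the PSR lock; state is re-checked after relocking below. */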
2556 	mutex_unlock(&intel_dp->psr.lock);
2557 
2558 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2559 	if (err)
2560 		drm_err(&dev_priv->drm,
2561 			"Timed out waiting for PSR Idle for re-enable\n");
2562 
2563 	/* After the unlocked wait, verify that PSR is still wanted! */
2564 	mutex_lock(&intel_dp->psr.lock);
2565 	return err == 0 && intel_dp->psr.enabled;
2566 }
2567 
2568 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2569 {
2570 	struct drm_connector_list_iter conn_iter;
2571 	struct drm_modeset_acquire_ctx ctx;
2572 	struct drm_atomic_state *state;
2573 	struct drm_connector *conn;
2574 	int err = 0;
2575 
2576 	state = drm_atomic_state_alloc(&dev_priv->drm);
2577 	if (!state)
2578 		return -ENOMEM;
2579 
2580 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2581 
2582 	state->acquire_ctx = &ctx;
2583 	to_intel_atomic_state(state)->internal = true;
2584 
2585 retry:
2586 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2587 	drm_for_each_connector_iter(conn, &conn_iter) {
2588 		struct drm_connector_state *conn_state;
2589 		struct drm_crtc_state *crtc_state;
2590 
2591 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2592 			continue;
2593 
2594 		conn_state = drm_atomic_get_connector_state(state, conn);
2595 		if (IS_ERR(conn_state)) {
2596 			err = PTR_ERR(conn_state);
2597 			break;
2598 		}
2599 
2600 		if (!conn_state->crtc)
2601 			continue;
2602 
2603 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2604 		if (IS_ERR(crtc_state)) {
2605 			err = PTR_ERR(crtc_state);
2606 			break;
2607 		}
2608 
2609 		/* Mark mode as changed to trigger a pipe->update() */
2610 		crtc_state->mode_changed = true;
2611 	}
2612 	drm_connector_list_iter_end(&conn_iter);
2613 
2614 	if (err == 0)
2615 		err = drm_atomic_commit(state);
2616 
2617 	if (err == -EDEADLK) {
2618 		drm_atomic_state_clear(state);
2619 		err = drm_modeset_backoff(&ctx);
2620 		if (!err)
2621 			goto retry;
2622 	}
2623 
2624 	drm_modeset_drop_locks(&ctx);
2625 	drm_modeset_acquire_fini(&ctx);
2626 	drm_atomic_state_put(state);
2627 
2628 	return err;
2629 }
2630 
2631 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2632 {
2633 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2634 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2635 	u32 old_mode;
2636 	int ret;
2637 
2638 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2639 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2640 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2641 		return -EINVAL;
2642 	}
2643 
2644 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2645 	if (ret)
2646 		return ret;
2647 
2648 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2649 	intel_dp->psr.debug = val;
2650 
2651 	/*
2652 	 * Do it right away if it's already enabled, otherwise it will be done
2653 	 * when enabling the source.
2654 	 */
2655 	if (intel_dp->psr.enabled)
2656 		psr_irq_control(intel_dp);
2657 
2658 	mutex_unlock(&intel_dp->psr.lock);
2659 
2660 	if (old_mode != mode)
2661 		ret = intel_psr_fastset_force(dev_priv);
2662 
2663 	return ret;
2664 }
2665 
2666 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2667 {
2668 	struct intel_psr *psr = &intel_dp->psr;
2669 
2670 	intel_psr_disable_locked(intel_dp);
2671 	psr->sink_not_reliable = true;
2672 	/* let's make sure that the sink is awake */
2673 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2674 }
2675 
2676 static void intel_psr_work(struct work_struct *work)
2677 {
2678 	struct intel_dp *intel_dp =
2679 		container_of(work, typeof(*intel_dp), psr.work);
2680 
2681 	mutex_lock(&intel_dp->psr.lock);
2682 
2683 	if (!intel_dp->psr.enabled)
2684 		goto unlock;
2685 
2686 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2687 		intel_psr_handle_irq(intel_dp);
2688 
2689 	/*
2690 	 * We have to make sure PSR is ready for re-enable
2691 	 * otherwise it stays disabled until the next full enable/disable cycle.
2692 	 * PSR might take some time to get fully disabled
2693 	 * and be ready for re-enable.
2694 	 */
2695 	if (!__psr_wait_for_idle_locked(intel_dp))
2696 		goto unlock;
2697 
2698 	/*
2699 	 * The delayed work can race with an invalidate hence we need to
2700 	 * recheck. Since psr_flush first clears this and then reschedules we
2701 	 * won't ever miss a flush when bailing out here.
2702 	 */
2703 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2704 		goto unlock;
2705 
2706 	intel_psr_activate(intel_dp);
2707 unlock:
2708 	mutex_unlock(&intel_dp->psr.lock);
2709 }
2710 
2711 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2712 {
2713 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2714 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2715 
2716 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2717 		u32 val;
2718 
2719 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2720 			/* Send one update, otherwise lag is observed on screen */
2721 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2722 			return;
2723 		}
2724 
2725 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2726 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2727 		      man_trk_ctl_continuos_full_frame(dev_priv);
2728 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2729 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2730 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2731 	} else {
2732 		intel_psr_exit(intel_dp);
2733 	}
2734 }
2735 
2736 /**
2737  * intel_psr_invalidate - Invalidate PSR
2738  * @dev_priv: i915 device
2739  * @frontbuffer_bits: frontbuffer plane tracking bits
2740  * @origin: which operation caused the invalidate
2741  *
2742  * Since the hardware frontbuffer tracking has gaps we need to integrate
2743  * with the software frontbuffer tracking. This function gets called every
2744  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2745  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2746  *
2747  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2748  */
2749 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2750 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2751 {
2752 	struct intel_encoder *encoder;
2753 
2754 	if (origin == ORIGIN_FLIP)
2755 		return;
2756 
2757 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2758 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2759 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2760 
2761 		mutex_lock(&intel_dp->psr.lock);
2762 		if (!intel_dp->psr.enabled) {
2763 			mutex_unlock(&intel_dp->psr.lock);
2764 			continue;
2765 		}
2766 
2767 		pipe_frontbuffer_bits &=
2768 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2769 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2770 
2771 		if (pipe_frontbuffer_bits)
2772 			_psr_invalidate_handle(intel_dp);
2773 
2774 		mutex_unlock(&intel_dp->psr.lock);
2775 	}
2776 }
2777 /*
2778  * When we completely rely on PSR2 S/W tracking in the future,
2779  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
2780  * event as well; therefore tgl_dc3co_flush_locked() will need to be changed
2781  * accordingly.
2782  */
2783 static void
2784 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2785 		       enum fb_op_origin origin)
2786 {
2787 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2788 
2789 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2790 	    !intel_dp->psr.active)
2791 		return;
2792 
2793 	/*
2794 	 * Every frontbuffer flush/flip event modifies the delay of the delayed
2795 	 * work; when the delayed work finally runs, the display has been idle.
2796 	 */
2797 	if (!(frontbuffer_bits &
2798 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2799 		return;
2800 
2801 	tgl_psr2_enable_dc3co(intel_dp);
2802 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2803 			 intel_dp->psr.dc3co_exit_delay);
2804 }
2805 
2806 static void _psr_flush_handle(struct intel_dp *intel_dp)
2807 {
2808 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2809 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2810 
2811 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2812 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2813 			/* can we turn CFF off? */
2814 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2815 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2816 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2817 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2818 					man_trk_ctl_continuos_full_frame(dev_priv);
2819 
2820 				/*
2821 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2822 				 * updates. Still keep the CFF bit enabled as we don't have a
2823 				 * proper SU configuration in case an update is sent for any
2824 				 * reason after the SFF bit gets cleared by the HW on the next vblank.
2825 				 */
2826 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2827 					       val);
2828 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2829 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2830 			}
2831 		} else {
2832 			/*
2833 			 * continuous full frame is disabled, only a single full
2834 			 * frame is required
2835 			 */
2836 			psr_force_hw_tracking_exit(intel_dp);
2837 		}
2838 	} else {
2839 		psr_force_hw_tracking_exit(intel_dp);
2840 
2841 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2842 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2843 	}
2844 }
2845 
2846 /**
2847  * intel_psr_flush - Flush PSR
2848  * @dev_priv: i915 device
2849  * @frontbuffer_bits: frontbuffer plane tracking bits
2850  * @origin: which operation caused the flush
2851  *
2852  * Since the hardware frontbuffer tracking has gaps we need to integrate
2853  * with the software frontbuffer tracking. This function gets called every
2854  * time frontbuffer rendering has completed and flushed out to memory. PSR
2855  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2856  *
2857  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2858  */
2859 void intel_psr_flush(struct drm_i915_private *dev_priv,
2860 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2861 {
2862 	struct intel_encoder *encoder;
2863 
2864 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2865 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2866 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2867 
2868 		mutex_lock(&intel_dp->psr.lock);
2869 		if (!intel_dp->psr.enabled) {
2870 			mutex_unlock(&intel_dp->psr.lock);
2871 			continue;
2872 		}
2873 
2874 		pipe_frontbuffer_bits &=
2875 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2876 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2877 
2878 		/*
2879 		 * If PSR is paused by an explicit intel_psr_pause() call,
2880 		 * we have to ensure that the PSR is not activated until
2881 		 * intel_psr_resume() is called.
2882 		 */
2883 		if (intel_dp->psr.paused)
2884 			goto unlock;
2885 
2886 		if (origin == ORIGIN_FLIP ||
2887 		    (origin == ORIGIN_CURSOR_UPDATE &&
2888 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2889 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2890 			goto unlock;
2891 		}
2892 
2893 		if (pipe_frontbuffer_bits == 0)
2894 			goto unlock;
2895 
2896 		/* By definition flush = invalidate + flush */
2897 		_psr_flush_handle(intel_dp);
2898 unlock:
2899 		mutex_unlock(&intel_dp->psr.lock);
2900 	}
2901 }
2902 
2903 /**
2904  * intel_psr_init - Init basic PSR work and mutex.
2905  * @intel_dp: Intel DP
2906  *
2907  * This function is called after initializing the connector
2908  * (connector initialization handles the connector capabilities)
2909  * and it initializes basic PSR state for each DP encoder.
2910  */
2911 void intel_psr_init(struct intel_dp *intel_dp)
2912 {
2913 	struct intel_connector *connector = intel_dp->attached_connector;
2914 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2915 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2916 
2917 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2918 		return;
2919 
2920 	/*
2921 	 * HSW spec explicitly says PSR is tied to port A.
2922 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2923 	 * BDW, GEN9 and GEN11 are not validated by the HW team on any
2924 	 * transcoder other than the eDP one.
2925 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2926 	 * so let's keep it hardcoded to PORT_A for those.
2927 	 * GEN12, however, supports an instance of PSR registers per transcoder.
2928 	 */
2929 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2930 		drm_dbg_kms(&dev_priv->drm,
2931 			    "PSR condition failed: Port not supported\n");
2932 		return;
2933 	}
2934 
2935 	if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2936 		intel_dp->psr.source_panel_replay_support = true;
2937 	else
2938 		intel_dp->psr.source_support = true;
2939 
2940 	/* Disable early transport for now */
2941 	intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
2942 
2943 	/* Set link_standby vs. link_off defaults */
2944 	if (DISPLAY_VER(dev_priv) < 12)
2945 		/* For platforms up to TGL, let's respect the VBT again */
2946 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2947 
2948 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2949 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2950 	mutex_init(&intel_dp->psr.lock);
2951 }
2952 
2953 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2954 					   u8 *status, u8 *error_status)
2955 {
2956 	struct drm_dp_aux *aux = &intel_dp->aux;
2957 	int ret;
2958 	unsigned int offset;
2959 
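	/* Panel Replay and PSR report sink status and errors at different DPCD offsets. */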
2960 	offset = intel_dp->psr.panel_replay_enabled ?
2961 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2962 
2963 	ret = drm_dp_dpcd_readb(aux, offset, status);
2964 	if (ret != 1)
2965 		return ret;
2966 
2967 	offset = intel_dp->psr.panel_replay_enabled ?
2968 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2969 
2970 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
2971 	if (ret != 1)
2972 		return ret;
2973 
2974 	*status = *status & DP_PSR_SINK_STATE_MASK;
2975 
2976 	return 0;
2977 }
2978 
2979 static void psr_alpm_check(struct intel_dp *intel_dp)
2980 {
2981 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2982 	struct drm_dp_aux *aux = &intel_dp->aux;
2983 	struct intel_psr *psr = &intel_dp->psr;
2984 	u8 val;
2985 	int r;
2986 
2987 	if (!psr->psr2_enabled)
2988 		return;
2989 
2990 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2991 	if (r != 1) {
2992 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2993 		return;
2994 	}
2995 
2996 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2997 		intel_psr_disable_locked(intel_dp);
2998 		psr->sink_not_reliable = true;
2999 		drm_dbg_kms(&dev_priv->drm,
3000 			    "ALPM lock timeout error, disabling PSR\n");
3001 
3002 		/* Clearing error */
3003 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3004 	}
3005 }
3006 
3007 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3008 {
3009 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3010 	struct intel_psr *psr = &intel_dp->psr;
3011 	u8 val;
3012 	int r;
3013 
3014 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3015 	if (r != 1) {
3016 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3017 		return;
3018 	}
3019 
3020 	if (val & DP_PSR_CAPS_CHANGE) {
3021 		intel_psr_disable_locked(intel_dp);
3022 		psr->sink_not_reliable = true;
3023 		drm_dbg_kms(&dev_priv->drm,
3024 			    "Sink PSR capability changed, disabling PSR\n");
3025 
3026 		/* Clearing it */
3027 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3028 	}
3029 }
3030 
3031 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3032 {
3033 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3034 	struct intel_psr *psr = &intel_dp->psr;
3035 	u8 status, error_status;
3036 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3037 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3038 			  DP_PSR_LINK_CRC_ERROR;
3039 
3040 	if (!CAN_PSR(intel_dp))
3041 		return;
3042 
3043 	mutex_lock(&psr->lock);
3044 
3045 	if (!psr->enabled)
3046 		goto exit;
3047 
3048 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3049 		drm_err(&dev_priv->drm,
3050 			"Error reading PSR status or error status\n");
3051 		goto exit;
3052 	}
3053 
3054 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
3055 		intel_psr_disable_locked(intel_dp);
3056 		psr->sink_not_reliable = true;
3057 	}
3058 
3059 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
3060 		drm_dbg_kms(&dev_priv->drm,
3061 			    "PSR sink internal error, disabling PSR\n");
3062 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3063 		drm_dbg_kms(&dev_priv->drm,
3064 			    "PSR RFB storage error, disabling PSR\n");
3065 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3066 		drm_dbg_kms(&dev_priv->drm,
3067 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3068 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3069 		drm_dbg_kms(&dev_priv->drm,
3070 			    "PSR Link CRC error, disabling PSR\n");
3071 
3072 	if (error_status & ~errors)
3073 		drm_err(&dev_priv->drm,
3074 			"PSR_ERROR_STATUS unhandled errors %x\n",
3075 			error_status & ~errors);
3076 	/* clear status register */
3077 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3078 
3079 	psr_alpm_check(intel_dp);
3080 	psr_capability_changed_check(intel_dp);
3081 
3082 exit:
3083 	mutex_unlock(&psr->lock);
3084 }
3085 
3086 bool intel_psr_enabled(struct intel_dp *intel_dp)
3087 {
3088 	bool ret;
3089 
3090 	if (!CAN_PSR(intel_dp))
3091 		return false;
3092 
3093 	mutex_lock(&intel_dp->psr.lock);
3094 	ret = intel_dp->psr.enabled;
3095 	mutex_unlock(&intel_dp->psr.lock);
3096 
3097 	return ret;
3098 }
3099 
3100 /**
3101  * intel_psr_lock - grab PSR lock
3102  * @crtc_state: the crtc state
3103  *
3104  * This is initially meant to be used around the CRTC update, when
3105  * vblank-sensitive registers are updated and we need to grab the lock
3106  * before it to avoid vblank evasion.
3107  */
3108 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3109 {
3110 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3111 	struct intel_encoder *encoder;
3112 
3113 	if (!crtc_state->has_psr)
3114 		return;
3115 
3116 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3117 					     crtc_state->uapi.encoder_mask) {
3118 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3119 
3120 		mutex_lock(&intel_dp->psr.lock);
3121 		break;
3122 	}
3123 }
3124 
3125 /**
3126  * intel_psr_unlock - release PSR lock
3127  * @crtc_state: the crtc state
3128  *
3129  * Release the PSR lock that was held during pipe update.
3130  */
3131 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3132 {
3133 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3134 	struct intel_encoder *encoder;
3135 
3136 	if (!crtc_state->has_psr)
3137 		return;
3138 
3139 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3140 					     crtc_state->uapi.encoder_mask) {
3141 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3142 
3143 		mutex_unlock(&intel_dp->psr.lock);
3144 		break;
3145 	}
3146 }
3147 
3148 static void
3149 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3150 {
3151 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3152 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3153 	const char *status = "unknown";
3154 	u32 val, status_val;
3155 
3156 	if (intel_dp->psr.psr2_enabled) {
3157 		static const char * const live_status[] = {
3158 			"IDLE",
3159 			"CAPTURE",
3160 			"CAPTURE_FS",
3161 			"SLEEP",
3162 			"BUFON_FW",
3163 			"ML_UP",
3164 			"SU_STANDBY",
3165 			"FAST_SLEEP",
3166 			"DEEP_SLEEP",
3167 			"BUF_ON",
3168 			"TG_ON"
3169 		};
3170 		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3171 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3172 		if (status_val < ARRAY_SIZE(live_status))
3173 			status = live_status[status_val];
3174 	} else {
3175 		static const char * const live_status[] = {
3176 			"IDLE",
3177 			"SRDONACK",
3178 			"SRDENT",
3179 			"BUFOFF",
3180 			"BUFON",
3181 			"AUXACK",
3182 			"SRDOFFACK",
3183 			"SRDENT_ON",
3184 		};
3185 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3186 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3187 		if (status_val < ARRAY_SIZE(live_status))
3188 			status = live_status[status_val];
3189 	}
3190 
3191 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3192 }
3193 
3194 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3195 {
3196 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3197 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3198 	struct intel_psr *psr = &intel_dp->psr;
3199 	intel_wakeref_t wakeref;
3200 	const char *status;
3201 	bool enabled;
3202 	u32 val;
3203 
3204 	seq_printf(m, "Sink support: PSR = %s",
3205 		   str_yes_no(psr->sink_support));
3206 
3207 	if (psr->sink_support)
3208 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3209 	seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3210 
3211 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3212 		return 0;
3213 
3214 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3215 	mutex_lock(&psr->lock);
3216 
3217 	if (psr->panel_replay_enabled)
3218 		status = "Panel Replay Enabled";
3219 	else if (psr->enabled)
3220 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3221 	else
3222 		status = "disabled";
3223 	seq_printf(m, "PSR mode: %s\n", status);
3224 
3225 	if (!psr->enabled) {
3226 		seq_printf(m, "PSR sink not reliable: %s\n",
3227 			   str_yes_no(psr->sink_not_reliable));
3228 
3229 		goto unlock;
3230 	}
3231 
3232 	if (psr->panel_replay_enabled) {
3233 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3234 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3235 	} else if (psr->psr2_enabled) {
3236 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3237 		enabled = val & EDP_PSR2_ENABLE;
3238 	} else {
3239 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3240 		enabled = val & EDP_PSR_ENABLE;
3241 	}
3242 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3243 		   str_enabled_disabled(enabled), val);
3244 	psr_source_status(intel_dp, m);
3245 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3246 		   psr->busy_frontbuffer_bits);
3247 
3248 	/*
3249 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3250 	 */
3251 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3252 	seq_printf(m, "Performance counter: %u\n",
3253 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3254 
3255 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3256 		seq_printf(m, "Last attempted entry at: %lld\n",
3257 			   psr->last_entry_attempt);
3258 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3259 	}
3260 
3261 	if (psr->psr2_enabled) {
3262 		u32 su_frames_val[3];
3263 		int frame;
3264 
3265 		/*
3266 		 * Read all 3 registers beforehand to minimize the chance of
3267 		 * crossing a frame boundary between register reads
3268 		 */
3269 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3270 			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3271 			su_frames_val[frame / 3] = val;
3272 		}
3273 
3274 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3275 
3276 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3277 			u32 su_blocks;
3278 
3279 			su_blocks = su_frames_val[frame / 3] &
3280 				    PSR2_SU_STATUS_MASK(frame);
3281 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3282 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3283 		}
3284 
3285 		seq_printf(m, "PSR2 selective fetch: %s\n",
3286 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3287 	}
3288 
3289 unlock:
3290 	mutex_unlock(&psr->lock);
3291 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3292 
3293 	return 0;
3294 }
3295 
3296 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3297 {
3298 	struct drm_i915_private *dev_priv = m->private;
3299 	struct intel_dp *intel_dp = NULL;
3300 	struct intel_encoder *encoder;
3301 
3302 	if (!HAS_PSR(dev_priv))
3303 		return -ENODEV;
3304 
3305 	/* Find the first eDP encoder that supports PSR */
3306 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3307 		intel_dp = enc_to_intel_dp(encoder);
3308 		break;
3309 	}
3310 
3311 	if (!intel_dp)
3312 		return -ENODEV;
3313 
3314 	return intel_psr_status(m, intel_dp);
3315 }
3316 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3317 
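/*
 * Apply the requested debug value to every PSR-capable encoder. The value is
 * currently global: the loop writes the same value to each encoder, and the
 * return code reflects the last encoder processed (see the TODO below about
 * per-transcoder debug state).
 */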
3318 static int
3319 i915_edp_psr_debug_set(void *data, u64 val)
3320 {
3321 	struct drm_i915_private *dev_priv = data;
3322 	struct intel_encoder *encoder;
3323 	intel_wakeref_t wakeref;
3324 	int ret = -ENODEV;
3325 
3326 	if (!HAS_PSR(dev_priv))
3327 		return ret;
3328 
3329 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3330 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3331 
3332 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3333 
3334 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3335 
3336 		// TODO: split into per-transcoder PSR debug state
3337 		ret = intel_psr_debug_set(intel_dp, val);
3338 
3339 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3340 	}
3341 
3342 	return ret;
3343 }
3344 
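/*
 * Report the debug value of the first PSR-capable encoder; with the current
 * global debug state this is representative of all of them.
 */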
3345 static int
3346 i915_edp_psr_debug_get(void *data, u64 *val)
3347 {
3348 	struct drm_i915_private *dev_priv = data;
3349 	struct intel_encoder *encoder;
3350 
3351 	if (!HAS_PSR(dev_priv))
3352 		return -ENODEV;
3353 
3354 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3355 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3356 
3357 		// TODO: split into per-transcoder PSR debug state
3358 		*val = READ_ONCE(intel_dp->psr.debug);
3359 		return 0;
3360 	}
3361 
3362 	return -ENODEV;
3363 }
3364 
3365 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3366 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3367 			"%llu\n");
3368 
3369 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3370 {
3371 	struct drm_minor *minor = i915->drm.primary;
3372 
3373 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3374 			    i915, &i915_edp_psr_debug_fops);
3375 
3376 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3377 			    i915, &i915_edp_psr_status_fops);
3378 }
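/*
 * Usage sketch (illustrative, not taken from this file): with debugfs
 * mounted, the two files registered above typically appear under the DRM
 * minor's debugfs directory, e.g.
 *
 *   cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *   echo 1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * where the debug value is a bitmask of the I915_PSR_DEBUG_* flags defined
 * elsewhere in the driver and the path depends on the card's minor number.
 */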
3379 
3380 static const char *psr_mode_str(struct intel_dp *intel_dp)
3381 {
3382 	if (intel_dp->psr.panel_replay_enabled)
3383 		return "PANEL-REPLAY";
3384 	else if (intel_dp->psr.enabled)
3385 		return "PSR";
3386 
3387 	return "unknown";
3388 }
3389 
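/*
 * Per-connector debugfs: query the sink's PSR (or Panel Replay) status and
 * error status over DPCD and decode them into human readable strings.
 */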
3390 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3391 {
3392 	struct intel_connector *connector = m->private;
3393 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3394 	static const char * const sink_status[] = {
3395 		"inactive",
3396 		"transition to active, capture and display",
3397 		"active, display from RFB",
3398 		"active, capture and display on sink device timings",
3399 		"transition to inactive, capture and display, timing re-sync",
3400 		"reserved",
3401 		"reserved",
3402 		"sink internal error",
3403 	};
3404 	static const char * const panel_replay_status[] = {
3405 		"Sink device frame is locked to the Source device",
3406 		"Sink device is coasting, using the VTotal target",
3407 		"Sink device is governing the frame rate (frame rate unlock is granted)",
3408 		"Sink device in the process of re-locking with the Source device",
3409 	};
3410 	const char *str;
3411 	int ret;
3412 	u8 status, error_status;
3413 	u32 idx;
3414 
3415 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3416 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3417 		return -ENODEV;
3418 	}
3419 
3420 	if (connector->base.status != connector_status_connected)
3421 		return -ENODEV;
3422 
3423 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3424 	if (ret)
3425 		return ret;
3426 
3427 	str = "unknown";
3428 	if (intel_dp->psr.panel_replay_enabled) {
3429 		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3430 		if (idx < ARRAY_SIZE(panel_replay_status))
3431 			str = panel_replay_status[idx];
3432 	} else if (intel_dp->psr.enabled) {
3433 		idx = status & DP_PSR_SINK_STATE_MASK;
3434 		if (idx < ARRAY_SIZE(sink_status))
3435 			str = sink_status[idx];
3436 	}
3437 
3438 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3439 
3440 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3441 
3442 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3443 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3444 			    DP_PSR_LINK_CRC_ERROR))
3445 		seq_puts(m, ":\n");
3446 	else
3447 		seq_puts(m, "\n");
3448 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3449 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3450 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3451 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3452 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3453 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3454 
3455 	return ret;
3456 }
3457 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3458 
3459 static int i915_psr_status_show(struct seq_file *m, void *data)
3460 {
3461 	struct intel_connector *connector = m->private;
3462 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3463 
3464 	return intel_psr_status(m, intel_dp);
3465 }
3466 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3467 
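/*
 * Register the per-connector PSR debugfs files for eDP and non-MST
 * DisplayPort connectors. As a hedged example, for an internal panel these
 * typically end up as /sys/kernel/debug/dri/<minor>/eDP-1/i915_psr_sink_status
 * and .../eDP-1/i915_psr_status (the exact connector name depends on the
 * platform).
 */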
3468 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3469 {
3470 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3471 	struct dentry *root = connector->base.debugfs_entry;
3472 
3473 	/* TODO: Add support for MST connectors as well. */
3474 	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3475 	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3476 	    connector->mst_port)
3477 		return;
3478 
3479 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3480 			    connector, &i915_psr_sink_status_fops);
3481 
3482 	if (HAS_PSR(i915) || HAS_DP20(i915))
3483 		debugfs_create_file("i915_psr_status", 0444, root,
3484 				    connector, &i915_psr_status_fops);
3485 }
3486