1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/debugfs.h>
25
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_vblank.h>
30
31 #include "i915_reg.h"
32 #include "intel_alpm.h"
33 #include "intel_atomic.h"
34 #include "intel_crtc.h"
35 #include "intel_cursor_regs.h"
36 #include "intel_ddi.h"
37 #include "intel_de.h"
38 #include "intel_display_irq.h"
39 #include "intel_display_regs.h"
40 #include "intel_display_rpm.h"
41 #include "intel_display_types.h"
42 #include "intel_dmc.h"
43 #include "intel_dp.h"
44 #include "intel_dp_aux.h"
45 #include "intel_dsb.h"
46 #include "intel_frontbuffer.h"
47 #include "intel_hdmi.h"
48 #include "intel_psr.h"
49 #include "intel_psr_regs.h"
50 #include "intel_snps_phy.h"
51 #include "intel_step.h"
52 #include "intel_vblank.h"
53 #include "intel_vrr.h"
54 #include "skl_universal_plane.h"
55
56 /**
57 * DOC: Panel Self Refresh (PSR/SRD)
58 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
65 *
66 * Panel Self Refresh must be supported by both Hardware (source) and
67 * Panel (sink).
68 *
69 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
70 * to power down the link and memory controller. For DSI panels the same idea
71 * is called "manual mode".
72 *
73 * The implementation uses the hardware-based PSR support which automatically
74 * enters/exits self-refresh mode. The hardware takes care of sending the
75 * required DP aux message and could even retrain the link (that part isn't
76 * enabled yet though). The hardware also keeps track of any frontbuffer
77 * changes to know when to exit self-refresh mode again. Unfortunately that
78 * part doesn't work too well, hence why the i915 PSR support uses the
79 * software frontbuffer tracking to make sure it doesn't miss a screen
80 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
81 * get called by the frontbuffer tracking code. Note that because of locking
82 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
84 *
85 * DC3CO (DC3 clock off)
86 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during the PSR2 idle state.
89 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
90 * entry/exit allows the HW to enter a low-power state even when page flipping
91 * periodically (for instance a 30fps video playback scenario).
92 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
 * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and that work executes, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in
 * case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only use
 * page flips.
101 */
102
103 /*
104 * Description of PSR mask bits:
105 *
106 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
107 *
108 * When unmasked (nearly) all display register writes (eg. even
109 * SWF) trigger a PSR exit. Some registers are excluded from this
110 * and they have a more specific mask (described below). On icl+
111 * this bit no longer exists and is effectively always set.
112 *
113 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
114 *
115 * When unmasked (nearly) all pipe/plane register writes
116 * trigger a PSR exit. Some plane registers are excluded from this
117 * and they have a more specific mask (described below).
118 *
119 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
120 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
121 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
122 *
123 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
124 * SPR_SURF/CURBASE are not included in this and instead are
125 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
126 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
127 *
128 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
129 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
130 *
131 * When unmasked PSR is blocked as long as the sprite
132 * plane is enabled. skl+ with their universal planes no
133 * longer have a mask bit like this, and no plane being
 * enabled blocks PSR.
135 *
136 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
137 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
138 *
 * When unmasked CURPOS writes trigger a PSR exit. On skl+
 * this bit no longer exists, but CURPOS is included in the
 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
142 *
143 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
144 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
145 *
146 * When unmasked PSR is blocked as long as vblank and/or vsync
147 * interrupt is unmasked in IMR *and* enabled in IER.
148 *
149 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
150 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
151 *
 * Selects whether PSR exit generates an extra vblank before
 * the first frame is transmitted. Also note the opposite polarity
 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 * unmasked==do not generate the extra vblank).
156 *
157 * With DC states enabled the extra vblank happens after link training,
 * with DC states disabled it happens immediately upon the PSR exit trigger.
159 * No idea as of now why there is a difference. HSW/BDW (which don't
160 * even have DMC) always generate it after link training. Go figure.
161 *
162 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
163 * and thus won't latch until the first vblank. So with DC states
164 * enabled the register effectively uses the reset value during DC5
165 * exit+PSR exit sequence, and thus the bit does nothing until
166 * latched by the vblank that it was trying to prevent from being
167 * generated in the first place. So we should probably call this
168 * one a chicken/egg bit instead on skl+.
169 *
170 * In standby mode (as opposed to link-off) this makes no difference
171 * as the timing generator keeps running the whole time generating
172 * normal periodic vblanks.
173 *
174 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
175 * and doing so makes the behaviour match the skl+ reset value.
176 *
177 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
178 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
179 *
 * On BDW without this bit set no vblanks whatsoever are
181 * generated after PSR exit. On HSW this has no apparent effect.
182 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
183 *
184 * The rest of the bits are more self-explanatory and/or
185 * irrelevant for normal operation.
186 *
187 * Description of intel_crtc_state variables. has_psr, has_panel_replay and
188 * has_sel_update:
189 *
190 * has_psr (alone): PSR1
191 * has_psr + has_sel_update: PSR2
192 * has_psr + has_panel_replay: Panel Replay
193 * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
194 *
195 * Description of some intel_psr variables. enabled, panel_replay_enabled,
196 * sel_update_enabled
197 *
198 * enabled (alone): PSR1
199 * enabled + sel_update_enabled: PSR2
200 * enabled + panel_replay_enabled: Panel Replay
201 * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
202 */
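
/*
 * Illustrative sketch only (not part of the driver): how the crtc_state
 * flags described above map onto the mode names used throughout this file.
 * The helper name is made up for the example.
 */
static inline const char *psr_mode_name_example(const struct intel_crtc_state *crtc_state)
{
        if (!crtc_state->has_psr)
                return "off";
        if (crtc_state->has_panel_replay)
                return crtc_state->has_sel_update ?
                        "Panel Replay Selective Update" : "Panel Replay";
        return crtc_state->has_sel_update ? "PSR2" : "PSR1";
}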
203
204 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
205 (intel_dp)->psr.source_support)
206
bool intel_encoder_can_psr(struct intel_encoder *encoder)
208 {
209 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
210 return CAN_PSR(enc_to_intel_dp(encoder)) ||
211 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
212 else
213 return false;
214 }
215
bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
217 const struct intel_crtc_state *crtc_state)
218 {
219 /*
220 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
221 * the output is enabled. For non-eDP outputs the main link is always
222 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
223 * for eDP.
224 *
225 * TODO:
226 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
227 * the ALPM with main-link off mode is not enabled.
228 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
229 * main-link off mode is added for it and this mode gets enabled.
230 */
231 return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
232 intel_encoder_can_psr(encoder);
233 }
234
static bool psr_global_enabled(struct intel_dp *intel_dp)
236 {
237 struct intel_connector *connector = intel_dp->attached_connector;
238
239 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
240 case I915_PSR_DEBUG_DEFAULT:
241 return intel_dp_is_edp(intel_dp) ?
242 connector->panel.vbt.psr.enable : true;
243 case I915_PSR_DEBUG_DISABLE:
244 return false;
245 default:
246 return true;
247 }
248 }
249
static bool sel_update_global_enabled(struct intel_dp *intel_dp)
251 {
252 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
253 case I915_PSR_DEBUG_DISABLE:
254 case I915_PSR_DEBUG_FORCE_PSR1:
255 return false;
256 default:
257 return true;
258 }
259 }
260
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
262 {
263 struct intel_display *display = to_intel_display(intel_dp);
264
265 return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
266 display->params.enable_panel_replay;
267 }
268
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
270 {
271 struct intel_display *display = to_intel_display(intel_dp);
272
273 return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
274 EDP_PSR_ERROR(intel_dp->psr.transcoder);
275 }
276
static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
278 {
279 struct intel_display *display = to_intel_display(intel_dp);
280
281 return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
282 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
283 }
284
static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
286 {
287 struct intel_display *display = to_intel_display(intel_dp);
288
289 return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
290 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
291 }
292
static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
294 {
295 struct intel_display *display = to_intel_display(intel_dp);
296
297 return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
298 EDP_PSR_MASK(intel_dp->psr.transcoder);
299 }
300
static i915_reg_t psr_ctl_reg(struct intel_display *display,
302 enum transcoder cpu_transcoder)
303 {
304 if (DISPLAY_VER(display) >= 8)
305 return EDP_PSR_CTL(display, cpu_transcoder);
306 else
307 return HSW_SRD_CTL;
308 }
309
static i915_reg_t psr_debug_reg(struct intel_display *display,
311 enum transcoder cpu_transcoder)
312 {
313 if (DISPLAY_VER(display) >= 8)
314 return EDP_PSR_DEBUG(display, cpu_transcoder);
315 else
316 return HSW_SRD_DEBUG;
317 }
318
static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
320 enum transcoder cpu_transcoder)
321 {
322 if (DISPLAY_VER(display) >= 8)
323 return EDP_PSR_PERF_CNT(display, cpu_transcoder);
324 else
325 return HSW_SRD_PERF_CNT;
326 }
327
static i915_reg_t psr_status_reg(struct intel_display *display,
329 enum transcoder cpu_transcoder)
330 {
331 if (DISPLAY_VER(display) >= 8)
332 return EDP_PSR_STATUS(display, cpu_transcoder);
333 else
334 return HSW_SRD_STATUS;
335 }
336
static i915_reg_t psr_imr_reg(struct intel_display *display,
338 enum transcoder cpu_transcoder)
339 {
340 if (DISPLAY_VER(display) >= 12)
341 return TRANS_PSR_IMR(display, cpu_transcoder);
342 else
343 return EDP_PSR_IMR;
344 }
345
static i915_reg_t psr_iir_reg(struct intel_display *display,
347 enum transcoder cpu_transcoder)
348 {
349 if (DISPLAY_VER(display) >= 12)
350 return TRANS_PSR_IIR(display, cpu_transcoder);
351 else
352 return EDP_PSR_IIR;
353 }
354
static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
356 enum transcoder cpu_transcoder)
357 {
358 if (DISPLAY_VER(display) >= 8)
359 return EDP_PSR_AUX_CTL(display, cpu_transcoder);
360 else
361 return HSW_SRD_AUX_CTL;
362 }
363
static i915_reg_t psr_aux_data_reg(struct intel_display *display,
365 enum transcoder cpu_transcoder, int i)
366 {
367 if (DISPLAY_VER(display) >= 8)
368 return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
369 else
370 return HSW_SRD_AUX_DATA(i);
371 }
372
static void psr_irq_control(struct intel_dp *intel_dp)
374 {
375 struct intel_display *display = to_intel_display(intel_dp);
376 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
377 u32 mask;
378
379 if (intel_dp->psr.panel_replay_enabled)
380 return;
381
382 mask = psr_irq_psr_error_bit_get(intel_dp);
383 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
384 mask |= psr_irq_post_exit_bit_get(intel_dp) |
385 psr_irq_pre_entry_bit_get(intel_dp);
386
387 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
388 psr_irq_mask_get(intel_dp), ~mask);
389 }
390
static void psr_event_print(struct intel_display *display,
392 u32 val, bool sel_update_enabled)
393 {
394 drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
395 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
396 drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
397 if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
398 drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
399 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
400 drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
401 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
402 drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
403 if (val & PSR_EVENT_GRAPHICS_RESET)
404 drm_dbg_kms(display->drm, "\tGraphics reset\n");
405 if (val & PSR_EVENT_PCH_INTERRUPT)
406 drm_dbg_kms(display->drm, "\tPCH interrupt\n");
407 if (val & PSR_EVENT_MEMORY_UP)
408 drm_dbg_kms(display->drm, "\tMemory up\n");
409 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
410 drm_dbg_kms(display->drm, "\tFront buffer modification\n");
411 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
412 drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
413 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
414 drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
415 if (val & PSR_EVENT_REGISTER_UPDATE)
416 drm_dbg_kms(display->drm, "\tRegister updated\n");
417 if (val & PSR_EVENT_HDCP_ENABLE)
418 drm_dbg_kms(display->drm, "\tHDCP enabled\n");
419 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
420 drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
421 if (val & PSR_EVENT_VBI_ENABLE)
422 drm_dbg_kms(display->drm, "\tVBI enabled\n");
423 if (val & PSR_EVENT_LPSP_MODE_EXIT)
424 drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
425 if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
426 drm_dbg_kms(display->drm, "\tPSR disabled\n");
427 }
428
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
430 {
431 struct intel_display *display = to_intel_display(intel_dp);
432 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
433 ktime_t time_ns = ktime_get();
434
435 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
436 intel_dp->psr.last_entry_attempt = time_ns;
437 drm_dbg_kms(display->drm,
438 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
439 transcoder_name(cpu_transcoder));
440 }
441
442 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
443 intel_dp->psr.last_exit = time_ns;
444 drm_dbg_kms(display->drm,
445 "[transcoder %s] PSR exit completed\n",
446 transcoder_name(cpu_transcoder));
447
448 if (DISPLAY_VER(display) >= 9) {
449 u32 val;
450
451 val = intel_de_rmw(display,
452 PSR_EVENT(display, cpu_transcoder),
453 0, 0);
454
455 psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
456 }
457 }
458
459 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
460 drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
461 transcoder_name(cpu_transcoder));
462
463 intel_dp->psr.irq_aux_error = true;
464
465 /*
 * If this interrupt is not masked it will keep firing so fast
 * that it prevents the scheduled work from running.
 * Also, after a PSR error we don't want to arm PSR again, so
 * we don't care about unmasking the interrupt or clearing
 * irq_aux_error.
472 */
473 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
474 0, psr_irq_psr_error_bit_get(intel_dp));
475
476 queue_work(display->wq.unordered, &intel_dp->psr.work);
477 }
478 }
479
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
481 {
482 struct intel_display *display = to_intel_display(intel_dp);
483 u8 val = 8; /* assume the worst if we can't read the value */
484
485 if (drm_dp_dpcd_readb(&intel_dp->aux,
486 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
487 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
488 else
489 drm_dbg_kms(display->drm,
490 "Unable to get sink synchronization latency, assuming 8 frames\n");
491 return val;
492 }
493
static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
495 {
496 u8 su_capability = 0;
497
498 if (intel_dp->psr.sink_panel_replay_su_support) {
499 if (drm_dp_dpcd_read_byte(&intel_dp->aux,
500 DP_PANEL_REPLAY_CAP_CAPABILITY,
501 &su_capability) < 0)
502 return 0;
503 } else {
504 su_capability = intel_dp->psr_dpcd[1];
505 }
506
507 return su_capability;
508 }
509
510 static unsigned int
intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
512 {
513 return intel_dp->psr.sink_panel_replay_su_support ?
514 DP_PANEL_REPLAY_CAP_X_GRANULARITY :
515 DP_PSR2_SU_X_GRANULARITY;
516 }
517
518 static unsigned int
intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
520 {
521 return intel_dp->psr.sink_panel_replay_su_support ?
522 DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
523 DP_PSR2_SU_Y_GRANULARITY;
524 }
525
526 /*
 * Note: Bits related to granularity are the same in the panel replay and PSR
 * registers. Rely on the PSR definitions for these "common" bits.
529 */
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
531 {
532 struct intel_display *display = to_intel_display(intel_dp);
533 ssize_t r;
534 u16 w;
535 u8 y;
536
537 /*
538 * TODO: Do we need to take into account panel supporting both PSR and
539 * Panel replay?
540 */
541
542 /*
 * If the sink doesn't have specific granularity requirements, set the
 * legacy ones.
545 */
546 if (!(intel_dp_get_su_capability(intel_dp) &
547 DP_PSR2_SU_GRANULARITY_REQUIRED)) {
548 /* As PSR2 HW sends full lines, we do not care about x granularity */
549 w = 4;
550 y = 4;
551 goto exit;
552 }
553
554 r = drm_dp_dpcd_read(&intel_dp->aux,
555 intel_dp_get_su_x_granularity_offset(intel_dp),
556 &w, 2);
557 if (r != 2)
558 drm_dbg_kms(display->drm,
559 "Unable to read selective update x granularity\n");
560 /*
561 * Spec says that if the value read is 0 the default granularity should
562 * be used instead.
563 */
564 if (r != 2 || w == 0)
565 w = 4;
566
567 r = drm_dp_dpcd_read(&intel_dp->aux,
568 intel_dp_get_su_y_granularity_offset(intel_dp),
569 &y, 1);
570 if (r != 1) {
571 drm_dbg_kms(display->drm,
572 "Unable to read selective update y granularity\n");
573 y = 4;
574 }
575 if (y == 0)
576 y = 1;
577
578 exit:
579 intel_dp->psr.su_w_granularity = w;
580 intel_dp->psr.su_y_granularity = y;
581 }
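
/*
 * Example of the fallbacks above (illustrative numbers): a sink that sets
 * DP_PSR2_SU_GRANULARITY_REQUIRED but reports an X granularity of 0 and a
 * Y granularity of 0 ends up with su_w_granularity == 4 and
 * su_y_granularity == 1.
 */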
582
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
584 {
585 struct intel_display *display = to_intel_display(intel_dp);
586 int ret;
587
588 /* TODO: Enable Panel Replay on MST once it's properly implemented. */
589 if (intel_dp->mst_detect == DRM_DP_MST)
590 return;
591
592 ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
593 &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
594 if (ret < 0)
595 return;
596
597 if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
598 DP_PANEL_REPLAY_SUPPORT))
599 return;
600
601 if (intel_dp_is_edp(intel_dp)) {
602 if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
603 drm_dbg_kms(display->drm,
604 "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
605 return;
606 }
607
608 if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
609 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
610 drm_dbg_kms(display->drm,
611 "Panel doesn't support early transport, eDP Panel Replay not possible\n");
612 return;
613 }
614 }
615
616 intel_dp->psr.sink_panel_replay_support = true;
617
618 if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
619 DP_PANEL_REPLAY_SU_SUPPORT)
620 intel_dp->psr.sink_panel_replay_su_support = true;
621
622 drm_dbg_kms(display->drm,
623 "Panel replay %sis supported by panel\n",
624 intel_dp->psr.sink_panel_replay_su_support ?
625 "selective_update " : "");
626 }
627
static void _psr_init_dpcd(struct intel_dp *intel_dp)
629 {
630 struct intel_display *display = to_intel_display(intel_dp);
631 int ret;
632
633 ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
634 sizeof(intel_dp->psr_dpcd));
635 if (ret < 0)
636 return;
637
638 if (!intel_dp->psr_dpcd[0])
639 return;
640
641 drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
642 intel_dp->psr_dpcd[0]);
643
644 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
645 drm_dbg_kms(display->drm,
646 "PSR support not currently available for this panel\n");
647 return;
648 }
649
650 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
651 drm_dbg_kms(display->drm,
652 "Panel lacks power state control, PSR cannot be enabled\n");
653 return;
654 }
655
656 intel_dp->psr.sink_support = true;
657 intel_dp->psr.sink_sync_latency =
658 intel_dp_get_sink_sync_latency(intel_dp);
659
660 if (DISPLAY_VER(display) >= 9 &&
661 intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
662 bool y_req = intel_dp->psr_dpcd[1] &
663 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
664
665 /*
 * All panels that support PSR version 03h (PSR2 +
 * Y-coordinate) can handle Y-coordinates in the VSC, but we are
 * only sure that it is going to be used when required by the
 * panel. This way the panel is capable of doing selective
 * updates without an AUX frame sync.
 *
 * To support panels with PSR version 02h, or version 03h without
 * the Y-coordinate requirement, we would need to enable
 * GTC first.
675 */
676 intel_dp->psr.sink_psr2_support = y_req &&
677 intel_alpm_aux_wake_supported(intel_dp);
678 drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
679 intel_dp->psr.sink_psr2_support ? "" : "not ");
680 }
681 }
682
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
684 {
685 _psr_init_dpcd(intel_dp);
686
687 _panel_replay_init_dpcd(intel_dp);
688
689 if (intel_dp->psr.sink_psr2_support ||
690 intel_dp->psr.sink_panel_replay_su_support)
691 intel_dp_get_su_granularity(intel_dp);
692 }
693
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
695 {
696 struct intel_display *display = to_intel_display(intel_dp);
697 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
698 u32 aux_clock_divider, aux_ctl;
699 /* write DP_SET_POWER=D0 */
700 static const u8 aux_msg[] = {
701 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
702 [1] = (DP_SET_POWER >> 8) & 0xff,
703 [2] = DP_SET_POWER & 0xff,
704 [3] = 1 - 1,
705 [4] = DP_SET_POWER_D0,
706 };
707 int i;
708
709 BUILD_BUG_ON(sizeof(aux_msg) > 20);
710 for (i = 0; i < sizeof(aux_msg); i += 4)
711 intel_de_write(display,
712 psr_aux_data_reg(display, cpu_transcoder, i >> 2),
713 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
714
715 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
716
717 /* Start with bits set for DDI_AUX_CTL register */
718 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
719 aux_clock_divider);
720
721 /* Select only valid bits for SRD_AUX_CTL */
722 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
723 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
724 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
725 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
726
727 intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
728 aux_ctl);
729 }
730
static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
732 {
733 struct intel_display *display = to_intel_display(intel_dp);
734
735 if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
736 intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
737 return false;
738
739 return panel_replay ?
740 intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
741 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
742 intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
743 }
744
static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
746 const struct intel_crtc_state *crtc_state)
747 {
748 u8 val = DP_PANEL_REPLAY_ENABLE |
749 DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
750 DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
751 DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
752 DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
753 u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
754
755 if (crtc_state->has_sel_update)
756 val |= DP_PANEL_REPLAY_SU_ENABLE;
757
758 if (crtc_state->enable_psr2_su_region_et)
759 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
760
761 if (crtc_state->req_psr2_sdp_prior_scanline)
762 panel_replay_config2 |=
763 DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
764
765 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
766
767 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
768 panel_replay_config2);
769 }
770
static void _psr_enable_sink(struct intel_dp *intel_dp,
772 const struct intel_crtc_state *crtc_state)
773 {
774 struct intel_display *display = to_intel_display(intel_dp);
775 u8 val = 0;
776
777 if (crtc_state->has_sel_update) {
778 val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
779 } else {
780 if (intel_dp->psr.link_standby)
781 val |= DP_PSR_MAIN_LINK_ACTIVE;
782
783 if (DISPLAY_VER(display) >= 8)
784 val |= DP_PSR_CRC_VERIFICATION;
785 }
786
787 if (crtc_state->req_psr2_sdp_prior_scanline)
788 val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
789
790 if (crtc_state->enable_psr2_su_region_et)
791 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
792
793 if (intel_dp->psr.entry_setup_frames > 0)
794 val |= DP_PSR_FRAME_CAPTURE;
795 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
796
797 val |= DP_PSR_ENABLE;
798 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
799 }
800
static void intel_psr_enable_sink(struct intel_dp *intel_dp,
802 const struct intel_crtc_state *crtc_state)
803 {
804 intel_alpm_enable_sink(intel_dp, crtc_state);
805
806 crtc_state->has_panel_replay ?
807 _panel_replay_enable_sink(intel_dp, crtc_state) :
808 _psr_enable_sink(intel_dp, crtc_state);
809
810 if (intel_dp_is_edp(intel_dp))
811 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
812 }
813
void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
815 {
816 if (CAN_PANEL_REPLAY(intel_dp))
817 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
818 DP_PANEL_REPLAY_ENABLE);
819 }
820
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
822 {
823 struct intel_display *display = to_intel_display(intel_dp);
824 struct intel_connector *connector = intel_dp->attached_connector;
825 u32 val = 0;
826
827 if (DISPLAY_VER(display) >= 11)
828 val |= EDP_PSR_TP4_TIME_0us;
829
830 if (display->params.psr_safest_params) {
831 val |= EDP_PSR_TP1_TIME_2500us;
832 val |= EDP_PSR_TP2_TP3_TIME_2500us;
833 goto check_tp3_sel;
834 }
835
836 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
837 val |= EDP_PSR_TP1_TIME_0us;
838 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
839 val |= EDP_PSR_TP1_TIME_100us;
840 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
841 val |= EDP_PSR_TP1_TIME_500us;
842 else
843 val |= EDP_PSR_TP1_TIME_2500us;
844
845 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
846 val |= EDP_PSR_TP2_TP3_TIME_0us;
847 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
848 val |= EDP_PSR_TP2_TP3_TIME_100us;
849 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
850 val |= EDP_PSR_TP2_TP3_TIME_500us;
851 else
852 val |= EDP_PSR_TP2_TP3_TIME_2500us;
853
854 /*
855 * WA 0479: hsw,bdw
856 * "Do not skip both TP1 and TP2/TP3"
857 */
858 if (DISPLAY_VER(display) < 9 &&
859 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
860 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
861 val |= EDP_PSR_TP2_TP3_TIME_100us;
862
863 check_tp3_sel:
864 if (intel_dp_source_supports_tps3(display) &&
865 drm_dp_tps3_supported(intel_dp->dpcd))
866 val |= EDP_PSR_TP_TP1_TP3;
867 else
868 val |= EDP_PSR_TP_TP1_TP2;
869
870 return val;
871 }
872
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
874 {
875 struct intel_display *display = to_intel_display(intel_dp);
876 struct intel_connector *connector = intel_dp->attached_connector;
877 int idle_frames;
878
879 /* Let's use 6 as the minimum to cover all known cases including the
880 * off-by-one issue that HW has in some cases.
881 */
882 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
883 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
884
885 if (drm_WARN_ON(display->drm, idle_frames > 0xf))
886 idle_frames = 0xf;
887
888 return idle_frames;
889 }
890
static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
892 {
893 struct intel_display *display = to_intel_display(intel_dp);
894 u32 current_dc_state = intel_display_power_get_current_dc_state(display);
895 struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
896 struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
897
898 return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
899 current_dc_state != DC_STATE_EN_UPTO_DC6) ||
900 intel_dp->psr.active_non_psr_pipes ||
901 READ_ONCE(vblank->enabled);
902 }
903
static void hsw_activate_psr1(struct intel_dp *intel_dp)
905 {
906 struct intel_display *display = to_intel_display(intel_dp);
907 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
908 u32 max_sleep_time = 0x1f;
909 u32 val = EDP_PSR_ENABLE;
910
911 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
912
913 if (DISPLAY_VER(display) < 20)
914 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
915
916 if (display->platform.haswell)
917 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
918
919 if (intel_dp->psr.link_standby)
920 val |= EDP_PSR_LINK_STANDBY;
921
922 val |= intel_psr1_get_tp_time(intel_dp);
923
924 if (DISPLAY_VER(display) >= 8)
925 val |= EDP_PSR_CRC_ENABLE;
926
927 if (DISPLAY_VER(display) >= 20)
928 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
929
930 intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
931 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
932
933 /* Wa_16025596647 */
934 if ((DISPLAY_VER(display) == 20 ||
935 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
936 is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
937 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
938 intel_dp->psr.pipe,
939 true);
940 }
941
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
943 {
944 struct intel_display *display = to_intel_display(intel_dp);
945 struct intel_connector *connector = intel_dp->attached_connector;
946 u32 val = 0;
947
948 if (display->params.psr_safest_params)
949 return EDP_PSR2_TP2_TIME_2500us;
950
951 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
952 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
953 val |= EDP_PSR2_TP2_TIME_50us;
954 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
955 val |= EDP_PSR2_TP2_TIME_100us;
956 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
957 val |= EDP_PSR2_TP2_TIME_500us;
958 else
959 val |= EDP_PSR2_TP2_TIME_2500us;
960
961 return val;
962 }
963
static int psr2_block_count_lines(struct intel_dp *intel_dp)
965 {
966 return intel_dp->alpm_parameters.io_wake_lines < 9 &&
967 intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
968 }
969
static int psr2_block_count(struct intel_dp *intel_dp)
971 {
972 return psr2_block_count_lines(intel_dp) / 4;
973 }
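
/*
 * Worked example (illustrative numbers): io_wake_lines == 7 and
 * fast_wake_lines == 7 keep the block count at 8 lines, i.e. a block
 * count of 2; either value reaching 9 or more bumps it to 12 lines (3).
 */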
974
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
976 {
977 u8 frames_before_su_entry;
978
979 frames_before_su_entry = max_t(u8,
980 intel_dp->psr.sink_sync_latency + 1,
981 2);
982
983 /* Entry setup frames must be at least 1 less than frames before SU entry */
984 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
985 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
986
987 return frames_before_su_entry;
988 }
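
/*
 * Worked example (illustrative numbers): with sink_sync_latency == 1 the
 * max above gives 2 frames before SU entry; if entry_setup_frames == 2 as
 * well, the result is bumped to 3 so that entry setup stays at least one
 * frame shorter.
 */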
989
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
991 {
992 struct intel_display *display = to_intel_display(intel_dp);
993 struct intel_psr *psr = &intel_dp->psr;
994 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
995
996 if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
997 u32 val = psr->su_region_et_enabled ?
998 LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
999
1000 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1001 val |= EDP_PSR2_SU_SDP_SCANLINE;
1002
1003 intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
1004 val);
1005 }
1006
1007 intel_de_rmw(display,
1008 PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
1009 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1010
1011 intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1012 TRANS_DP2_PANEL_REPLAY_ENABLE);
1013 }
1014
static void hsw_activate_psr2(struct intel_dp *intel_dp)
1016 {
1017 struct intel_display *display = to_intel_display(intel_dp);
1018 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1019 u32 val = EDP_PSR2_ENABLE;
1020 u32 psr_val = 0;
1021 u8 idle_frames;
1022
1023 /* Wa_16025596647 */
1024 if ((DISPLAY_VER(display) == 20 ||
1025 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1026 is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
1027 idle_frames = 0;
1028 else
1029 idle_frames = psr_compute_idle_frames(intel_dp);
1030 val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
1031
1032 if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
1033 val |= EDP_SU_TRACK_ENABLE;
1034
1035 if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1036 val |= EDP_Y_COORDINATE_ENABLE;
1037
1038 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1039
1040 val |= intel_psr2_get_tp_time(intel_dp);
1041
1042 if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1043 if (psr2_block_count(intel_dp) > 2)
1044 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1045 else
1046 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1047 }
1048
1049 /* Wa_22012278275:adl-p */
1050 if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1051 static const u8 map[] = {
1052 2, /* 5 lines */
1053 1, /* 6 lines */
1054 0, /* 7 lines */
1055 3, /* 8 lines */
1056 6, /* 9 lines */
1057 5, /* 10 lines */
1058 4, /* 11 lines */
1059 7, /* 12 lines */
1060 };
1061 /*
1062 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1063 * comments below for more information
1064 */
1065 int tmp;
1066
1067 tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1068 TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1069 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1070
1071 tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1072 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1073 } else if (DISPLAY_VER(display) >= 20) {
1074 val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1075 } else if (DISPLAY_VER(display) >= 12) {
1076 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1077 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1078 } else if (DISPLAY_VER(display) >= 9) {
1079 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1080 val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1081 }
1082
1083 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1084 val |= EDP_PSR2_SU_SDP_SCANLINE;
1085
1086 if (DISPLAY_VER(display) >= 20)
1087 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1088
1089 if (intel_dp->psr.psr2_sel_fetch_enabled) {
1090 u32 tmp;
1091
1092 tmp = intel_de_read(display,
1093 PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1094 drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1095 } else if (HAS_PSR2_SEL_FETCH(display)) {
1096 intel_de_write(display,
1097 PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1098 }
1099
1100 if (intel_dp->psr.su_region_et_enabled)
1101 val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1102
1103 /*
1104 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
 * recommending to keep this bit unset while PSR2 is enabled.
1106 */
1107 intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1108
1109 intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1110 }
1111
1112 static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1114 {
1115 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1116 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1117 else if (DISPLAY_VER(display) >= 12)
1118 return cpu_transcoder == TRANSCODER_A;
1119 else if (DISPLAY_VER(display) >= 9)
1120 return cpu_transcoder == TRANSCODER_EDP;
1121 else
1122 return false;
1123 }
1124
static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1126 {
1127 if (!crtc_state->hw.active)
1128 return 0;
1129
1130 return DIV_ROUND_UP(1000 * 1000,
1131 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1132 }
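
/* E.g. a 60 Hz mode yields DIV_ROUND_UP(1000000, 60) == 16667 us. */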
1133
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1135 u32 idle_frames)
1136 {
1137 struct intel_display *display = to_intel_display(intel_dp);
1138 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1139
1140 intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1141 EDP_PSR2_IDLE_FRAMES_MASK,
1142 EDP_PSR2_IDLE_FRAMES(idle_frames));
1143 }
1144
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1146 {
1147 struct intel_display *display = to_intel_display(intel_dp);
1148
1149 psr2_program_idle_frames(intel_dp, 0);
1150 intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
1151 }
1152
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1154 {
1155 struct intel_display *display = to_intel_display(intel_dp);
1156
1157 intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1158 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1159 }
1160
static void tgl_dc3co_disable_work(struct work_struct *work)
1162 {
1163 struct intel_dp *intel_dp =
1164 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1165
1166 mutex_lock(&intel_dp->psr.lock);
1167 /* If delayed work is pending, it is not idle */
1168 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1169 goto unlock;
1170
1171 tgl_psr2_disable_dc3co(intel_dp);
1172 unlock:
1173 mutex_unlock(&intel_dp->psr.lock);
1174 }
1175
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1177 {
1178 if (!intel_dp->psr.dc3co_exitline)
1179 return;
1180
1181 cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
1183 tgl_psr2_disable_dc3co(intel_dp);
1184 }
1185
1186 static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1188 struct intel_crtc_state *crtc_state)
1189 {
1190 struct intel_display *display = to_intel_display(intel_dp);
1191 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1192 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1193 enum port port = dig_port->base.port;
1194
1195 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1196 return pipe <= PIPE_B && port <= PORT_B;
1197 else
1198 return pipe == PIPE_A && port == PORT_A;
1199 }
1200
1201 static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1203 struct intel_crtc_state *crtc_state)
1204 {
1205 struct intel_display *display = to_intel_display(intel_dp);
1206 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1207 struct i915_power_domains *power_domains = &display->power.domains;
1208 u32 exit_scanlines;
1209
1210 /*
1211 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1212 * disable DC3CO until the changed dc3co activating/deactivating sequence
1213 * is applied. B.Specs:49196
1214 */
1215 return;
1216
1217 /*
 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1219 * TODO: when the issue is addressed, this restriction should be removed.
1220 */
1221 if (crtc_state->enable_psr2_sel_fetch)
1222 return;
1223
1224 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1225 return;
1226
1227 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1228 return;
1229
1230 /* Wa_16011303918:adl-p */
1231 if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1232 return;
1233
1234 /*
1235 * DC3CO Exit time 200us B.Spec 49196
1236 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1237 */
1238 exit_scanlines =
1239 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1240
1241 if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1242 return;
1243
1244 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1245 }
1246
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1248 struct intel_crtc_state *crtc_state)
1249 {
1250 struct intel_display *display = to_intel_display(intel_dp);
1251
1252 if (!display->params.enable_psr2_sel_fetch &&
1253 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1254 drm_dbg_kms(display->drm,
1255 "PSR2 sel fetch not enabled, disabled by parameter\n");
1256 return false;
1257 }
1258
1259 return crtc_state->enable_psr2_sel_fetch = true;
1260 }
1261
static bool psr2_granularity_check(struct intel_dp *intel_dp,
1263 struct intel_crtc_state *crtc_state)
1264 {
1265 struct intel_display *display = to_intel_display(intel_dp);
1266 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1267 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1268 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1269 u16 y_granularity = 0;
1270
	/* PSR2 HW only sends full lines so we only need to validate the width */
1272 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1273 return false;
1274
1275 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1276 return false;
1277
1278 /* HW tracking is only aligned to 4 lines */
1279 if (!crtc_state->enable_psr2_sel_fetch)
1280 return intel_dp->psr.su_y_granularity == 4;
1281
1282 /*
1283 * adl_p and mtl platforms have 1 line granularity.
1284 * For other platforms with SW tracking we can adjust the y coordinates
 * to match the sink requirement if it is a multiple of 4.
1286 */
1287 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1288 y_granularity = intel_dp->psr.su_y_granularity;
1289 else if (intel_dp->psr.su_y_granularity <= 2)
1290 y_granularity = 4;
1291 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1292 y_granularity = intel_dp->psr.su_y_granularity;
1293
1294 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1295 return false;
1296
1297 if (crtc_state->dsc.compression_enable &&
1298 vdsc_cfg->slice_height % y_granularity)
1299 return false;
1300
1301 crtc_state->su_y_granularity = y_granularity;
1302 return true;
1303 }
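
/*
 * Worked example (illustrative numbers): with selective fetch on a
 * pre-mtl, non-adl_p platform and a sink reporting su_y_granularity == 2,
 * the code above rounds y_granularity up to 4, which a 1920x1080 mode
 * satisfies (1080 % 4 == 0).
 */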
1304
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1306 struct intel_crtc_state *crtc_state)
1307 {
1308 struct intel_display *display = to_intel_display(intel_dp);
1309 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1310 u32 hblank_total, hblank_ns, req_ns;
1311
1312 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1313 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1314
1315 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1316 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1317
1318 if ((hblank_ns - req_ns) > 100)
1319 return true;
1320
1321 /* Not supported <13 / Wa_22012279113:adl-p */
1322 if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1323 return false;
1324
1325 crtc_state->req_psr2_sdp_prior_scanline = true;
1326 return true;
1327 }
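
/*
 * Worked example (illustrative numbers): 4 lanes at HBR2 (540 MHz symbol
 * clock) need req_ns = (60/4 + 11) * 1000 / 540 ~= 48 ns, so a ~500 ns
 * hblank leaves well over the required 100 ns of margin and no SDP
 * prior-scanline indication is needed.
 */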
1328
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1330 const struct drm_display_mode *adjusted_mode)
1331 {
1332 struct intel_display *display = to_intel_display(intel_dp);
1333 int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1334 int entry_setup_frames = 0;
1335
1336 if (psr_setup_time < 0) {
1337 drm_dbg_kms(display->drm,
1338 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1339 intel_dp->psr_dpcd[1]);
1340 return -ETIME;
1341 }
1342
1343 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1344 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1345 if (DISPLAY_VER(display) >= 20) {
1346 /* setup entry frames can be up to 3 frames */
1347 entry_setup_frames = 1;
1348 drm_dbg_kms(display->drm,
1349 "PSR setup entry frames %d\n",
1350 entry_setup_frames);
1351 } else {
1352 drm_dbg_kms(display->drm,
1353 "PSR condition failed: PSR setup time (%d us) too long\n",
1354 psr_setup_time);
1355 return -ETIME;
1356 }
1357 }
1358
1359 return entry_setup_frames;
1360 }
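
/*
 * Rough example (illustrative numbers): a 330 us PSR setup time on a
 * 1080p60 CEA mode (~14.8 us per line, 45 lines of vblank) spans ~23
 * lines, which fits in vtotal - vdisplay - 1 == 44 lines, so
 * entry_setup_frames stays 0.
 */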
1361
static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1363 const struct intel_crtc_state *crtc_state,
1364 bool aux_less)
1365 {
1366 struct intel_display *display = to_intel_display(intel_dp);
1367 int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1368 crtc_state->hw.adjusted_mode.crtc_vblank_start;
1369 int wake_lines;
1370
1371 if (aux_less)
1372 wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1373 else
1374 wake_lines = DISPLAY_VER(display) < 20 ?
1375 psr2_block_count_lines(intel_dp) :
1376 intel_dp->alpm_parameters.io_wake_lines;
1377
1378 if (crtc_state->req_psr2_sdp_prior_scanline)
1379 vblank -= 1;
1380
1381 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1382 if (vblank < wake_lines)
1383 return false;
1384
1385 return true;
1386 }
1387
static bool alpm_config_valid(struct intel_dp *intel_dp,
1389 const struct intel_crtc_state *crtc_state,
1390 bool aux_less)
1391 {
1392 struct intel_display *display = to_intel_display(intel_dp);
1393
1394 if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1395 drm_dbg_kms(display->drm,
1396 "PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
1397 return false;
1398 }
1399
1400 if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1401 drm_dbg_kms(display->drm,
1402 "PSR2/Panel Replay not enabled, too short vblank time\n");
1403 return false;
1404 }
1405
1406 return true;
1407 }
1408
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1410 struct intel_crtc_state *crtc_state)
1411 {
1412 struct intel_display *display = to_intel_display(intel_dp);
1413 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1414 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1415 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1416
1417 if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
1418 return false;
1419
	/* JSL and EHL only support eDP 1.3 */
1421 if (display->platform.jasperlake || display->platform.elkhartlake) {
1422 drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1423 return false;
1424 }
1425
1426 /* Wa_16011181250 */
1427 if (display->platform.rocketlake || display->platform.alderlake_s ||
1428 display->platform.dg2) {
1429 drm_dbg_kms(display->drm,
1430 "PSR2 is defeatured for this platform\n");
1431 return false;
1432 }
1433
1434 if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1435 drm_dbg_kms(display->drm,
1436 "PSR2 not completely functional in this stepping\n");
1437 return false;
1438 }
1439
1440 if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1441 drm_dbg_kms(display->drm,
1442 "PSR2 not supported in transcoder %s\n",
1443 transcoder_name(crtc_state->cpu_transcoder));
1444 return false;
1445 }
1446
1447 /*
1448 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1449 * resolution requires DSC to be enabled, priority is given to DSC
1450 * over PSR2.
1451 */
1452 if (crtc_state->dsc.compression_enable &&
1453 (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
1454 drm_dbg_kms(display->drm,
1455 "PSR2 cannot be enabled since DSC is enabled\n");
1456 return false;
1457 }
1458
1459 if (DISPLAY_VER(display) >= 20) {
1460 psr_max_h = crtc_hdisplay;
1461 psr_max_v = crtc_vdisplay;
1462 max_bpp = crtc_state->pipe_bpp;
1463 } else if (IS_DISPLAY_VER(display, 12, 14)) {
1464 psr_max_h = 5120;
1465 psr_max_v = 3200;
1466 max_bpp = 30;
1467 } else if (IS_DISPLAY_VER(display, 10, 11)) {
1468 psr_max_h = 4096;
1469 psr_max_v = 2304;
1470 max_bpp = 24;
1471 } else if (DISPLAY_VER(display) == 9) {
1472 psr_max_h = 3640;
1473 psr_max_v = 2304;
1474 max_bpp = 24;
1475 }
1476
1477 if (crtc_state->pipe_bpp > max_bpp) {
1478 drm_dbg_kms(display->drm,
1479 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1480 crtc_state->pipe_bpp, max_bpp);
1481 return false;
1482 }
1483
1484 /* Wa_16011303918:adl-p */
1485 if (crtc_state->vrr.enable &&
1486 display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1487 drm_dbg_kms(display->drm,
1488 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1489 return false;
1490 }
1491
1492 if (!alpm_config_valid(intel_dp, crtc_state, false))
1493 return false;
1494
1495 if (!crtc_state->enable_psr2_sel_fetch &&
1496 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1497 drm_dbg_kms(display->drm,
1498 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1499 crtc_hdisplay, crtc_vdisplay,
1500 psr_max_h, psr_max_v);
1501 return false;
1502 }
1503
1504 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1505
1506 return true;
1507 }
1508
static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1510 struct intel_crtc_state *crtc_state)
1511 {
1512 struct intel_display *display = to_intel_display(intel_dp);
1513
1514 if (HAS_PSR2_SEL_FETCH(display) &&
1515 !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1516 !HAS_PSR_HW_TRACKING(display)) {
1517 drm_dbg_kms(display->drm,
1518 "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1519 goto unsupported;
1520 }
1521
1522 if (!sel_update_global_enabled(intel_dp)) {
1523 drm_dbg_kms(display->drm,
1524 "Selective update disabled by flag\n");
1525 goto unsupported;
1526 }
1527
1528 if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1529 goto unsupported;
1530
1531 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1532 drm_dbg_kms(display->drm,
1533 "Selective update not enabled, SDP indication does not fit in hblank\n");
1534 goto unsupported;
1535 }
1536
1537 if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1538 !intel_dp->psr.sink_panel_replay_su_support))
1539 goto unsupported;
1540
1541 if (crtc_state->crc_enabled) {
1542 drm_dbg_kms(display->drm,
1543 "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1544 goto unsupported;
1545 }
1546
1547 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1548 drm_dbg_kms(display->drm,
1549 "Selective update not enabled, SU granularity not compatible\n");
1550 goto unsupported;
1551 }
1552
1553 crtc_state->enable_psr2_su_region_et =
1554 psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1555
1556 return true;
1557
1558 unsupported:
1559 crtc_state->enable_psr2_sel_fetch = false;
1560 return false;
1561 }
1562
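/*
 * Check the generic PSR enabling conditions: sink capability, the enable_psr
 * module parameter, VRR being disabled and the PSR setup time fitting into
 * the mode timings.
 */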
1563 static bool _psr_compute_config(struct intel_dp *intel_dp,
1564 struct intel_crtc_state *crtc_state)
1565 {
1566 struct intel_display *display = to_intel_display(intel_dp);
1567 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1568 int entry_setup_frames;
1569
1570 if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
1571 return false;
1572
1573 /*
1574 * Currently PSR doesn't work reliably with VRR enabled.
1575 */
1576 if (crtc_state->vrr.enable)
1577 return false;
1578
1579 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1580
1581 if (entry_setup_frames >= 0) {
1582 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1583 } else {
1584 drm_dbg_kms(display->drm,
1585 "PSR condition failed: PSR setup timing not met\n");
1586 return false;
1587 }
1588
1589 return true;
1590 }
1591
1592 static bool
1593 _panel_replay_compute_config(struct intel_dp *intel_dp,
1594 const struct intel_crtc_state *crtc_state,
1595 const struct drm_connector_state *conn_state)
1596 {
1597 struct intel_display *display = to_intel_display(intel_dp);
1598 struct intel_connector *connector =
1599 to_intel_connector(conn_state->connector);
1600 struct intel_hdcp *hdcp = &connector->hdcp;
1601
1602 if (!CAN_PANEL_REPLAY(intel_dp))
1603 return false;
1604
1605 if (!panel_replay_global_enabled(intel_dp)) {
1606 drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1607 return false;
1608 }
1609
1610 if (crtc_state->crc_enabled) {
1611 drm_dbg_kms(display->drm,
1612 "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1613 return false;
1614 }
1615
1616 if (!intel_dp_is_edp(intel_dp))
1617 return true;
1618
1619 /* Remaining checks are for eDP only */
1620
1621 if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1622 to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1623 return false;
1624
1625 /* 128b/132b Panel Replay is not supported on eDP */
1626 if (intel_dp_is_uhbr(crtc_state)) {
1627 drm_dbg_kms(display->drm,
1628 "Panel Replay is not supported with 128b/132b\n");
1629 return false;
1630 }
1631
1632 /* HW will not allow Panel Replay on eDP when HDCP enabled */
1633 if (conn_state->content_protection ==
1634 DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1635 (conn_state->content_protection ==
1636 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1637 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1638 drm_dbg_kms(display->drm,
1639 "Panel Replay is not supported with HDCP\n");
1640 return false;
1641 }
1642
1643 if (!alpm_config_valid(intel_dp, crtc_state, true))
1644 return false;
1645
1646 return true;
1647 }
1648
1649 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1650 struct intel_crtc_state *crtc_state)
1651 {
1652 struct intel_display *display = to_intel_display(intel_dp);
1653
1654 return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
1655 !crtc_state->has_sel_update);
1656 }
1657
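/**
 * intel_psr_compute_config - Compute PSR/Panel Replay configuration
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 *
 * Determine whether Panel Replay, PSR and selective update can be enabled
 * for the given CRTC state and record the result in the state flags.
 */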
1658 void intel_psr_compute_config(struct intel_dp *intel_dp,
1659 struct intel_crtc_state *crtc_state,
1660 struct drm_connector_state *conn_state)
1661 {
1662 struct intel_display *display = to_intel_display(intel_dp);
1663 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1664 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1665 struct intel_crtc *crtc;
1666 u8 active_pipes = 0;
1667
1668 if (!psr_global_enabled(intel_dp)) {
1669 drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1670 return;
1671 }
1672
1673 if (intel_dp->psr.sink_not_reliable) {
1674 drm_dbg_kms(display->drm,
1675 "PSR sink implementation is not reliable\n");
1676 return;
1677 }
1678
1679 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1680 drm_dbg_kms(display->drm,
1681 "PSR condition failed: Interlaced mode enabled\n");
1682 return;
1683 }
1684
1685 /*
1686 * FIXME figure out what is wrong with PSR+joiner and
1687 * fix it. Presumably something related to the fact that
1688 * PSR is a transcoder level feature.
1689 */
1690 if (crtc_state->joiner_pipes) {
1691 drm_dbg_kms(display->drm,
1692 "PSR disabled due to joiner\n");
1693 return;
1694 }
1695
1696 crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1697 crtc_state,
1698 conn_state);
1699
1700 crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1701 _psr_compute_config(intel_dp, crtc_state);
1702
1703 if (!crtc_state->has_psr)
1704 return;
1705
1706 crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1707
1708 /* Wa_18037818876 */
1709 if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
1710 crtc_state->has_psr = false;
1711 drm_dbg_kms(display->drm,
1712 "PSR disabled to workaround PSR FSM hang issue\n");
1713 }
1714
1715 /* Rest is for Wa_16025596647 */
1716 if (DISPLAY_VER(display) != 20 &&
1717 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1718 return;
1719
1720 /* Not needed by Panel Replay */
1721 if (crtc_state->has_panel_replay)
1722 return;
1723
1724 /* We ignore possible secondary PSR/Panel Replay capable eDP */
1725 for_each_intel_crtc(display->drm, crtc)
1726 active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1727
1728 active_pipes = intel_calc_active_pipes(state, active_pipes);
1729
1730 crtc_state->active_non_psr_pipes = active_pipes &
1731 ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1732 }
1733
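/**
 * intel_psr_get_config - Read out the PSR/Panel Replay state
 * @encoder: Intel encoder
 * @pipe_config: CRTC state to fill in
 *
 * Used for state readout: reflects the current PSR/Panel Replay software
 * state and the selective fetch and DC3CO exitline hardware state into
 * @pipe_config.
 */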
1734 void intel_psr_get_config(struct intel_encoder *encoder,
1735 struct intel_crtc_state *pipe_config)
1736 {
1737 struct intel_display *display = to_intel_display(encoder);
1738 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1739 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1740 struct intel_dp *intel_dp;
1741 u32 val;
1742
1743 if (!dig_port)
1744 return;
1745
1746 intel_dp = &dig_port->dp;
1747 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1748 return;
1749
1750 mutex_lock(&intel_dp->psr.lock);
1751 if (!intel_dp->psr.enabled)
1752 goto unlock;
1753
1754 if (intel_dp->psr.panel_replay_enabled) {
1755 pipe_config->has_psr = pipe_config->has_panel_replay = true;
1756 } else {
1757 /*
1758 * Not possible to read back the EDP_PSR/PSR2_CTL registers here, as PSR
1759 * gets enabled/disabled at runtime because of frontbuffer tracking and others.
1760 */
1761 pipe_config->has_psr = true;
1762 }
1763
1764 pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1765 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1766
1767 if (!intel_dp->psr.sel_update_enabled)
1768 goto unlock;
1769
1770 if (HAS_PSR2_SEL_FETCH(display)) {
1771 val = intel_de_read(display,
1772 PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1773 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1774 pipe_config->enable_psr2_sel_fetch = true;
1775 }
1776
1777 pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1778
1779 if (DISPLAY_VER(display) >= 12) {
1780 val = intel_de_read(display,
1781 TRANS_EXITLINE(display, cpu_transcoder));
1782 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1783 }
1784 unlock:
1785 mutex_unlock(&intel_dp->psr.lock);
1786 }
1787
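/*
 * Arm the source hardware: activate Panel Replay, PSR2 or PSR1 depending on
 * which mode has been enabled for this port.
 */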
1788 static void intel_psr_activate(struct intel_dp *intel_dp)
1789 {
1790 struct intel_display *display = to_intel_display(intel_dp);
1791 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1792
1793 drm_WARN_ON(display->drm,
1794 transcoder_has_psr2(display, cpu_transcoder) &&
1795 intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1796
1797 drm_WARN_ON(display->drm,
1798 intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1799
1800 drm_WARN_ON(display->drm, intel_dp->psr.active);
1801
1802 drm_WARN_ON(display->drm, !intel_dp->psr.enabled);
1803
1804 lockdep_assert_held(&intel_dp->psr.lock);
1805
1806 /* psr1, psr2 and panel-replay are mutually exclusive. */
1807 if (intel_dp->psr.panel_replay_enabled)
1808 dg2_activate_panel_replay(intel_dp);
1809 else if (intel_dp->psr.sel_update_enabled)
1810 hsw_activate_psr2(intel_dp);
1811 else
1812 hsw_activate_psr1(intel_dp);
1813
1814 intel_dp->psr.active = true;
1815 }
1816
1817 /*
1818 * Wa_16013835468
1819 * Wa_14015648006
1820 */
1821 static void wm_optimization_wa(struct intel_dp *intel_dp,
1822 const struct intel_crtc_state *crtc_state)
1823 {
1824 struct intel_display *display = to_intel_display(intel_dp);
1825 enum pipe pipe = intel_dp->psr.pipe;
1826 bool activate = false;
1827
1828 /* Wa_14015648006 */
1829 if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1830 activate = true;
1831
1832 /* Wa_16013835468 */
1833 if (DISPLAY_VER(display) == 12 &&
1834 crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1835 crtc_state->hw.adjusted_mode.crtc_vdisplay)
1836 activate = true;
1837
1838 if (activate)
1839 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1840 0, LATENCY_REPORTING_REMOVED(pipe));
1841 else
1842 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1843 LATENCY_REPORTING_REMOVED(pipe), 0);
1844 }
1845
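/*
 * Program the source (display engine) side of PSR: AUX setup on HSW/BDW,
 * debug/exit masks, PSR interrupts, the DC3CO exitline and the various
 * platform workarounds.
 */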
1846 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1847 const struct intel_crtc_state *crtc_state)
1848 {
1849 struct intel_display *display = to_intel_display(intel_dp);
1850 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1851 u32 mask = 0;
1852
1853 /*
1854 * Only HSW and BDW have PSR AUX registers that need to be set up.
1855 * SKL+ use hardcoded values for PSR AUX transactions.
1856 */
1857 if (DISPLAY_VER(display) < 9)
1858 hsw_psr_setup_aux(intel_dp);
1859
1860 /*
1861 * Per spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1862 * mask LPSP to avoid a dependency on other drivers that might block
1863 * runtime_pm, and to prevent other hw tracking issues, now that we
1864 * can rely on frontbuffer tracking.
1865 *
1866 * From bspec prior to LunarLake:
1867 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1868 * panel replay mode.
1869 *
1870 * From bspec beyond LunarLake:
1871 * Panel Replay on DP: No bits are applicable
1872 * Panel Replay on eDP: All bits are applicable
1873 */
1874 if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1875 mask = EDP_PSR_DEBUG_MASK_HPD;
1876
1877 if (intel_dp_is_edp(intel_dp)) {
1878 mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1879
1880 /*
1881 * For some unknown reason on HSW non-ULT (or at least on
1882 * Dell Latitude E6540) external displays start to flicker
1883 * when PSR is enabled on the eDP. SR/PC6 residency is much
1884 * higher than should be possible with an external display.
1885 * As a workaround leave LPSP unmasked to prevent PSR entry
1886 * when external displays are active.
1887 */
1888 if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
1889 mask |= EDP_PSR_DEBUG_MASK_LPSP;
1890
1891 if (DISPLAY_VER(display) < 20)
1892 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1893
1894 /*
1895 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1896 * registers in order to keep the CURSURFLIVE tricks working :(
1897 */
1898 if (IS_DISPLAY_VER(display, 9, 10))
1899 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1900
1901 /* allow PSR with sprite enabled */
1902 if (display->platform.haswell)
1903 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1904 }
1905
1906 intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1907
1908 psr_irq_control(intel_dp);
1909
1910 /*
1911 * TODO: if future platforms supports DC3CO in more than one
1912 * transcoder, EXITLINE will need to be unset when disabling PSR
1913 */
1914 if (intel_dp->psr.dc3co_exitline)
1915 intel_de_rmw(display,
1916 TRANS_EXITLINE(display, cpu_transcoder),
1917 EXITLINE_MASK,
1918 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1919
1920 if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1921 intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1922 intel_dp->psr.psr2_sel_fetch_enabled ?
1923 IGNORE_PSR2_HW_TRACKING : 0);
1924
1925 /*
1926 * Wa_16013835468
1927 * Wa_14015648006
1928 */
1929 wm_optimization_wa(intel_dp, crtc_state);
1930
1931 if (intel_dp->psr.sel_update_enabled) {
1932 if (DISPLAY_VER(display) == 9)
1933 intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
1934 PSR2_VSC_ENABLE_PROG_HEADER |
1935 PSR2_ADD_VERTICAL_LINE_COUNT);
1936
1937 /*
1938 * Wa_16014451276:adlp,mtl[a0,b0]
1939 * All supported adlp panels have 1-based X granularity; this may
1940 * cause issues if non-supported panels are used.
1941 */
1942 if (!intel_dp->psr.panel_replay_enabled &&
1943 (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1944 display->platform.alderlake_p))
1945 intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
1946 0, ADLP_1_BASED_X_GRANULARITY);
1947
1948 /* Wa_16012604467:adlp,mtl[a0,b0] */
1949 if (!intel_dp->psr.panel_replay_enabled &&
1950 IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1951 intel_de_rmw(display,
1952 MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1953 0,
1954 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1955 else if (display->platform.alderlake_p)
1956 intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1957 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1958 }
1959
1960 /* Wa_16025596647 */
1961 if ((DISPLAY_VER(display) == 20 ||
1962 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
1963 !intel_dp->psr.panel_replay_enabled)
1964 intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
1965
1966 intel_alpm_configure(intel_dp, crtc_state);
1967 }
1968
1969 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1970 {
1971 struct intel_display *display = to_intel_display(intel_dp);
1972 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1973 u32 val;
1974
1975 if (intel_dp->psr.panel_replay_enabled)
1976 goto no_err;
1977
1978 /*
1979 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1980 * will still keep the error set even after the reset done in the
1981 * irq_preinstall and irq_uninstall hooks.
1982 * Enabling PSR in this situation causes the screen to freeze the
1983 * first time the PSR HW tries to activate, so let's keep PSR disabled
1984 * to avoid any rendering problems.
1985 */
1986 val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1987 val &= psr_irq_psr_error_bit_get(intel_dp);
1988 if (val) {
1989 intel_dp->psr.sink_not_reliable = true;
1990 drm_dbg_kms(display->drm,
1991 "PSR interruption error set, not enabling PSR\n");
1992 return false;
1993 }
1994
1995 no_err:
1996 return true;
1997 }
1998
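/*
 * Record the PSR/Panel Replay configuration from the CRTC state, enable the
 * sink and the source side and activate PSR. Called with psr.lock held.
 */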
1999 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
2000 const struct intel_crtc_state *crtc_state)
2001 {
2002 struct intel_display *display = to_intel_display(intel_dp);
2003 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2004 u32 val;
2005
2006 drm_WARN_ON(display->drm, intel_dp->psr.enabled);
2007
2008 intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
2009 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
2010 intel_dp->psr.busy_frontbuffer_bits = 0;
2011 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
2012 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
2013 /* DC5/DC6 requires at least 6 idle frames */
2014 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
2015 intel_dp->psr.dc3co_exit_delay = val;
2016 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
2017 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
2018 intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
2019 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2020 intel_dp->psr.req_psr2_sdp_prior_scanline =
2021 crtc_state->req_psr2_sdp_prior_scanline;
2022 intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
2023 intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
2024
2025 if (!psr_interrupt_error_check(intel_dp))
2026 return;
2027
2028 if (intel_dp->psr.panel_replay_enabled)
2029 drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
2030 else
2031 drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2032 intel_dp->psr.sel_update_enabled ? "2" : "1");
2033
2034 /*
2035 * Enabling sink PSR/Panel Replay here only for PSR. Panel Replay enable
2036 * bit is already written at this point. Sink ALPM is enabled here for
2037 * PSR and Panel Replay. See
2038 * intel_psr_panel_replay_enable_sink. Modifiers/options:
2039 * - Selective Update
2040 * - Region Early Transport
2041 * - Selective Update Region Scanline Capture
2042 * - VSC_SDP_CRC
2043 * - HPD on different Errors
2044 * - CRC verification
2045 * are written for PSR and Panel Replay here.
2046 */
2047 intel_psr_enable_sink(intel_dp, crtc_state);
2048
2049 if (intel_dp_is_edp(intel_dp))
2050 intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2051
2052 intel_psr_enable_source(intel_dp, crtc_state);
2053 intel_dp->psr.enabled = true;
2054 intel_dp->psr.pause_counter = 0;
2055
2056 /*
2057 * Link_ok is sticky and set here on PSR enable. We can assume link
2058 * training is complete as we never continue to PSR enable with
2059 * untrained link. Link_ok is kept as set until first short pulse
2060 * interrupt. This is targeted to workaround panels stating bad link
2061 * after PSR is enabled.
2062 */
2063 intel_dp->psr.link_ok = true;
2064
2065 intel_psr_activate(intel_dp);
2066 }
2067
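/*
 * Deactivate PSR/Panel Replay on the source side. Counterpart of
 * intel_psr_activate(); psr.enabled is left untouched so PSR can be
 * re-activated later.
 */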
2068 static void intel_psr_exit(struct intel_dp *intel_dp)
2069 {
2070 struct intel_display *display = to_intel_display(intel_dp);
2071 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2072 u32 val;
2073
2074 if (!intel_dp->psr.active) {
2075 if (transcoder_has_psr2(display, cpu_transcoder)) {
2076 val = intel_de_read(display,
2077 EDP_PSR2_CTL(display, cpu_transcoder));
2078 drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2079 }
2080
2081 val = intel_de_read(display,
2082 psr_ctl_reg(display, cpu_transcoder));
2083 drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2084
2085 return;
2086 }
2087
2088 if (intel_dp->psr.panel_replay_enabled) {
2089 intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2090 TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2091 } else if (intel_dp->psr.sel_update_enabled) {
2092 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2093
2094 val = intel_de_rmw(display,
2095 EDP_PSR2_CTL(display, cpu_transcoder),
2096 EDP_PSR2_ENABLE, 0);
2097
2098 drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2099 } else {
2100 if ((DISPLAY_VER(display) == 20 ||
2101 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2102 intel_dp->psr.pkg_c_latency_used)
2103 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
2104 intel_dp->psr.pipe,
2105 false);
2106
2107 val = intel_de_rmw(display,
2108 psr_ctl_reg(display, cpu_transcoder),
2109 EDP_PSR_ENABLE, 0);
2110
2111 drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2112 }
2113 intel_dp->psr.active = false;
2114 }
2115
2116 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2117 {
2118 struct intel_display *display = to_intel_display(intel_dp);
2119 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2120 i915_reg_t psr_status;
2121 u32 psr_status_mask;
2122
2123 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2124 intel_dp->psr.panel_replay_enabled)) {
2125 psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2126 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2127 } else {
2128 psr_status = psr_status_reg(display, cpu_transcoder);
2129 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2130 }
2131
2132 /* Wait till PSR is idle */
2133 if (intel_de_wait_for_clear(display, psr_status,
2134 psr_status_mask, 2000))
2135 drm_err(display->drm, "Timed out waiting PSR idle state\n");
2136 }
2137
2138 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2139 {
2140 struct intel_display *display = to_intel_display(intel_dp);
2141 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2142
2143 lockdep_assert_held(&intel_dp->psr.lock);
2144
2145 if (!intel_dp->psr.enabled)
2146 return;
2147
2148 if (intel_dp->psr.panel_replay_enabled)
2149 drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2150 else
2151 drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2152 intel_dp->psr.sel_update_enabled ? "2" : "1");
2153
2154 intel_psr_exit(intel_dp);
2155 intel_psr_wait_exit_locked(intel_dp);
2156
2157 /*
2158 * Wa_16013835468
2159 * Wa_14015648006
2160 */
2161 if (DISPLAY_VER(display) >= 11)
2162 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2163 LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
2164
2165 if (intel_dp->psr.sel_update_enabled) {
2166 /* Wa_16012604467:adlp,mtl[a0,b0] */
2167 if (!intel_dp->psr.panel_replay_enabled &&
2168 IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2169 intel_de_rmw(display,
2170 MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2171 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2172 else if (display->platform.alderlake_p)
2173 intel_de_rmw(display, CLKGATE_DIS_MISC,
2174 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2175 }
2176
2177 if (intel_dp_is_edp(intel_dp))
2178 intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2179
2180 if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
2181 intel_alpm_disable(intel_dp);
2182
2183 /* Disable PSR on Sink */
2184 if (!intel_dp->psr.panel_replay_enabled) {
2185 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2186
2187 if (intel_dp->psr.sel_update_enabled)
2188 drm_dp_dpcd_writeb(&intel_dp->aux,
2189 DP_RECEIVER_ALPM_CONFIG, 0);
2190 }
2191
2192 /* Wa_16025596647 */
2193 if ((DISPLAY_VER(display) == 20 ||
2194 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2195 !intel_dp->psr.panel_replay_enabled)
2196 intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
2197
2198 intel_dp->psr.enabled = false;
2199 intel_dp->psr.panel_replay_enabled = false;
2200 intel_dp->psr.sel_update_enabled = false;
2201 intel_dp->psr.psr2_sel_fetch_enabled = false;
2202 intel_dp->psr.su_region_et_enabled = false;
2203 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2204 intel_dp->psr.active_non_psr_pipes = 0;
2205 intel_dp->psr.pkg_c_latency_used = 0;
2206 }
2207
2208 /**
2209 * intel_psr_disable - Disable PSR
2210 * @intel_dp: Intel DP
2211 * @old_crtc_state: old CRTC state
2212 *
2213 * This function needs to be called before disabling pipe.
2214 */
2215 void intel_psr_disable(struct intel_dp *intel_dp,
2216 const struct intel_crtc_state *old_crtc_state)
2217 {
2218 struct intel_display *display = to_intel_display(intel_dp);
2219
2220 if (!old_crtc_state->has_psr)
2221 return;
2222
2223 if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2224 !CAN_PANEL_REPLAY(intel_dp)))
2225 return;
2226
2227 mutex_lock(&intel_dp->psr.lock);
2228
2229 intel_psr_disable_locked(intel_dp);
2230
2231 intel_dp->psr.link_ok = false;
2232
2233 mutex_unlock(&intel_dp->psr.lock);
2234 cancel_work_sync(&intel_dp->psr.work);
2235 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2236 }
2237
2238 /**
2239 * intel_psr_pause - Pause PSR
2240 * @intel_dp: Intel DP
2241 *
2242 * This function needs to be called after enabling PSR.
2243 */
2244 void intel_psr_pause(struct intel_dp *intel_dp)
2245 {
2246 struct intel_psr *psr = &intel_dp->psr;
2247
2248 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2249 return;
2250
2251 mutex_lock(&psr->lock);
2252
2253 if (!psr->enabled) {
2254 mutex_unlock(&psr->lock);
2255 return;
2256 }
2257
2258 if (intel_dp->psr.pause_counter++ == 0) {
2259 intel_psr_exit(intel_dp);
2260 intel_psr_wait_exit_locked(intel_dp);
2261 }
2262
2263 mutex_unlock(&psr->lock);
2264
2265 cancel_work_sync(&psr->work);
2266 cancel_delayed_work_sync(&psr->dc3co_work);
2267 }
2268
2269 /**
2270 * intel_psr_resume - Resume PSR
2271 * @intel_dp: Intel DP
2272 *
2273 * This function needs to be called after pausing PSR.
2274 */
2275 void intel_psr_resume(struct intel_dp *intel_dp)
2276 {
2277 struct intel_display *display = to_intel_display(intel_dp);
2278 struct intel_psr *psr = &intel_dp->psr;
2279
2280 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2281 return;
2282
2283 mutex_lock(&psr->lock);
2284
2285 if (!psr->enabled)
2286 goto out;
2287
2288 if (!psr->pause_counter) {
2289 drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2290 goto out;
2291 }
2292
2293 if (--intel_dp->psr.pause_counter == 0)
2294 intel_psr_activate(intel_dp);
2295
2296 out:
2297 mutex_unlock(&psr->lock);
2298 }
2299
2300 /**
2301 * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
2302 * notification.
2303 * @crtc_state: CRTC state
2304 *
2305 * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
2306 * prevent it in case of Panel Replay. Panel Replay switches the main link off on
2307 * DC entry. This means vblank interrupts are not fired, which is a problem if
2308 * user-space is polling for vblank events. Also Wa_16025596647 needs
2309 * information when vblank is enabled/disabled.
2310 */
2311 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2312 {
2313 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2314 struct intel_display *display = to_intel_display(crtc_state);
2315 struct intel_encoder *encoder;
2316
2317 for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2318 struct intel_dp *intel_dp;
2319
2320 if (!intel_encoder_is_dp(encoder))
2321 continue;
2322
2323 intel_dp = enc_to_intel_dp(encoder);
2324
2325 if (!intel_dp_is_edp(intel_dp))
2326 continue;
2327
2328 if (CAN_PANEL_REPLAY(intel_dp))
2329 return true;
2330
2331 if ((DISPLAY_VER(display) == 20 ||
2332 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2333 CAN_PSR(intel_dp))
2334 return true;
2335 }
2336
2337 return false;
2338 }
2339
2340 /**
2341 * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2342 * @dsb: DSB context
2343 * @state: the atomic state
2344 * @crtc: the CRTC
2345 *
2346 * Generate PSR "Frame Change" event.
2347 */
2348 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2349 struct intel_atomic_state *state,
2350 struct intel_crtc *crtc)
2351 {
2352 const struct intel_crtc_state *crtc_state =
2353 intel_pre_commit_crtc_state(state, crtc);
2354 struct intel_display *display = to_intel_display(crtc);
2355
2356 if (crtc_state->has_psr)
2357 intel_de_write_dsb(display, dsb,
2358 CURSURFLIVE(display, crtc->pipe), 0);
2359 }
2360
2361 /**
2362 * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
2363 * @crtc_state: the crtc state
2364 *
2365 * Return minimum vblank delay needed by PSR.
2366 */
2367 int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
2368 {
2369 struct intel_display *display = to_intel_display(crtc_state);
2370
2371 if (!crtc_state->has_psr)
2372 return 0;
2373
2374 /* Wa_14015401596 */
2375 if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
2376 return 1;
2377
2378 /* Rest is for SRD_STATUS needed on LunarLake and onwards */
2379 if (DISPLAY_VER(display) < 20)
2380 return 0;
2381
2382 /*
2383 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
2384 *
2385 * To deterministically capture the transition of the state machine
2386 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
2387 * one line after the non-delayed V. Blank.
2388 *
2389 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
2390 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
2391 * - TRANS_VTOTAL[ Vertical Active ])
2392 *
2393 * SRD_STATUS is used only by PSR1 on PantherLake.
2394 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
2395 */
2396
2397 if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
2398 crtc_state->has_sel_update))
2399 return 0;
2400 else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
2401 intel_crtc_has_type(crtc_state,
2402 INTEL_OUTPUT_EDP)))
2403 return 0;
2404 else
2405 return 1;
2406 }
2407
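/*
 * PSR2_MAN_TRK_CTL uses a different bit layout on ADL-P and display version
 * 14+ than on earlier platforms; the helpers below return the bits for the
 * current platform.
 */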
2408 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2409 {
2410 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2411 PSR2_MAN_TRK_CTL_ENABLE;
2412 }
2413
2414 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2415 {
2416 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2417 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2418 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2419 }
2420
2421 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2422 {
2423 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2424 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2425 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2426 }
2427
2428 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2429 {
2430 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2431 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2432 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2433 }
2434
2435 static void intel_psr_force_update(struct intel_dp *intel_dp)
2436 {
2437 struct intel_display *display = to_intel_display(intel_dp);
2438
2439 /*
2440 * Display WA #0884: skl+
2441 * This documented WA for bxt can be safely applied
2442 * broadly so we can force HW tracking to exit PSR
2443 * instead of disabling and re-enabling.
2444 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
2445 * but it makes more sense to write to the currently active
2446 * pipe.
2447 *
2448 * This workaround does not exist for platforms with display version 10
2449 * or newer, but testing proved that it works up to display version 13;
2450 * anything newer than that will need testing.
2451 */
2452 intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2453 }
2454
2455 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
2456 const struct intel_crtc_state *crtc_state)
2457 {
2458 struct intel_display *display = to_intel_display(crtc_state);
2459 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2460 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2461 struct intel_encoder *encoder;
2462
2463 if (!crtc_state->enable_psr2_sel_fetch)
2464 return;
2465
2466 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2467 crtc_state->uapi.encoder_mask) {
2468 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2469
2470 if (!dsb)
2471 lockdep_assert_held(&intel_dp->psr.lock);
2472
2473 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
2474 return;
2475 break;
2476 }
2477
2478 intel_de_write_dsb(display, dsb,
2479 PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2480 crtc_state->psr2_man_track_ctl);
2481
2482 if (!crtc_state->enable_psr2_su_region_et)
2483 return;
2484
2485 intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2486 crtc_state->pipe_srcsz_early_tpt);
2487 }
2488
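/*
 * Compute the PSR2_MAN_TRK_CTL value covering the selective update region,
 * or request a continuous full frame fetch when a full update is needed.
 */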
2489 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2490 bool full_update)
2491 {
2492 struct intel_display *display = to_intel_display(crtc_state);
2493 u32 val = man_trk_ctl_enable_bit_get(display);
2494
2495 /* SF partial frame enable has to be set even on full update */
2496 val |= man_trk_ctl_partial_frame_bit_get(display);
2497
2498 if (full_update) {
2499 val |= man_trk_ctl_continuos_full_frame(display);
2500 goto exit;
2501 }
2502
2503 if (crtc_state->psr2_su_area.y1 == -1)
2504 goto exit;
2505
2506 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
2507 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2508 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2509 } else {
2510 drm_WARN_ON(crtc_state->uapi.crtc->dev,
2511 crtc_state->psr2_su_area.y1 % 4 ||
2512 crtc_state->psr2_su_area.y2 % 4);
2513
2514 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2515 crtc_state->psr2_su_area.y1 / 4 + 1);
2516 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2517 crtc_state->psr2_su_area.y2 / 4 + 1);
2518 }
2519 exit:
2520 crtc_state->psr2_man_track_ctl = val;
2521 }
2522
2523 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2524 bool full_update)
2525 {
2526 int width, height;
2527
2528 if (!crtc_state->enable_psr2_su_region_et || full_update)
2529 return 0;
2530
2531 width = drm_rect_width(&crtc_state->psr2_su_area);
2532 height = drm_rect_height(&crtc_state->psr2_su_area);
2533
2534 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2535 }
2536
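/*
 * Clip the damage area to the pipe source and grow the overlapping damage
 * area vertically so that it covers the clipped damage.
 */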
2537 static void clip_area_update(struct drm_rect *overlap_damage_area,
2538 struct drm_rect *damage_area,
2539 struct drm_rect *pipe_src)
2540 {
2541 if (!drm_rect_intersect(damage_area, pipe_src))
2542 return;
2543
2544 if (overlap_damage_area->y1 == -1) {
2545 overlap_damage_area->y1 = damage_area->y1;
2546 overlap_damage_area->y2 = damage_area->y2;
2547 return;
2548 }
2549
2550 if (damage_area->y1 < overlap_damage_area->y1)
2551 overlap_damage_area->y1 = damage_area->y1;
2552
2553 if (damage_area->y2 > overlap_damage_area->y2)
2554 overlap_damage_area->y2 = damage_area->y2;
2555 }
2556
2557 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2558 {
2559 struct intel_display *display = to_intel_display(crtc_state);
2560 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2561 u16 y_alignment;
2562
2563 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2564 if (crtc_state->dsc.compression_enable &&
2565 (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2566 y_alignment = vdsc_cfg->slice_height;
2567 else
2568 y_alignment = crtc_state->su_y_granularity;
2569
2570 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2571 if (crtc_state->psr2_su_area.y2 % y_alignment)
2572 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2573 y_alignment) + 1) * y_alignment;
2574 }
2575
2576 /*
2577 * When early transport is in use we need to extend the SU area to cover
2578 * the cursor fully when the cursor is in the SU area.
2579 */
2580 static void
2581 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2582 struct intel_crtc *crtc,
2583 bool *cursor_in_su_area)
2584 {
2585 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2586 struct intel_plane_state *new_plane_state;
2587 struct intel_plane *plane;
2588 int i;
2589
2590 if (!crtc_state->enable_psr2_su_region_et)
2591 return;
2592
2593 for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2594 struct drm_rect inter;
2595
2596 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2597 continue;
2598
2599 if (plane->id != PLANE_CURSOR)
2600 continue;
2601
2602 if (!new_plane_state->uapi.visible)
2603 continue;
2604
2605 inter = crtc_state->psr2_su_area;
2606 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2607 continue;
2608
2609 clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2610 &crtc_state->pipe_src);
2611 *cursor_in_su_area = true;
2612 }
2613 }
2614
2615 /*
2616 * TODO: Not clear how to handle planes with negative position;
2617 * also planes are not updated if they have a negative X
2618 * position, so for now do a full update in these cases.
2619 *
2620 * Plane scaling and rotation are not supported by selective fetch, and both
2621 * properties can change without a modeset, so they need to be checked at every
2622 * atomic commit.
2623 */
2624 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2625 {
2626 if (plane_state->uapi.dst.y1 < 0 ||
2627 plane_state->uapi.dst.x1 < 0 ||
2628 plane_state->scaler_id >= 0 ||
2629 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2630 return false;
2631
2632 return true;
2633 }
2634
2635 /*
2636 * Check for pipe properties that are not supported by selective fetch.
2637 *
2638 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2639 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2640 * enabled and going to the full update path.
2641 */
2642 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2643 {
2644 if (crtc_state->scaler_state.scaler_id >= 0)
2645 return false;
2646
2647 return true;
2648 }
2649
2650 /* Wa 14019834836 */
2651 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2652 {
2653 struct intel_display *display = to_intel_display(crtc_state);
2654 struct intel_encoder *encoder;
2655 int hactive_limit;
2656
2657 if (crtc_state->psr2_su_area.y1 != 0 ||
2658 crtc_state->psr2_su_area.y2 != 0)
2659 return;
2660
2661 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2662 hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2663 else
2664 hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2665
2666 if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2667 return;
2668
2669 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2670 crtc_state->uapi.encoder_mask) {
2671 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2672
2673 if (!intel_dp_is_edp(intel_dp) &&
2674 intel_dp->psr.panel_replay_enabled &&
2675 intel_dp->psr.sel_update_enabled) {
2676 crtc_state->psr2_su_area.y2++;
2677 return;
2678 }
2679 }
2680 }
2681
2682 static void
2683 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2684 {
2685 struct intel_display *display = to_intel_display(crtc_state);
2686
2687 /* Wa_14014971492 */
2688 if (!crtc_state->has_panel_replay &&
2689 ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2690 display->platform.alderlake_p || display->platform.tigerlake)) &&
2691 crtc_state->splitter.enable)
2692 crtc_state->psr2_su_area.y1 = 0;
2693
2694 /* Wa 14019834836 */
2695 if (DISPLAY_VER(display) == 30)
2696 intel_psr_apply_pr_link_on_su_wa(crtc_state);
2697 }
2698
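/*
 * Compute the selective update area from the per-plane damage, apply the SU
 * area workarounds and alignment requirements, and set the selective fetch
 * area of every affected plane.
 */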
2699 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2700 struct intel_crtc *crtc)
2701 {
2702 struct intel_display *display = to_intel_display(state);
2703 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2704 struct intel_plane_state *new_plane_state, *old_plane_state;
2705 struct intel_plane *plane;
2706 bool full_update = false, cursor_in_su_area = false;
2707 int i, ret;
2708
2709 if (!crtc_state->enable_psr2_sel_fetch)
2710 return 0;
2711
2712 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2713 full_update = true;
2714 goto skip_sel_fetch_set_loop;
2715 }
2716
2717 crtc_state->psr2_su_area.x1 = 0;
2718 crtc_state->psr2_su_area.y1 = -1;
2719 crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2720 crtc_state->psr2_su_area.y2 = -1;
2721
2722 /*
2723 * Calculate minimal selective fetch area of each plane and calculate
2724 * the pipe damaged area.
2725 * In the next loop the plane selective fetch area will actually be set
2726 * using whole pipe damaged area.
2727 */
2728 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2729 new_plane_state, i) {
2730 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2731 .x2 = INT_MAX };
2732
2733 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2734 continue;
2735
2736 if (!new_plane_state->uapi.visible &&
2737 !old_plane_state->uapi.visible)
2738 continue;
2739
2740 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2741 full_update = true;
2742 break;
2743 }
2744
2745 /*
2746 * If the visibility changed or the plane moved, mark the whole plane
2747 * area as damaged, as it needs a complete redraw in both the old and
2748 * new positions.
2749 */
2750 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2751 !drm_rect_equals(&new_plane_state->uapi.dst,
2752 &old_plane_state->uapi.dst)) {
2753 if (old_plane_state->uapi.visible) {
2754 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2755 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2756 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2757 &crtc_state->pipe_src);
2758 }
2759
2760 if (new_plane_state->uapi.visible) {
2761 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2762 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2763 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2764 &crtc_state->pipe_src);
2765 }
2766 continue;
2767 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2768 /* If alpha changed mark the whole plane area as damaged */
2769 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2770 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2771 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2772 &crtc_state->pipe_src);
2773 continue;
2774 }
2775
2776 src = drm_plane_state_src(&new_plane_state->uapi);
2777 drm_rect_fp_to_int(&src, &src);
2778
2779 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2780 &new_plane_state->uapi, &damaged_area))
2781 continue;
2782
2783 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2784 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2785 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2786 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2787
2788 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2789 }
2790
2791 /*
2792 * TODO: For now we are just using full update in case
2793 * selective fetch area calculation fails. To optimize this we
2794 * should identify cases where this happens and fix the area
2795 * calculation for those.
2796 */
2797 if (crtc_state->psr2_su_area.y1 == -1) {
2798 drm_info_once(display->drm,
2799 "Selective fetch area calculation failed in pipe %c\n",
2800 pipe_name(crtc->pipe));
2801 full_update = true;
2802 }
2803
2804 if (full_update)
2805 goto skip_sel_fetch_set_loop;
2806
2807 intel_psr_apply_su_area_workarounds(crtc_state);
2808
2809 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2810 if (ret)
2811 return ret;
2812
2813 /*
2814 * Adjust su area to cover cursor fully as necessary (early
2815 * transport). This needs to be done after
2816 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2817 * affected planes even when cursor is not updated by itself.
2818 */
2819 intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2820
2821 intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2822
2823 /*
2824 * Now that we have the pipe damaged area check if it intersect with
2825 * every plane, if it does set the plane selective fetch area.
2826 */
2827 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2828 new_plane_state, i) {
2829 struct drm_rect *sel_fetch_area, inter;
2830 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2831
2832 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2833 !new_plane_state->uapi.visible)
2834 continue;
2835
2836 inter = crtc_state->psr2_su_area;
2837 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2838 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2839 sel_fetch_area->y1 = -1;
2840 sel_fetch_area->y2 = -1;
2841 /*
2842 * if plane sel fetch was previously enabled ->
2843 * disable it
2844 */
2845 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2846 crtc_state->update_planes |= BIT(plane->id);
2847
2848 continue;
2849 }
2850
2851 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2852 full_update = true;
2853 break;
2854 }
2855
2856 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2857 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2858 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2859 crtc_state->update_planes |= BIT(plane->id);
2860
2861 /*
2862 * Sel_fetch_area is calculated for UV plane. Use
2863 * same area for Y plane as well.
2864 */
2865 if (linked) {
2866 struct intel_plane_state *linked_new_plane_state;
2867 struct drm_rect *linked_sel_fetch_area;
2868
2869 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2870 if (IS_ERR(linked_new_plane_state))
2871 return PTR_ERR(linked_new_plane_state);
2872
2873 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2874 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2875 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2876 crtc_state->update_planes |= BIT(linked->id);
2877 }
2878 }
2879
2880 skip_sel_fetch_set_loop:
2881 psr2_man_trk_ctl_calc(crtc_state, full_update);
2882 crtc_state->pipe_srcsz_early_tpt =
2883 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2884 return 0;
2885 }
2886
2887 void intel_psr2_panic_force_full_update(struct intel_display *display,
2888 struct intel_crtc_state *crtc_state)
2889 {
2890 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2891 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2892 u32 val = man_trk_ctl_enable_bit_get(display);
2893
2894 /* SF partial frame enable has to be set even on full update */
2895 val |= man_trk_ctl_partial_frame_bit_get(display);
2896 val |= man_trk_ctl_continuos_full_frame(display);
2897
2898 /* Directly write the register */
2899 intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);
2900
2901 if (!crtc_state->enable_psr2_su_region_et)
2902 return;
2903
2904 intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
2905 }
2906
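/*
 * Disable PSR ahead of the plane update when the new CRTC state no longer
 * supports it or requires a full re-enable (modeset, PSR mode change, etc.).
 */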
2907 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2908 struct intel_crtc *crtc)
2909 {
2910 struct intel_display *display = to_intel_display(state);
2911 const struct intel_crtc_state *old_crtc_state =
2912 intel_atomic_get_old_crtc_state(state, crtc);
2913 const struct intel_crtc_state *new_crtc_state =
2914 intel_atomic_get_new_crtc_state(state, crtc);
2915 struct intel_encoder *encoder;
2916
2917 if (!HAS_PSR(display))
2918 return;
2919
2920 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2921 old_crtc_state->uapi.encoder_mask) {
2922 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2923 struct intel_psr *psr = &intel_dp->psr;
2924
2925 mutex_lock(&psr->lock);
2926
2927 if (psr->enabled) {
2928 /*
2929 * Reasons to disable:
2930 * - PSR disabled in new state
2931 * - All planes will go inactive
2932 * - Changing between PSR versions
2933 * - Region Early Transport changing
2934 * - Display WA #1136: skl, bxt
2935 */
2936 if (intel_crtc_needs_modeset(new_crtc_state) ||
2937 !new_crtc_state->has_psr ||
2938 !new_crtc_state->active_planes ||
2939 new_crtc_state->has_sel_update != psr->sel_update_enabled ||
2940 new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
2941 new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
2942 (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
2943 intel_psr_disable_locked(intel_dp);
2944 else if (new_crtc_state->wm_level_disabled)
2945 /* Wa_14015648006 */
2946 wm_optimization_wa(intel_dp, new_crtc_state);
2947 }
2948
2949 mutex_unlock(&psr->lock);
2950 }
2951 }
2952
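/*
 * Re-enable PSR after the plane update when possible, apply the wm
 * optimization workaround and force a PSR exit when CRC capture is enabled.
 */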
2953 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2954 struct intel_crtc *crtc)
2955 {
2956 struct intel_display *display = to_intel_display(state);
2957 const struct intel_crtc_state *crtc_state =
2958 intel_atomic_get_new_crtc_state(state, crtc);
2959 struct intel_encoder *encoder;
2960
2961 if (!crtc_state->has_psr)
2962 return;
2963
2964 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2965 crtc_state->uapi.encoder_mask) {
2966 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2967 struct intel_psr *psr = &intel_dp->psr;
2968 bool keep_disabled = false;
2969
2970 mutex_lock(&psr->lock);
2971
2972 drm_WARN_ON(display->drm,
2973 psr->enabled && !crtc_state->active_planes);
2974
2975 keep_disabled |= psr->sink_not_reliable;
2976 keep_disabled |= !crtc_state->active_planes;
2977
2978 /* Display WA #1136: skl, bxt */
2979 keep_disabled |= DISPLAY_VER(display) < 11 &&
2980 crtc_state->wm_level_disabled;
2981
2982 if (!psr->enabled && !keep_disabled)
2983 intel_psr_enable_locked(intel_dp, crtc_state);
2984 else if (psr->enabled && !crtc_state->wm_level_disabled)
2985 /* Wa_14015648006 */
2986 wm_optimization_wa(intel_dp, crtc_state);
2987
2988 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2989 if (crtc_state->crc_enabled && psr->enabled)
2990 intel_psr_force_update(intel_dp);
2991
2992 /*
2993 * Clear possible busy bits in case we have
2994 * invalidate -> flip -> flush sequence.
2995 */
2996 intel_dp->psr.busy_frontbuffer_bits = 0;
2997
2998 mutex_unlock(&psr->lock);
2999 }
3000 }
3001
3002 /*
3003 * From bspec: Panel Self Refresh (BDW+)
3004 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3005 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3006 * defensive enough to cover everything.
3007 */
3008 #define PSR_IDLE_TIMEOUT_MS 50
3009
3010 static int
3011 _psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3012 struct intel_dsb *dsb)
3013 {
3014 struct intel_display *display = to_intel_display(new_crtc_state);
3015 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3016
3017 /*
3018 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
3019 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
3020 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
3021 */
3022 if (dsb) {
3023 intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
3024 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
3025 PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3026 return true;
3027 }
3028
3029 return intel_de_wait_for_clear(display,
3030 EDP_PSR2_STATUS(display, cpu_transcoder),
3031 EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
3032 PSR_IDLE_TIMEOUT_MS);
3033 }
3034
3035 static int
3036 _psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3037 struct intel_dsb *dsb)
3038 {
3039 struct intel_display *display = to_intel_display(new_crtc_state);
3040 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3041
3042 if (dsb) {
3043 intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
3044 EDP_PSR_STATUS_STATE_MASK, 0, 200,
3045 PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3046 return true;
3047 }
3048
3049 return intel_de_wait_for_clear(display,
3050 psr_status_reg(display, cpu_transcoder),
3051 EDP_PSR_STATUS_STATE_MASK,
3052 PSR_IDLE_TIMEOUT_MS);
3053 }
3054
3055 /**
3056 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
3057 * @new_crtc_state: new CRTC state
3058 *
3059 * This function is expected to be called from pipe_update_start() where it is
3060 * not expected to race with PSR enable or disable.
3061 */
3062 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
3063 {
3064 struct intel_display *display = to_intel_display(new_crtc_state);
3065 struct intel_encoder *encoder;
3066
3067 if (!new_crtc_state->has_psr)
3068 return;
3069
3070 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3071 new_crtc_state->uapi.encoder_mask) {
3072 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3073 int ret;
3074
3075 lockdep_assert_held(&intel_dp->psr.lock);
3076
3077 if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3078 continue;
3079
3080 if (intel_dp->psr.sel_update_enabled)
3081 ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
3082 NULL);
3083 else
3084 ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
3085 NULL);
3086
3087 if (ret)
3088 drm_err(display->drm,
3089 "PSR wait timed out, atomic update may fail\n");
3090 }
3091 }
3092
3093 void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
3094 const struct intel_crtc_state *new_crtc_state)
3095 {
3096 if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
3097 return;
3098
3099 if (new_crtc_state->has_sel_update)
3100 _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
3101 else
3102 _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
3103 }
3104
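/*
 * Wait for the PSR status to become idle, dropping psr.lock around the wait.
 * Returns true only if PSR is still enabled and not paused after the lock
 * has been re-acquired.
 */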
3105 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
3106 {
3107 struct intel_display *display = to_intel_display(intel_dp);
3108 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3109 i915_reg_t reg;
3110 u32 mask;
3111 int err;
3112
3113 if (!intel_dp->psr.enabled)
3114 return false;
3115
3116 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3117 intel_dp->psr.panel_replay_enabled)) {
3118 reg = EDP_PSR2_STATUS(display, cpu_transcoder);
3119 mask = EDP_PSR2_STATUS_STATE_MASK;
3120 } else {
3121 reg = psr_status_reg(display, cpu_transcoder);
3122 mask = EDP_PSR_STATUS_STATE_MASK;
3123 }
3124
3125 mutex_unlock(&intel_dp->psr.lock);
3126
3127 err = intel_de_wait_for_clear(display, reg, mask, 50);
3128 if (err)
3129 drm_err(display->drm,
3130 "Timed out waiting for PSR Idle for re-enable\n");
3131
3132 /* After the unlocked wait, verify that PSR is still wanted! */
3133 mutex_lock(&intel_dp->psr.lock);
3134 return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
3135 }
3136
3137 static int intel_psr_fastset_force(struct intel_display *display)
3138 {
3139 struct drm_connector_list_iter conn_iter;
3140 struct drm_modeset_acquire_ctx ctx;
3141 struct drm_atomic_state *state;
3142 struct drm_connector *conn;
3143 int err = 0;
3144
3145 state = drm_atomic_state_alloc(display->drm);
3146 if (!state)
3147 return -ENOMEM;
3148
3149 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
3150
3151 state->acquire_ctx = &ctx;
3152 to_intel_atomic_state(state)->internal = true;
3153
3154 retry:
3155 drm_connector_list_iter_begin(display->drm, &conn_iter);
3156 drm_for_each_connector_iter(conn, &conn_iter) {
3157 struct drm_connector_state *conn_state;
3158 struct drm_crtc_state *crtc_state;
3159
3160 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
3161 continue;
3162
3163 conn_state = drm_atomic_get_connector_state(state, conn);
3164 if (IS_ERR(conn_state)) {
3165 err = PTR_ERR(conn_state);
3166 break;
3167 }
3168
3169 if (!conn_state->crtc)
3170 continue;
3171
3172 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3173 if (IS_ERR(crtc_state)) {
3174 err = PTR_ERR(crtc_state);
3175 break;
3176 }
3177
3178 /* Mark mode as changed to trigger a pipe->update() */
3179 crtc_state->mode_changed = true;
3180 }
3181 drm_connector_list_iter_end(&conn_iter);
3182
3183 if (err == 0)
3184 err = drm_atomic_commit(state);
3185
3186 if (err == -EDEADLK) {
3187 drm_atomic_state_clear(state);
3188 err = drm_modeset_backoff(&ctx);
3189 if (!err)
3190 goto retry;
3191 }
3192
3193 drm_modeset_drop_locks(&ctx);
3194 drm_modeset_acquire_fini(&ctx);
3195 drm_atomic_state_put(state);
3196
3197 return err;
3198 }
3199
3200 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3201 {
3202 struct intel_display *display = to_intel_display(intel_dp);
3203 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3204 const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3205 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3206 u32 old_mode, old_disable_bits;
3207 int ret;
3208
3209 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3210 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3211 I915_PSR_DEBUG_MODE_MASK) ||
3212 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3213 drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3214 return -EINVAL;
3215 }
3216
3217 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3218 if (ret)
3219 return ret;
3220
3221 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3222 old_disable_bits = intel_dp->psr.debug &
3223 (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3224 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3225
3226 intel_dp->psr.debug = val;
3227
3228 /*
3229 * Do it right away if it's already enabled, otherwise it will be done
3230 * when enabling the source.
3231 */
3232 if (intel_dp->psr.enabled)
3233 psr_irq_control(intel_dp);
3234
3235 mutex_unlock(&intel_dp->psr.lock);
3236
3237 if (old_mode != mode || old_disable_bits != disable_bits)
3238 ret = intel_psr_fastset_force(display);
3239
3240 return ret;
3241 }
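/*
 * Illustrative sketch of how a value for intel_psr_debug_set() is composed:
 * one mode from I915_PSR_DEBUG_MODE_MASK plus optional IRQ/disable bits, which
 * is what the debugfs write path further below feeds in. "intel_dp" is assumed
 * to be the DP of a PSR capable encoder; this is an example only.
 *
 *	u64 val = I915_PSR_DEBUG_ENABLE_SEL_FETCH |
 *		  I915_PSR_DEBUG_IRQ |
 *		  I915_PSR_DEBUG_PANEL_REPLAY_DISABLE;
 *	int ret = intel_psr_debug_set(intel_dp, val);
 */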
3242
3243 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3244 {
3245 struct intel_psr *psr = &intel_dp->psr;
3246
3247 intel_psr_disable_locked(intel_dp);
3248 psr->sink_not_reliable = true;
3249 /* let's make sure that the sink is awake */
3250 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3251 }
3252
3253 static void intel_psr_work(struct work_struct *work)
3254 {
3255 struct intel_dp *intel_dp =
3256 container_of(work, typeof(*intel_dp), psr.work);
3257
3258 mutex_lock(&intel_dp->psr.lock);
3259
3260 if (!intel_dp->psr.enabled)
3261 goto unlock;
3262
3263 if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
3264 intel_psr_handle_irq(intel_dp);
3265 goto unlock;
3266 }
3267
3268 if (intel_dp->psr.pause_counter)
3269 goto unlock;
3270
3271 /*
3272 * We have to make sure PSR is ready for re-enable,
3273 * otherwise it stays disabled until the next full enable/disable cycle.
3274 * PSR might take some time to get fully disabled
3275 * and be ready for re-enable.
3276 */
3277 if (!__psr_wait_for_idle_locked(intel_dp))
3278 goto unlock;
3279
3280 /*
3281 * The delayed work can race with an invalidate hence we need to
3282 * recheck. Since psr_flush first clears this and then reschedules we
3283 * won't ever miss a flush when bailing out here.
3284 */
3285 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3286 goto unlock;
3287
3288 intel_psr_activate(intel_dp);
3289 unlock:
3290 mutex_unlock(&intel_dp->psr.lock);
3291 }
3292
3293 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3294 {
3295 struct intel_display *display = to_intel_display(intel_dp);
3296 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3297
3298 if (!intel_dp->psr.psr2_sel_fetch_enabled)
3299 return;
3300
3301 if (DISPLAY_VER(display) >= 20)
3302 intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3303 LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3304 else
3305 intel_de_write(display,
3306 PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3307 man_trk_ctl_enable_bit_get(display) |
3308 man_trk_ctl_partial_frame_bit_get(display) |
3309 man_trk_ctl_single_full_frame_bit_get(display) |
3310 man_trk_ctl_continuos_full_frame(display));
3311 }
3312
3313 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3314 {
3315 struct intel_display *display = to_intel_display(intel_dp);
3316
3317 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3318 if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3319 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3320 intel_psr_configure_full_frame_update(intel_dp);
3321 }
3322
3323 intel_psr_force_update(intel_dp);
3324 } else {
3325 intel_psr_exit(intel_dp);
3326 }
3327 }
3328
3329 /**
3330 * intel_psr_invalidate - Invalidate PSR
3331 * @display: display device
3332 * @frontbuffer_bits: frontbuffer plane tracking bits
3333 * @origin: which operation caused the invalidate
3334 *
3335 * Since the hardware frontbuffer tracking has gaps we need to integrate
3336 * with the software frontbuffer tracking. This function gets called every
3337 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3338 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3339 *
3340 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3341 */
3342 void intel_psr_invalidate(struct intel_display *display,
3343 unsigned frontbuffer_bits, enum fb_op_origin origin)
3344 {
3345 struct intel_encoder *encoder;
3346
3347 if (origin == ORIGIN_FLIP)
3348 return;
3349
3350 for_each_intel_encoder_with_psr(display->drm, encoder) {
3351 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3352 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3353
3354 mutex_lock(&intel_dp->psr.lock);
3355 if (!intel_dp->psr.enabled) {
3356 mutex_unlock(&intel_dp->psr.lock);
3357 continue;
3358 }
3359
3360 pipe_frontbuffer_bits &=
3361 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3362 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3363
3364 if (pipe_frontbuffer_bits)
3365 _psr_invalidate_handle(intel_dp);
3366
3367 mutex_unlock(&intel_dp->psr.lock);
3368 }
3369 }
3370 /*
3371 * Once we completely rely on PSR2 S/W tracking in the future,
3372 * intel_psr_flush() will also invalidate and flush the PSR for the
3373 * ORIGIN_FLIP event; therefore tgl_dc3co_flush_locked() will need to be
3374 * changed accordingly.
3375 */
3376 static void
3377 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3378 enum fb_op_origin origin)
3379 {
3380 struct intel_display *display = to_intel_display(intel_dp);
3381
3382 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3383 !intel_dp->psr.active)
3384 return;
3385
3386 /*
3387 * Every frontbuffer flush flip event re-arms the delayed work; when the
3388 * delayed work finally runs, the display has been idle for the whole delay.
3389 */
3390 if (!(frontbuffer_bits &
3391 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3392 return;
3393
3394 tgl_psr2_enable_dc3co(intel_dp);
3395 mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
3396 intel_dp->psr.dc3co_exit_delay);
3397 }
3398
3399 static void _psr_flush_handle(struct intel_dp *intel_dp)
3400 {
3401 struct intel_display *display = to_intel_display(intel_dp);
3402
3403 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3404 /* Selective fetch prior to LNL */
3405 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3406 /* can we turn CFF off? */
3407 if (intel_dp->psr.busy_frontbuffer_bits == 0)
3408 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3409 }
3410
3411 /*
3412 * Still keep the CFF bit enabled as we don't have a proper SU
3413 * configuration in case an update is sent for any reason after
3414 * the SFF bit gets cleared by the HW on the next vblank.
3415 *
3416 * NOTE: Setting the CFF bit is not needed from LunarLake onwards as
3417 * we have our own register for the SFF bit and we are not overwriting
3418 * the existing SU configuration.
3419 */
3420 intel_psr_configure_full_frame_update(intel_dp);
3421
3422 intel_psr_force_update(intel_dp);
3423 } else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
3424 /*
3425 * PSR1 on all platforms
3426 * PSR2 HW tracking
3427 * Panel Replay Full frame update
3428 */
3429 intel_psr_force_update(intel_dp);
3430 } else {
3431 /* Selective update LNL onwards */
3432 intel_psr_exit(intel_dp);
3433 }
3434
3435 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3436 queue_work(display->wq.unordered, &intel_dp->psr.work);
3437 }
3438
3439 /**
3440 * intel_psr_flush - Flush PSR
3441 * @display: display device
3442 * @frontbuffer_bits: frontbuffer plane tracking bits
3443 * @origin: which operation caused the flush
3444 *
3445 * Since the hardware frontbuffer tracking has gaps we need to integrate
3446 * with the software frontbuffer tracking. This function gets called every
3447 * time frontbuffer rendering has completed and flushed out to memory. PSR
3448 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3449 *
3450 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3451 */
3452 void intel_psr_flush(struct intel_display *display,
3453 unsigned frontbuffer_bits, enum fb_op_origin origin)
3454 {
3455 struct intel_encoder *encoder;
3456
3457 for_each_intel_encoder_with_psr(display->drm, encoder) {
3458 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3459 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3460
3461 mutex_lock(&intel_dp->psr.lock);
3462 if (!intel_dp->psr.enabled) {
3463 mutex_unlock(&intel_dp->psr.lock);
3464 continue;
3465 }
3466
3467 pipe_frontbuffer_bits &=
3468 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3469 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3470
3471 /*
3472 * If the PSR is paused by an explicit intel_psr_paused() call,
3473 * we have to ensure that the PSR is not activated until
3474 * intel_psr_resume() is called.
3475 */
3476 if (intel_dp->psr.pause_counter)
3477 goto unlock;
3478
3479 if (origin == ORIGIN_FLIP ||
3480 (origin == ORIGIN_CURSOR_UPDATE &&
3481 !intel_dp->psr.psr2_sel_fetch_enabled)) {
3482 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3483 goto unlock;
3484 }
3485
3486 if (pipe_frontbuffer_bits == 0)
3487 goto unlock;
3488
3489 /* By definition flush = invalidate + flush */
3490 _psr_flush_handle(intel_dp);
3491 unlock:
3492 mutex_unlock(&intel_dp->psr.lock);
3493 }
3494 }
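/*
 * Illustrative sketch of the frontbuffer tracking contract behind the two
 * entry points above: every frontbuffer write is bracketed by an invalidate
 * before the write and a flush once the write has landed in memory. The
 * frontbuffer bit computation and ORIGIN_CPU origin below are assumptions for
 * the example; real callers get the bits from the frontbuffer tracking code.
 *
 *	unsigned int bits = INTEL_FRONTBUFFER_ALL_MASK(PIPE_A);
 *
 *	intel_psr_invalidate(display, bits, ORIGIN_CPU);
 *	// ... CPU rendering to the frontbuffer ...
 *	intel_psr_flush(display, bits, ORIGIN_CPU);
 */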
3495
3496 /**
3497 * intel_psr_init - Init basic PSR work and mutex.
3498 * @intel_dp: Intel DP
3499 *
3500 * This function is called after connector initialization (which handles the
3501 * connector capabilities) and initializes the basic PSR state for each DP
3502 * encoder.
3503 */
3504 void intel_psr_init(struct intel_dp *intel_dp)
3505 {
3506 struct intel_display *display = to_intel_display(intel_dp);
3507 struct intel_connector *connector = intel_dp->attached_connector;
3508 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3509
3510 if (!(HAS_PSR(display) || HAS_DP20(display)))
3511 return;
3512
3513 /*
3514 * HSW spec explicitly says PSR is tied to port A.
3515 * BDW+ platforms have an instance of PSR registers per transcoder, but
3516 * BDW, GEN9 and GEN11 are only validated by the HW team on the eDP
3517 * transcoder.
3518 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3519 * so let's keep it hardcoded to PORT_A for those platforms.
3520 * GEN12 supports an instance of PSR registers per transcoder.
3521 */
3522 if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3523 drm_dbg_kms(display->drm,
3524 "PSR condition failed: Port not supported\n");
3525 return;
3526 }
3527
3528 if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3529 DISPLAY_VER(display) >= 20)
3530 intel_dp->psr.source_panel_replay_support = true;
3531
3532 if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3533 intel_dp->psr.source_support = true;
3534
3535 /* Set the link_standby vs. link_off default */
3536 if (DISPLAY_VER(display) < 12)
3537 /* For platforms up to TGL, respect the VBT again */
3538 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3539
3540 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3541 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3542 mutex_init(&intel_dp->psr.lock);
3543 }
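/*
 * Illustrative only: once intel_psr_init() has populated source_support and
 * source_panel_replay_support, later code paths typically gate on the
 * CAN_PSR()/CAN_PANEL_REPLAY() helpers, e.g. (as intel_psr_short_pulse()
 * below does):
 *
 *	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
 *		return;
 */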
3544
3545 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3546 u8 *status, u8 *error_status)
3547 {
3548 struct drm_dp_aux *aux = &intel_dp->aux;
3549 int ret;
3550 unsigned int offset;
3551
3552 offset = intel_dp->psr.panel_replay_enabled ?
3553 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3554
3555 ret = drm_dp_dpcd_readb(aux, offset, status);
3556 if (ret != 1)
3557 return ret;
3558
3559 offset = intel_dp->psr.panel_replay_enabled ?
3560 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3561
3562 ret = drm_dp_dpcd_readb(aux, offset, error_status);
3563 if (ret != 1)
3564 return ret;
3565
3566 *status = *status & DP_PSR_SINK_STATE_MASK;
3567
3568 return 0;
3569 }
3570
3571 static void psr_alpm_check(struct intel_dp *intel_dp)
3572 {
3573 struct intel_psr *psr = &intel_dp->psr;
3574
3575 if (!psr->sel_update_enabled)
3576 return;
3577
3578 if (intel_alpm_get_error(intel_dp)) {
3579 intel_psr_disable_locked(intel_dp);
3580 psr->sink_not_reliable = true;
3581 }
3582 }
3583
3584 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3585 {
3586 struct intel_display *display = to_intel_display(intel_dp);
3587 struct intel_psr *psr = &intel_dp->psr;
3588 u8 val;
3589 int r;
3590
3591 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3592 if (r != 1) {
3593 drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3594 return;
3595 }
3596
3597 if (val & DP_PSR_CAPS_CHANGE) {
3598 intel_psr_disable_locked(intel_dp);
3599 psr->sink_not_reliable = true;
3600 drm_dbg_kms(display->drm,
3601 "Sink PSR capability changed, disabling PSR\n");
3602
3603 /* Clearing it */
3604 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3605 }
3606 }
3607
3608 /*
3609 * The following error bits are common:
3610 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3611 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3612 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3613 * so this function relies on the PSR definitions.
3614 */
3615 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3616 {
3617 struct intel_display *display = to_intel_display(intel_dp);
3618 struct intel_psr *psr = &intel_dp->psr;
3619 u8 status, error_status;
3620 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3621 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3622 DP_PSR_LINK_CRC_ERROR;
3623
3624 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3625 return;
3626
3627 mutex_lock(&psr->lock);
3628
3629 psr->link_ok = false;
3630
3631 if (!psr->enabled)
3632 goto exit;
3633
3634 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3635 drm_err(display->drm,
3636 "Error reading PSR status or error status\n");
3637 goto exit;
3638 }
3639
3640 if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3641 (error_status & errors)) {
3642 intel_psr_disable_locked(intel_dp);
3643 psr->sink_not_reliable = true;
3644 }
3645
3646 if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3647 !error_status)
3648 drm_dbg_kms(display->drm,
3649 "PSR sink internal error, disabling PSR\n");
3650 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3651 drm_dbg_kms(display->drm,
3652 "PSR RFB storage error, disabling PSR\n");
3653 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3654 drm_dbg_kms(display->drm,
3655 "PSR VSC SDP uncorrectable error, disabling PSR\n");
3656 if (error_status & DP_PSR_LINK_CRC_ERROR)
3657 drm_dbg_kms(display->drm,
3658 "PSR Link CRC error, disabling PSR\n");
3659
3660 if (error_status & ~errors)
3661 drm_err(display->drm,
3662 "PSR_ERROR_STATUS unhandled errors %x\n",
3663 error_status & ~errors);
3664 /* clear status register */
3665 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3666
3667 if (!psr->panel_replay_enabled) {
3668 psr_alpm_check(intel_dp);
3669 psr_capability_changed_check(intel_dp);
3670 }
3671
3672 exit:
3673 mutex_unlock(&psr->lock);
3674 }
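/*
 * The short pulse handler above relies on the PSR and Panel Replay error bit
 * definitions being identical (see the comment before intel_psr_short_pulse()).
 * A compile-time check along these lines could document that assumption; this
 * is only a sketch, not something the driver currently does:
 *
 *	BUILD_BUG_ON(DP_PSR_RFB_STORAGE_ERROR !=
 *		     DP_PANEL_REPLAY_RFB_STORAGE_ERROR);
 *	BUILD_BUG_ON(DP_PSR_LINK_CRC_ERROR !=
 *		     DP_PANEL_REPLAY_LINK_CRC_ERROR);
 */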
3675
3676 bool intel_psr_enabled(struct intel_dp *intel_dp)
3677 {
3678 bool ret;
3679
3680 if (!CAN_PSR(intel_dp))
3681 return false;
3682
3683 mutex_lock(&intel_dp->psr.lock);
3684 ret = intel_dp->psr.enabled;
3685 mutex_unlock(&intel_dp->psr.lock);
3686
3687 return ret;
3688 }
3689
3690 /**
3691 * intel_psr_link_ok - return psr->link_ok
3692 * @intel_dp: struct intel_dp
3693 *
3694 * We are seeing unexpected link re-trainings with some panels. This is caused
3695 * by the panel reporting a bad link status after PSR is enabled. Code checking
3696 * the link status can call this to decide whether it can ignore a bad link
3697 * status reported by the panel, i.e. if the panel reports a bad link but
3698 * intel_psr_link_ok() says the link is ok, the caller should trust the latter.
3699 *
3700 * Return: the current value of link_ok
3701 */
3702 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3703 {
3704 bool ret;
3705
3706 if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3707 !intel_dp_is_edp(intel_dp))
3708 return false;
3709
3710 mutex_lock(&intel_dp->psr.lock);
3711 ret = intel_dp->psr.link_ok;
3712 mutex_unlock(&intel_dp->psr.lock);
3713
3714 return ret;
3715 }
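/*
 * Illustrative sketch of how link status checking code is expected to use
 * intel_psr_link_ok(): a bad link status reported by the panel is ignored
 * while PSR reports the link as ok. "link_status_bad" is an assumed local
 * computed from the DPCD link status elsewhere; this is an example only.
 *
 *	if (link_status_bad && intel_psr_link_ok(intel_dp))
 *		return;	// trust PSR, skip the link re-training
 */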
3716
3717 /**
3718 * intel_psr_lock - grab PSR lock
3719 * @crtc_state: the crtc state
3720 *
3721 * This is initially meant to be used around the CRTC update, when
3722 * vblank-sensitive registers are updated and we need to grab the lock
3723 * before the vblank evasion starts.
3724 */
3725 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3726 {
3727 struct intel_display *display = to_intel_display(crtc_state);
3728 struct intel_encoder *encoder;
3729
3730 if (!crtc_state->has_psr)
3731 return;
3732
3733 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3734 crtc_state->uapi.encoder_mask) {
3735 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3736
3737 mutex_lock(&intel_dp->psr.lock);
3738 break;
3739 }
3740 }
3741
3742 /**
3743 * intel_psr_unlock - release PSR lock
3744 * @crtc_state: the crtc state
3745 *
3746 * Release the PSR lock that was held during pipe update.
3747 */
3748 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3749 {
3750 struct intel_display *display = to_intel_display(crtc_state);
3751 struct intel_encoder *encoder;
3752
3753 if (!crtc_state->has_psr)
3754 return;
3755
3756 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3757 crtc_state->uapi.encoder_mask) {
3758 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3759
3760 mutex_unlock(&intel_dp->psr.lock);
3761 break;
3762 }
3763 }
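/*
 * Illustrative sketch of the expected bracket around a vblank sensitive pipe
 * update, combining the helpers above; the update step in the middle is an
 * assumption standing in for the real commit code.
 *
 *	intel_psr_lock(new_crtc_state);
 *	intel_psr_wait_for_idle_locked(new_crtc_state);
 *	// ... vblank evasion and vblank sensitive register updates ...
 *	intel_psr_unlock(new_crtc_state);
 */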
3764
3765 /* Wa_16025596647 */
3766 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
3767 {
3768 struct intel_display *display = to_intel_display(intel_dp);
3769 bool dc5_dc6_blocked;
3770
3771 if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
3772 return;
3773
3774 dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
3775
3776 if (intel_dp->psr.sel_update_enabled)
3777 psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
3778 psr_compute_idle_frames(intel_dp));
3779 else
3780 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
3781 intel_dp->psr.pipe,
3782 dc5_dc6_blocked);
3783 }
3784
3785 static void psr_dc5_dc6_wa_work(struct work_struct *work)
3786 {
3787 struct intel_display *display = container_of(work, typeof(*display),
3788 psr_dc5_dc6_wa_work);
3789 struct intel_encoder *encoder;
3790
3791 for_each_intel_encoder_with_psr(display->drm, encoder) {
3792 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3793
3794 mutex_lock(&intel_dp->psr.lock);
3795
3796 if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
3797 !intel_dp->psr.pkg_c_latency_used)
3798 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3799
3800 mutex_unlock(&intel_dp->psr.lock);
3801 }
3802 }
3803
3804 /**
3805 * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
3806 * @display: intel display struct
3807 *
3808 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
3809 * psr_dc5_dc6_wa_work used for applying/removing the workaround.
3810 */
3811 void intel_psr_notify_dc5_dc6(struct intel_display *display)
3812 {
3813 if (DISPLAY_VER(display) != 20 &&
3814 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3815 return;
3816
3817 schedule_work(&display->psr_dc5_dc6_wa_work);
3818 }
3819
3820 /**
3821 * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
3822 * @display: intel display struct
3823 *
3824 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
3825 * psr_dc5_dc6_wa_work used for applying the workaround.
3826 */
3827 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
3828 {
3829 if (DISPLAY_VER(display) != 20 &&
3830 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3831 return;
3832
3833 INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
3834 }
3835
3836 /**
3837 * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
3838 * @state: intel atomic state
3839 * @crtc: intel crtc
3840 * @enable: enable/disable
3841 *
3842 * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3843 * apply/remove the workaround when a pipe is getting enabled/disabled.
3844 */
3845 void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
3846 struct intel_crtc *crtc, bool enable)
3847 {
3848 struct intel_display *display = to_intel_display(state);
3849 struct intel_encoder *encoder;
3850
3851 if (DISPLAY_VER(display) != 20 &&
3852 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
3853 return;
3854
3855 for_each_intel_encoder_with_psr(display->drm, encoder) {
3856 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3857 u8 active_non_psr_pipes;
3858
3859 mutex_lock(&intel_dp->psr.lock);
3860
3861 if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
3862 goto unlock;
3863
3864 active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
3865
3866 if (enable)
3867 active_non_psr_pipes |= BIT(crtc->pipe);
3868 else
3869 active_non_psr_pipes &= ~BIT(crtc->pipe);
3870
3871 if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
3872 goto unlock;
3873
3874 if ((enable && intel_dp->psr.active_non_psr_pipes) ||
3875 (!enable && !intel_dp->psr.active_non_psr_pipes) ||
3876 !intel_dp->psr.pkg_c_latency_used) {
3877 intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3878 goto unlock;
3879 }
3880
3881 intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
3882
3883 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3884 unlock:
3885 mutex_unlock(&intel_dp->psr.lock);
3886 }
3887 }
3888
3889 /**
3890 * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
3891 * @display: intel display struct
3892 * @enable: enable/disable
3893 *
3894 * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
3895 * apply/remove the workaround when vblank is getting enabled/disabled.
3896 */
3897 void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
3898 bool enable)
3899 {
3900 struct intel_encoder *encoder;
3901
3902 for_each_intel_encoder_with_psr(display->drm, encoder) {
3903 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3904
3905 mutex_lock(&intel_dp->psr.lock);
3906 if (intel_dp->psr.panel_replay_enabled) {
3907 mutex_unlock(&intel_dp->psr.lock);
3908 break;
3909 }
3910
3911 if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
3912 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
3913
3914 mutex_unlock(&intel_dp->psr.lock);
3915 return;
3916 }
3917
3918 /*
3919 * NOTE: intel_display_power_set_target_dc_state is used
3920 * only by PSR code for DC3CO handling. The DC3CO target
3921 * state is currently disabled in the PSR code. If DC3CO
3922 * is taken into use we need to take that into account here
3923 * as well.
3924 */
3925 intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
3926 DC_STATE_EN_UPTO_DC6);
3927 }
3928
3929 static void
3930 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3931 {
3932 struct intel_display *display = to_intel_display(intel_dp);
3933 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3934 const char *status = "unknown";
3935 u32 val, status_val;
3936
3937 if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
3938 (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
3939 static const char * const live_status[] = {
3940 "IDLE",
3941 "CAPTURE",
3942 "CAPTURE_FS",
3943 "SLEEP",
3944 "BUFON_FW",
3945 "ML_UP",
3946 "SU_STANDBY",
3947 "FAST_SLEEP",
3948 "DEEP_SLEEP",
3949 "BUF_ON",
3950 "TG_ON"
3951 };
3952 val = intel_de_read(display,
3953 EDP_PSR2_STATUS(display, cpu_transcoder));
3954 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3955 if (status_val < ARRAY_SIZE(live_status))
3956 status = live_status[status_val];
3957 } else {
3958 static const char * const live_status[] = {
3959 "IDLE",
3960 "SRDONACK",
3961 "SRDENT",
3962 "BUFOFF",
3963 "BUFON",
3964 "AUXACK",
3965 "SRDOFFACK",
3966 "SRDENT_ON",
3967 };
3968 val = intel_de_read(display,
3969 psr_status_reg(display, cpu_transcoder));
3970 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3971 if (status_val < ARRAY_SIZE(live_status))
3972 status = live_status[status_val];
3973 }
3974
3975 seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3976 }
3977
3978 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3979 struct seq_file *m)
3980 {
3981 struct intel_psr *psr = &intel_dp->psr;
3982
3983 seq_printf(m, "Sink support: PSR = %s",
3984 str_yes_no(psr->sink_support));
3985
3986 if (psr->sink_support)
3987 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3988 if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3989 seq_printf(m, " (Early Transport)");
3990 seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3991 seq_printf(m, ", Panel Replay Selective Update = %s",
3992 str_yes_no(psr->sink_panel_replay_su_support));
3993 if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
3994 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3995 seq_printf(m, " (Early Transport)");
3996 seq_printf(m, "\n");
3997 }
3998
3999 static void intel_psr_print_mode(struct intel_dp *intel_dp,
4000 struct seq_file *m)
4001 {
4002 struct intel_psr *psr = &intel_dp->psr;
4003 const char *status, *mode, *region_et;
4004
4005 if (psr->enabled)
4006 status = " enabled";
4007 else
4008 status = "disabled";
4009
4010 if (psr->panel_replay_enabled && psr->sel_update_enabled)
4011 mode = "Panel Replay Selective Update";
4012 else if (psr->panel_replay_enabled)
4013 mode = "Panel Replay";
4014 else if (psr->sel_update_enabled)
4015 mode = "PSR2";
4016 else if (psr->enabled)
4017 mode = "PSR1";
4018 else
4019 mode = "";
4020
4021 if (psr->su_region_et_enabled)
4022 region_et = " (Early Transport)";
4023 else
4024 region_et = "";
4025
4026 seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
4027 }
4028
4029 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
4030 {
4031 struct intel_display *display = to_intel_display(intel_dp);
4032 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
4033 struct intel_psr *psr = &intel_dp->psr;
4034 struct ref_tracker *wakeref;
4035 bool enabled;
4036 u32 val, psr2_ctl;
4037
4038 intel_psr_sink_capability(intel_dp, m);
4039
4040 if (!(psr->sink_support || psr->sink_panel_replay_support))
4041 return 0;
4042
4043 wakeref = intel_display_rpm_get(display);
4044 mutex_lock(&psr->lock);
4045
4046 intel_psr_print_mode(intel_dp, m);
4047
4048 if (!psr->enabled) {
4049 seq_printf(m, "PSR sink not reliable: %s\n",
4050 str_yes_no(psr->sink_not_reliable));
4051
4052 goto unlock;
4053 }
4054
4055 if (psr->panel_replay_enabled) {
4056 val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
4057
4058 if (intel_dp_is_edp(intel_dp))
4059 psr2_ctl = intel_de_read(display,
4060 EDP_PSR2_CTL(display,
4061 cpu_transcoder));
4062
4063 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
4064 } else if (psr->sel_update_enabled) {
4065 val = intel_de_read(display,
4066 EDP_PSR2_CTL(display, cpu_transcoder));
4067 enabled = val & EDP_PSR2_ENABLE;
4068 } else {
4069 val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
4070 enabled = val & EDP_PSR_ENABLE;
4071 }
4072 seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
4073 str_enabled_disabled(enabled), val);
4074 if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
4075 seq_printf(m, "PSR2_CTL: 0x%08x\n",
4076 psr2_ctl);
4077 psr_source_status(intel_dp, m);
4078 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
4079 psr->busy_frontbuffer_bits);
4080
4081 /*
4082 * SKL+ Perf counter is reset to 0 every time a DC state is entered
4083 */
4084 val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
4085 seq_printf(m, "Performance counter: %u\n",
4086 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
4087
4088 if (psr->debug & I915_PSR_DEBUG_IRQ) {
4089 seq_printf(m, "Last attempted entry at: %lld\n",
4090 psr->last_entry_attempt);
4091 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
4092 }
4093
4094 if (psr->sel_update_enabled) {
4095 u32 su_frames_val[3];
4096 int frame;
4097
4098 /*
4099 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
4100 * (it returns zeros only) and it has been removed on Xe2_LPD.
4101 */
4102 if (DISPLAY_VER(display) < 13) {
4103 /*
4104 * Read all 3 registers beforehand to minimize crossing a
4105 * frame boundary between register reads
4106 */
4107 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
4108 val = intel_de_read(display,
4109 PSR2_SU_STATUS(display, cpu_transcoder, frame));
4110 su_frames_val[frame / 3] = val;
4111 }
4112
4113 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
4114
4115 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
4116 u32 su_blocks;
4117
4118 su_blocks = su_frames_val[frame / 3] &
4119 PSR2_SU_STATUS_MASK(frame);
4120 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
4121 seq_printf(m, "%d\t%d\n", frame, su_blocks);
4122 }
4123 }
4124
4125 seq_printf(m, "PSR2 selective fetch: %s\n",
4126 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
4127 }
4128
4129 unlock:
4130 mutex_unlock(&psr->lock);
4131 intel_display_rpm_put(display, wakeref);
4132
4133 return 0;
4134 }
4135
4136 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4137 {
4138 struct intel_display *display = m->private;
4139 struct intel_dp *intel_dp = NULL;
4140 struct intel_encoder *encoder;
4141
4142 if (!HAS_PSR(display))
4143 return -ENODEV;
4144
4145 /* Find the first EDP which supports PSR */
4146 for_each_intel_encoder_with_psr(display->drm, encoder) {
4147 intel_dp = enc_to_intel_dp(encoder);
4148 break;
4149 }
4150
4151 if (!intel_dp)
4152 return -ENODEV;
4153
4154 return intel_psr_status(m, intel_dp);
4155 }
4156 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4157
4158 static int
4159 i915_edp_psr_debug_set(void *data, u64 val)
4160 {
4161 struct intel_display *display = data;
4162 struct intel_encoder *encoder;
4163 int ret = -ENODEV;
4164
4165 if (!HAS_PSR(display))
4166 return ret;
4167
4168 for_each_intel_encoder_with_psr(display->drm, encoder) {
4169 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4170
4171 drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4172
4173 // TODO: split to each transcoder's PSR debug state
4174 with_intel_display_rpm(display)
4175 ret = intel_psr_debug_set(intel_dp, val);
4176 }
4177
4178 return ret;
4179 }
4180
4181 static int
4182 i915_edp_psr_debug_get(void *data, u64 *val)
4183 {
4184 struct intel_display *display = data;
4185 struct intel_encoder *encoder;
4186
4187 if (!HAS_PSR(display))
4188 return -ENODEV;
4189
4190 for_each_intel_encoder_with_psr(display->drm, encoder) {
4191 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4192
4193 // TODO: split to each transcoder's PSR debug state
4194 *val = READ_ONCE(intel_dp->psr.debug);
4195 return 0;
4196 }
4197
4198 return -ENODEV;
4199 }
4200
4201 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4202 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4203 "%llu\n");
4204
4205 void intel_psr_debugfs_register(struct intel_display *display)
4206 {
4207 struct dentry *debugfs_root = display->drm->debugfs_root;
4208
4209 debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
4210 display, &i915_edp_psr_debug_fops);
4211
4212 debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
4213 display, &i915_edp_psr_status_fops);
4214 }
4215
4216 static const char *psr_mode_str(struct intel_dp *intel_dp)
4217 {
4218 if (intel_dp->psr.panel_replay_enabled)
4219 return "PANEL-REPLAY";
4220 else if (intel_dp->psr.enabled)
4221 return "PSR";
4222
4223 return "unknown";
4224 }
4225
4226 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4227 {
4228 struct intel_connector *connector = m->private;
4229 struct intel_dp *intel_dp = intel_attached_dp(connector);
4230 static const char * const sink_status[] = {
4231 "inactive",
4232 "transition to active, capture and display",
4233 "active, display from RFB",
4234 "active, capture and display on sink device timings",
4235 "transition to inactive, capture and display, timing re-sync",
4236 "reserved",
4237 "reserved",
4238 "sink internal error",
4239 };
4240 const char *str;
4241 int ret;
4242 u8 status, error_status;
4243
4244 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4245 seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4246 return -ENODEV;
4247 }
4248
4249 if (connector->base.status != connector_status_connected)
4250 return -ENODEV;
4251
4252 ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4253 if (ret)
4254 return ret;
4255
4256 status &= DP_PSR_SINK_STATE_MASK;
4257 if (status < ARRAY_SIZE(sink_status))
4258 str = sink_status[status];
4259 else
4260 str = "unknown";
4261
4262 seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4263
4264 seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4265
4266 if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4267 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4268 DP_PSR_LINK_CRC_ERROR))
4269 seq_puts(m, ":\n");
4270 else
4271 seq_puts(m, "\n");
4272 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4273 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4274 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4275 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4276 if (error_status & DP_PSR_LINK_CRC_ERROR)
4277 seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4278
4279 return ret;
4280 }
4281 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4282
4283 static int i915_psr_status_show(struct seq_file *m, void *data)
4284 {
4285 struct intel_connector *connector = m->private;
4286 struct intel_dp *intel_dp = intel_attached_dp(connector);
4287
4288 return intel_psr_status(m, intel_dp);
4289 }
4290 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4291
4292 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4293 {
4294 struct intel_display *display = to_intel_display(connector);
4295 struct dentry *root = connector->base.debugfs_entry;
4296
4297 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4298 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4299 return;
4300
4301 debugfs_create_file("i915_psr_sink_status", 0444, root,
4302 connector, &i915_psr_sink_status_fops);
4303
4304 if (HAS_PSR(display) || HAS_DP20(display))
4305 debugfs_create_file("i915_psr_status", 0444, root,
4306 connector, &i915_psr_status_fops);
4307 }
4308
4309 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4310 {
4311 /*
4312 * eDP Panel Replay always uses ALPM
4313 * PSR2 uses ALPM but PSR1 doesn't
4314 */
4315 return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4316 crtc_state->has_panel_replay);
4317 }
4318
4319 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
4320 const struct intel_crtc_state *crtc_state)
4321 {
4322 return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
4323 }
4324