1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/debugfs.h>
25
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29
30 #include "i915_drv.h"
31 #include "i915_reg.h"
32 #include "intel_alpm.h"
33 #include "intel_atomic.h"
34 #include "intel_crtc.h"
35 #include "intel_cursor_regs.h"
36 #include "intel_ddi.h"
37 #include "intel_de.h"
38 #include "intel_display_irq.h"
39 #include "intel_display_types.h"
40 #include "intel_dp.h"
41 #include "intel_dp_aux.h"
42 #include "intel_frontbuffer.h"
43 #include "intel_hdmi.h"
44 #include "intel_psr.h"
45 #include "intel_psr_regs.h"
46 #include "intel_snps_phy.h"
47 #include "skl_universal_plane.h"
48
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP1.3. The PSR feature allows the display to go to lower standby
 * states when the system is idle but the display is on, as it eliminates
 * display refresh requests to DDR memory completely as long as the frame
 * buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power saving state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
 * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and the function above is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
 * of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only
 * use page flips.
 */
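
/*
 * A rough sketch (simplified) of the frontbuffer tracking interaction
 * described above; the exact call chain lives in intel_frontbuffer.c:
 *
 *   CPU/GTT write to a frontbuffer object
 *     -> intel_frontbuffer_invalidate()
 *          -> intel_psr_invalidate()    PSR exits and stays disabled
 *   rendering/flush completes
 *     -> intel_frontbuffer_flush()
 *          -> intel_psr_flush()         schedules the work that re-enables PSR
 */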

/*
 * Description of PSR mask bits:
 *
 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 *
 *  When unmasked (nearly) all display register writes (eg. even
 *  SWF) trigger a PSR exit. Some registers are excluded from this
 *  and they have a more specific mask (described below). On icl+
 *  this bit no longer exists and is effectively always set.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 *
 *  When unmasked (nearly) all pipe/plane register writes
 *  trigger a PSR exit. Some plane registers are excluded from this
 *  and they have a more specific mask (described below).
 *
 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 *
 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 *  SPR_SURF/CURBASE are not included in this and instead are
 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 *
 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this doesn't exist but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 *
 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 *
 *  When unmasked PSR is blocked as long as vblank and/or vsync
 *  interrupt is unmasked in IMR *and* enabled in IER.
 *
 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
 *
 *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
 *  No idea as of now why there is a difference. HSW/BDW (which don't
 *  even have DMC) always generate it after link training. Go figure.
 *
 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during DC5
 *  exit+PSR exit sequence, and thus the bit does nothing until
 *  latched by the vblank that it was trying to prevent from being
 *  generated in the first place. So we should probably call this
 *  one a chicken/egg bit instead on skl+.
 *
 *  In standby mode (as opposed to link-off) this makes no difference
 *  as the timing generator keeps running the whole time generating
 *  normal periodic vblanks.
 *
 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 *  and doing so makes the behaviour match the skl+ reset value.
 *
 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 *
 *  On BDW without this bit set no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 *
 * The rest of the bits are more self-explanatory and/or
 * irrelevant for normal operation.
 *
 * Description of intel_crtc_state variables has_psr, has_panel_replay and
 * has_sel_update:
 *
 *  has_psr (alone): PSR1
 *  has_psr + has_sel_update: PSR2
 *  has_psr + has_panel_replay: Panel Replay
 *  has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
 *
 * Description of some intel_psr variables: enabled, panel_replay_enabled,
 * sel_update_enabled
 *
 *  enabled (alone): PSR1
 *  enabled + sel_update_enabled: PSR2
 *  enabled + panel_replay_enabled: Panel Replay
 *  enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
 */
196
197 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
198 (intel_dp)->psr.source_support)
199
bool intel_encoder_can_psr(struct intel_encoder *encoder)
201 {
202 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
203 return CAN_PSR(enc_to_intel_dp(encoder)) ||
204 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
205 else
206 return false;
207 }
208
bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
210 const struct intel_crtc_state *crtc_state)
211 {
212 /*
213 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
214 * the output is enabled. For non-eDP outputs the main link is always
215 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
216 * for eDP.
217 *
218 * TODO:
219 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
220 * the ALPM with main-link off mode is not enabled.
221 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
222 * main-link off mode is added for it and this mode gets enabled.
223 */
224 return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
225 intel_encoder_can_psr(encoder);
226 }
227
static bool psr_global_enabled(struct intel_dp *intel_dp)
229 {
230 struct intel_display *display = to_intel_display(intel_dp);
231 struct intel_connector *connector = intel_dp->attached_connector;
232
233 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
234 case I915_PSR_DEBUG_DEFAULT:
235 if (display->params.enable_psr == -1)
236 return intel_dp_is_edp(intel_dp) ?
237 connector->panel.vbt.psr.enable :
238 true;
239 return display->params.enable_psr;
240 case I915_PSR_DEBUG_DISABLE:
241 return false;
242 default:
243 return true;
244 }
245 }
246
static bool psr2_global_enabled(struct intel_dp *intel_dp)
248 {
249 struct intel_display *display = to_intel_display(intel_dp);
250
251 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
252 case I915_PSR_DEBUG_DISABLE:
253 case I915_PSR_DEBUG_FORCE_PSR1:
254 return false;
255 default:
256 if (display->params.enable_psr == 1)
257 return false;
258 return true;
259 }
260 }
261
static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
263 {
264 struct intel_display *display = to_intel_display(intel_dp);
265
266 if (display->params.enable_psr != -1)
267 return false;
268
269 return true;
270 }
271
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
273 {
274 struct intel_display *display = to_intel_display(intel_dp);
275
276 if ((display->params.enable_psr != -1) ||
277 (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
278 return false;
279 return true;
280 }
281
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
283 {
284 struct intel_display *display = to_intel_display(intel_dp);
285
286 return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
287 EDP_PSR_ERROR(intel_dp->psr.transcoder);
288 }
289
static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
291 {
292 struct intel_display *display = to_intel_display(intel_dp);
293
294 return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
295 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
296 }
297
static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
299 {
300 struct intel_display *display = to_intel_display(intel_dp);
301
302 return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
303 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
304 }
305
static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
307 {
308 struct intel_display *display = to_intel_display(intel_dp);
309
310 return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
311 EDP_PSR_MASK(intel_dp->psr.transcoder);
312 }
313
static i915_reg_t psr_ctl_reg(struct intel_display *display,
315 enum transcoder cpu_transcoder)
316 {
317 if (DISPLAY_VER(display) >= 8)
318 return EDP_PSR_CTL(display, cpu_transcoder);
319 else
320 return HSW_SRD_CTL;
321 }
322
static i915_reg_t psr_debug_reg(struct intel_display *display,
324 enum transcoder cpu_transcoder)
325 {
326 if (DISPLAY_VER(display) >= 8)
327 return EDP_PSR_DEBUG(display, cpu_transcoder);
328 else
329 return HSW_SRD_DEBUG;
330 }
331
static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
333 enum transcoder cpu_transcoder)
334 {
335 if (DISPLAY_VER(display) >= 8)
336 return EDP_PSR_PERF_CNT(display, cpu_transcoder);
337 else
338 return HSW_SRD_PERF_CNT;
339 }
340
static i915_reg_t psr_status_reg(struct intel_display *display,
342 enum transcoder cpu_transcoder)
343 {
344 if (DISPLAY_VER(display) >= 8)
345 return EDP_PSR_STATUS(display, cpu_transcoder);
346 else
347 return HSW_SRD_STATUS;
348 }
349
static i915_reg_t psr_imr_reg(struct intel_display *display,
351 enum transcoder cpu_transcoder)
352 {
353 if (DISPLAY_VER(display) >= 12)
354 return TRANS_PSR_IMR(display, cpu_transcoder);
355 else
356 return EDP_PSR_IMR;
357 }
358
static i915_reg_t psr_iir_reg(struct intel_display *display,
360 enum transcoder cpu_transcoder)
361 {
362 if (DISPLAY_VER(display) >= 12)
363 return TRANS_PSR_IIR(display, cpu_transcoder);
364 else
365 return EDP_PSR_IIR;
366 }
367
static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
369 enum transcoder cpu_transcoder)
370 {
371 if (DISPLAY_VER(display) >= 8)
372 return EDP_PSR_AUX_CTL(display, cpu_transcoder);
373 else
374 return HSW_SRD_AUX_CTL;
375 }
376
static i915_reg_t psr_aux_data_reg(struct intel_display *display,
378 enum transcoder cpu_transcoder, int i)
379 {
380 if (DISPLAY_VER(display) >= 8)
381 return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
382 else
383 return HSW_SRD_AUX_DATA(i);
384 }
385
static void psr_irq_control(struct intel_dp *intel_dp)
387 {
388 struct intel_display *display = to_intel_display(intel_dp);
389 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
390 u32 mask;
391
392 if (intel_dp->psr.panel_replay_enabled)
393 return;
394
395 mask = psr_irq_psr_error_bit_get(intel_dp);
396 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
397 mask |= psr_irq_post_exit_bit_get(intel_dp) |
398 psr_irq_pre_entry_bit_get(intel_dp);
399
400 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
401 psr_irq_mask_get(intel_dp), ~mask);
402 }
403
static void psr_event_print(struct intel_display *display,
405 u32 val, bool sel_update_enabled)
406 {
407 drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
408 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
409 drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
410 if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
411 drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
412 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
413 drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
414 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
415 drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
416 if (val & PSR_EVENT_GRAPHICS_RESET)
417 drm_dbg_kms(display->drm, "\tGraphics reset\n");
418 if (val & PSR_EVENT_PCH_INTERRUPT)
419 drm_dbg_kms(display->drm, "\tPCH interrupt\n");
420 if (val & PSR_EVENT_MEMORY_UP)
421 drm_dbg_kms(display->drm, "\tMemory up\n");
422 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
423 drm_dbg_kms(display->drm, "\tFront buffer modification\n");
424 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
425 drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
426 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
427 drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
428 if (val & PSR_EVENT_REGISTER_UPDATE)
429 drm_dbg_kms(display->drm, "\tRegister updated\n");
430 if (val & PSR_EVENT_HDCP_ENABLE)
431 drm_dbg_kms(display->drm, "\tHDCP enabled\n");
432 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
433 drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
434 if (val & PSR_EVENT_VBI_ENABLE)
435 drm_dbg_kms(display->drm, "\tVBI enabled\n");
436 if (val & PSR_EVENT_LPSP_MODE_EXIT)
437 drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
438 if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
439 drm_dbg_kms(display->drm, "\tPSR disabled\n");
440 }
441
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
443 {
444 struct intel_display *display = to_intel_display(intel_dp);
445 struct drm_i915_private *dev_priv = to_i915(display->drm);
446 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
447 ktime_t time_ns = ktime_get();
448
449 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
450 intel_dp->psr.last_entry_attempt = time_ns;
451 drm_dbg_kms(display->drm,
452 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
453 transcoder_name(cpu_transcoder));
454 }
455
456 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
457 intel_dp->psr.last_exit = time_ns;
458 drm_dbg_kms(display->drm,
459 "[transcoder %s] PSR exit completed\n",
460 transcoder_name(cpu_transcoder));
461
462 if (DISPLAY_VER(display) >= 9) {
463 u32 val;
464
465 val = intel_de_rmw(dev_priv,
466 PSR_EVENT(dev_priv, cpu_transcoder),
467 0, 0);
468
469 psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
470 }
471 }
472
473 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
474 drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
475 transcoder_name(cpu_transcoder));
476
477 intel_dp->psr.irq_aux_error = true;
478
		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
		 */
487 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
488 0, psr_irq_psr_error_bit_get(intel_dp));
489
490 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
491 }
492 }
493
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
495 {
496 struct intel_display *display = to_intel_display(intel_dp);
497 u8 val = 8; /* assume the worst if we can't read the value */
498
499 if (drm_dp_dpcd_readb(&intel_dp->aux,
500 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
501 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
502 else
503 drm_dbg_kms(display->drm,
504 "Unable to get sink synchronization latency, assuming 8 frames\n");
505 return val;
506 }
507
static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
509 {
510 u8 su_capability = 0;
511
512 if (intel_dp->psr.sink_panel_replay_su_support)
513 drm_dp_dpcd_readb(&intel_dp->aux,
514 DP_PANEL_PANEL_REPLAY_CAPABILITY,
515 &su_capability);
516 else
517 su_capability = intel_dp->psr_dpcd[1];
518
519 return su_capability;
520 }
521
522 static unsigned int
intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
524 {
525 return intel_dp->psr.sink_panel_replay_su_support ?
526 DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
527 DP_PSR2_SU_X_GRANULARITY;
528 }
529
530 static unsigned int
intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
532 {
533 return intel_dp->psr.sink_panel_replay_su_support ?
534 DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
535 DP_PSR2_SU_Y_GRANULARITY;
536 }
537
/*
 * Note: Bits related to granularity are the same in the panel replay and PSR
 * registers. Rely on the PSR definitions for these "common" bits.
 */
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
543 {
544 struct intel_display *display = to_intel_display(intel_dp);
545 ssize_t r;
546 u16 w;
547 u8 y;
548
	/*
	 * TODO: Do we need to take into account panels supporting both PSR and
	 * Panel Replay?
	 */

	/*
	 * If the sink doesn't have specific granularity requirements, set the
	 * legacy ones.
	 */
558 if (!(intel_dp_get_su_capability(intel_dp) &
559 DP_PSR2_SU_GRANULARITY_REQUIRED)) {
560 /* As PSR2 HW sends full lines, we do not care about x granularity */
561 w = 4;
562 y = 4;
563 goto exit;
564 }
565
566 r = drm_dp_dpcd_read(&intel_dp->aux,
567 intel_dp_get_su_x_granularity_offset(intel_dp),
568 &w, 2);
569 if (r != 2)
570 drm_dbg_kms(display->drm,
571 "Unable to read selective update x granularity\n");
572 /*
573 * Spec says that if the value read is 0 the default granularity should
574 * be used instead.
575 */
576 if (r != 2 || w == 0)
577 w = 4;
578
579 r = drm_dp_dpcd_read(&intel_dp->aux,
580 intel_dp_get_su_y_granularity_offset(intel_dp),
581 &y, 1);
582 if (r != 1) {
583 drm_dbg_kms(display->drm,
584 "Unable to read selective update y granularity\n");
585 y = 4;
586 }
587 if (y == 0)
588 y = 1;
589
590 exit:
591 intel_dp->psr.su_w_granularity = w;
592 intel_dp->psr.su_y_granularity = y;
593 }
594
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
596 {
597 struct intel_display *display = to_intel_display(intel_dp);
598
599 if (intel_dp_is_edp(intel_dp)) {
600 if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
601 drm_dbg_kms(display->drm,
602 "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
603 return;
604 }
605
606 if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
607 drm_dbg_kms(display->drm,
608 "Panel doesn't support early transport, eDP Panel Replay not possible\n");
609 return;
610 }
611 }
612
613 intel_dp->psr.sink_panel_replay_support = true;
614
615 if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
616 intel_dp->psr.sink_panel_replay_su_support = true;
617
618 drm_dbg_kms(display->drm,
619 "Panel replay %sis supported by panel\n",
620 intel_dp->psr.sink_panel_replay_su_support ?
621 "selective_update " : "");
622 }
623
static void _psr_init_dpcd(struct intel_dp *intel_dp)
625 {
626 struct intel_display *display = to_intel_display(intel_dp);
627
628 drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
629 intel_dp->psr_dpcd[0]);
630
631 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
632 drm_dbg_kms(display->drm,
633 "PSR support not currently available for this panel\n");
634 return;
635 }
636
637 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
638 drm_dbg_kms(display->drm,
639 "Panel lacks power state control, PSR cannot be enabled\n");
640 return;
641 }
642
643 intel_dp->psr.sink_support = true;
644 intel_dp->psr.sink_sync_latency =
645 intel_dp_get_sink_sync_latency(intel_dp);
646
647 if (DISPLAY_VER(display) >= 9 &&
648 intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
649 bool y_req = intel_dp->psr_dpcd[1] &
650 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
651
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
663 intel_dp->psr.sink_psr2_support = y_req &&
664 intel_alpm_aux_wake_supported(intel_dp);
665 drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
666 intel_dp->psr.sink_psr2_support ? "" : "not ");
667 }
668 }
669
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
671 {
672 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
673 sizeof(intel_dp->psr_dpcd));
674 drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
675 &intel_dp->pr_dpcd);
676
677 if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
678 _panel_replay_init_dpcd(intel_dp);
679
680 if (intel_dp->psr_dpcd[0])
681 _psr_init_dpcd(intel_dp);
682
683 if (intel_dp->psr.sink_psr2_support ||
684 intel_dp->psr.sink_panel_replay_su_support)
685 intel_dp_get_su_granularity(intel_dp);
686 }
687
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
689 {
690 struct intel_display *display = to_intel_display(intel_dp);
691 struct drm_i915_private *dev_priv = to_i915(display->drm);
692 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
693 u32 aux_clock_divider, aux_ctl;
694 /* write DP_SET_POWER=D0 */
695 static const u8 aux_msg[] = {
696 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
697 [1] = (DP_SET_POWER >> 8) & 0xff,
698 [2] = DP_SET_POWER & 0xff,
699 [3] = 1 - 1,
700 [4] = DP_SET_POWER_D0,
701 };
702 int i;
703
704 BUILD_BUG_ON(sizeof(aux_msg) > 20);
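	/*
	 * Each AUX data register holds up to 4 message bytes;
	 * intel_dp_aux_pack() packs them MSB-first, matching the layout the
	 * DDI AUX channel data registers expect.
	 */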
705 for (i = 0; i < sizeof(aux_msg); i += 4)
706 intel_de_write(dev_priv,
707 psr_aux_data_reg(display, cpu_transcoder, i >> 2),
708 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
709
710 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
711
712 /* Start with bits set for DDI_AUX_CTL register */
713 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
714 aux_clock_divider);
715
716 /* Select only valid bits for SRD_AUX_CTL */
717 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
718 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
719 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
720 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
721
722 intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
723 aux_ctl);
724 }
725
static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
727 {
728 struct intel_display *display = to_intel_display(intel_dp);
729
730 if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
731 intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
732 return false;
733
734 return panel_replay ?
735 intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
736 intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
737 psr2_su_region_et_global_enabled(intel_dp);
738 }
739
static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
741 const struct intel_crtc_state *crtc_state)
742 {
743 u8 val = DP_PANEL_REPLAY_ENABLE |
744 DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
745 DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
746 DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
747 DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
748 u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
749
750 if (crtc_state->has_sel_update)
751 val |= DP_PANEL_REPLAY_SU_ENABLE;
752
753 if (crtc_state->enable_psr2_su_region_et)
754 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
755
756 if (crtc_state->req_psr2_sdp_prior_scanline)
757 panel_replay_config2 |=
758 DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
759
760 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
761
762 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
763 panel_replay_config2);
764 }
765
static void _psr_enable_sink(struct intel_dp *intel_dp,
767 const struct intel_crtc_state *crtc_state)
768 {
769 struct intel_display *display = to_intel_display(intel_dp);
770 u8 val = 0;
771
772 if (crtc_state->has_sel_update) {
773 val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
774 } else {
775 if (intel_dp->psr.link_standby)
776 val |= DP_PSR_MAIN_LINK_ACTIVE;
777
778 if (DISPLAY_VER(display) >= 8)
779 val |= DP_PSR_CRC_VERIFICATION;
780 }
781
782 if (crtc_state->req_psr2_sdp_prior_scanline)
783 val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
784
785 if (crtc_state->enable_psr2_su_region_et)
786 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
787
788 if (intel_dp->psr.entry_setup_frames > 0)
789 val |= DP_PSR_FRAME_CAPTURE;
790 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
791
792 val |= DP_PSR_ENABLE;
793 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
794 }
795
static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
797 const struct intel_crtc_state *crtc_state)
798 {
799 u8 val;
800
	/*
	 * eDP Panel Replay always uses ALPM.
	 * PSR2 uses ALPM but PSR1 doesn't.
	 */
805 if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
806 !crtc_state->has_sel_update))
807 return;
808
809 val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
810
811 if (crtc_state->has_panel_replay)
812 val |= DP_ALPM_MODE_AUX_LESS;
813
814 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
815 }
816
void intel_psr_enable_sink(struct intel_dp *intel_dp,
818 const struct intel_crtc_state *crtc_state)
819 {
820 intel_psr_enable_sink_alpm(intel_dp, crtc_state);
821
822 crtc_state->has_panel_replay ?
823 _panel_replay_enable_sink(intel_dp, crtc_state) :
824 _psr_enable_sink(intel_dp, crtc_state);
825
826 if (intel_dp_is_edp(intel_dp))
827 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
828 }
829
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
831 {
832 struct intel_display *display = to_intel_display(intel_dp);
833 struct intel_connector *connector = intel_dp->attached_connector;
834 struct drm_i915_private *dev_priv = to_i915(display->drm);
835 u32 val = 0;
836
837 if (DISPLAY_VER(display) >= 11)
838 val |= EDP_PSR_TP4_TIME_0us;
839
840 if (display->params.psr_safest_params) {
841 val |= EDP_PSR_TP1_TIME_2500us;
842 val |= EDP_PSR_TP2_TP3_TIME_2500us;
843 goto check_tp3_sel;
844 }
845
846 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
847 val |= EDP_PSR_TP1_TIME_0us;
848 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
849 val |= EDP_PSR_TP1_TIME_100us;
850 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
851 val |= EDP_PSR_TP1_TIME_500us;
852 else
853 val |= EDP_PSR_TP1_TIME_2500us;
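	/* For example, a 200 us VBT tp1 wakeup time lands in the 500 us bucket above. */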
854
855 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
856 val |= EDP_PSR_TP2_TP3_TIME_0us;
857 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
858 val |= EDP_PSR_TP2_TP3_TIME_100us;
859 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
860 val |= EDP_PSR_TP2_TP3_TIME_500us;
861 else
862 val |= EDP_PSR_TP2_TP3_TIME_2500us;
863
864 /*
865 * WA 0479: hsw,bdw
866 * "Do not skip both TP1 and TP2/TP3"
867 */
868 if (DISPLAY_VER(dev_priv) < 9 &&
869 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
870 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
871 val |= EDP_PSR_TP2_TP3_TIME_100us;
872
873 check_tp3_sel:
874 if (intel_dp_source_supports_tps3(dev_priv) &&
875 drm_dp_tps3_supported(intel_dp->dpcd))
876 val |= EDP_PSR_TP_TP1_TP3;
877 else
878 val |= EDP_PSR_TP_TP1_TP2;
879
880 return val;
881 }
882
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
884 {
885 struct intel_display *display = to_intel_display(intel_dp);
886 struct intel_connector *connector = intel_dp->attached_connector;
887 int idle_frames;
888
889 /* Let's use 6 as the minimum to cover all known cases including the
890 * off-by-one issue that HW has in some cases.
891 */
892 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
893 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
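	/*
	 * Worked example (illustrative values): VBT idle_frames = 2 with
	 * sink_sync_latency = 8 yields max(max(6, 2), 8 + 1) = 9 idle frames.
	 */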
894
895 if (drm_WARN_ON(display->drm, idle_frames > 0xf))
896 idle_frames = 0xf;
897
898 return idle_frames;
899 }
900
static void hsw_activate_psr1(struct intel_dp *intel_dp)
902 {
903 struct intel_display *display = to_intel_display(intel_dp);
904 struct drm_i915_private *dev_priv = to_i915(display->drm);
905 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
906 u32 max_sleep_time = 0x1f;
907 u32 val = EDP_PSR_ENABLE;
908
909 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
910
911 if (DISPLAY_VER(display) < 20)
912 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
913
914 if (IS_HASWELL(dev_priv))
915 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
916
917 if (intel_dp->psr.link_standby)
918 val |= EDP_PSR_LINK_STANDBY;
919
920 val |= intel_psr1_get_tp_time(intel_dp);
921
922 if (DISPLAY_VER(display) >= 8)
923 val |= EDP_PSR_CRC_ENABLE;
924
925 if (DISPLAY_VER(display) >= 20)
926 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
927
928 intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
929 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
930 }
931
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
933 {
934 struct intel_display *display = to_intel_display(intel_dp);
935 struct intel_connector *connector = intel_dp->attached_connector;
936 u32 val = 0;
937
938 if (display->params.psr_safest_params)
939 return EDP_PSR2_TP2_TIME_2500us;
940
941 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
942 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
943 val |= EDP_PSR2_TP2_TIME_50us;
944 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
945 val |= EDP_PSR2_TP2_TIME_100us;
946 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
947 val |= EDP_PSR2_TP2_TIME_500us;
948 else
949 val |= EDP_PSR2_TP2_TIME_2500us;
950
951 return val;
952 }
953
static int psr2_block_count_lines(struct intel_dp *intel_dp)
955 {
956 return intel_dp->alpm_parameters.io_wake_lines < 9 &&
957 intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
958 }
959
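/* A block count of 2 corresponds to 8 lines of block time, 3 to 12 lines. */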
static int psr2_block_count(struct intel_dp *intel_dp)
961 {
962 return psr2_block_count_lines(intel_dp) / 4;
963 }
964
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
966 {
967 u8 frames_before_su_entry;
968
969 frames_before_su_entry = max_t(u8,
970 intel_dp->psr.sink_sync_latency + 1,
971 2);
972
973 /* Entry setup frames must be at least 1 less than frames before SU entry */
974 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
975 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
976
977 return frames_before_su_entry;
978 }
979
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
981 {
982 struct intel_display *display = to_intel_display(intel_dp);
983 struct intel_psr *psr = &intel_dp->psr;
984 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
985
986 if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
987 u32 val = psr->su_region_et_enabled ?
988 LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
989
990 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
991 val |= EDP_PSR2_SU_SDP_SCANLINE;
992
993 intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
994 val);
995 }
996
997 intel_de_rmw(display,
998 PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
999 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1000
1001 intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1002 TRANS_DP2_PANEL_REPLAY_ENABLE);
1003 }
1004
static void hsw_activate_psr2(struct intel_dp *intel_dp)
1006 {
1007 struct intel_display *display = to_intel_display(intel_dp);
1008 struct drm_i915_private *dev_priv = to_i915(display->drm);
1009 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1010 u32 val = EDP_PSR2_ENABLE;
1011 u32 psr_val = 0;
1012
1013 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
1014
1015 if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
1016 val |= EDP_SU_TRACK_ENABLE;
1017
1018 if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1019 val |= EDP_Y_COORDINATE_ENABLE;
1020
1021 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1022
1023 val |= intel_psr2_get_tp_time(intel_dp);
1024
1025 if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1026 if (psr2_block_count(intel_dp) > 2)
1027 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1028 else
1029 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1030 }
1031
1032 /* Wa_22012278275:adl-p */
1033 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1034 static const u8 map[] = {
1035 2, /* 5 lines */
1036 1, /* 6 lines */
1037 0, /* 7 lines */
1038 3, /* 8 lines */
1039 6, /* 9 lines */
1040 5, /* 10 lines */
1041 4, /* 11 lines */
1042 7, /* 12 lines */
1043 };
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
1048 int tmp;
1049
1050 tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1051 TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1052 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1053
1054 tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1055 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1056 } else if (DISPLAY_VER(display) >= 20) {
1057 val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1058 } else if (DISPLAY_VER(display) >= 12) {
1059 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1060 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1061 } else if (DISPLAY_VER(display) >= 9) {
1062 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1063 val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1064 }
1065
1066 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1067 val |= EDP_PSR2_SU_SDP_SCANLINE;
1068
1069 if (DISPLAY_VER(display) >= 20)
1070 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1071
1072 if (intel_dp->psr.psr2_sel_fetch_enabled) {
1073 u32 tmp;
1074
1075 tmp = intel_de_read(display,
1076 PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1077 drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1078 } else if (HAS_PSR2_SEL_FETCH(display)) {
1079 intel_de_write(display,
1080 PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1081 }
1082
1083 if (intel_dp->psr.su_region_et_enabled)
1084 val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1085
	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
1090 intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1091
1092 intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1093 }
1094
1095 static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1097 {
1098 struct drm_i915_private *dev_priv = to_i915(display->drm);
1099
1100 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1101 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1102 else if (DISPLAY_VER(display) >= 12)
1103 return cpu_transcoder == TRANSCODER_A;
1104 else if (DISPLAY_VER(display) >= 9)
1105 return cpu_transcoder == TRANSCODER_EDP;
1106 else
1107 return false;
1108 }
1109
static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1111 {
1112 if (!crtc_state->hw.active)
1113 return 0;
1114
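	/* e.g. a 60 Hz mode gives DIV_ROUND_UP(1000000, 60) = 16667 us per frame */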
1115 return DIV_ROUND_UP(1000 * 1000,
1116 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1117 }
1118
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1120 u32 idle_frames)
1121 {
1122 struct intel_display *display = to_intel_display(intel_dp);
1123 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1124
1125 intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1126 EDP_PSR2_IDLE_FRAMES_MASK,
1127 EDP_PSR2_IDLE_FRAMES(idle_frames));
1128 }
1129
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1131 {
1132 struct intel_display *display = to_intel_display(intel_dp);
1133 struct drm_i915_private *dev_priv = to_i915(display->drm);
1134
1135 psr2_program_idle_frames(intel_dp, 0);
1136 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
1137 }
1138
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1140 {
1141 struct intel_display *display = to_intel_display(intel_dp);
1142 struct drm_i915_private *dev_priv = to_i915(display->drm);
1143
1144 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1145 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1146 }
1147
static void tgl_dc3co_disable_work(struct work_struct *work)
1149 {
1150 struct intel_dp *intel_dp =
1151 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1152
1153 mutex_lock(&intel_dp->psr.lock);
1154 /* If delayed work is pending, it is not idle */
1155 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1156 goto unlock;
1157
1158 tgl_psr2_disable_dc3co(intel_dp);
1159 unlock:
1160 mutex_unlock(&intel_dp->psr.lock);
1161 }
1162
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1164 {
1165 if (!intel_dp->psr.dc3co_exitline)
1166 return;
1167
1168 cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
1170 tgl_psr2_disable_dc3co(intel_dp);
1171 }
1172
1173 static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1175 struct intel_crtc_state *crtc_state)
1176 {
1177 struct intel_display *display = to_intel_display(intel_dp);
1178 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1179 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1180 struct drm_i915_private *dev_priv = to_i915(display->drm);
1181 enum port port = dig_port->base.port;
1182
1183 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1184 return pipe <= PIPE_B && port <= PORT_B;
1185 else
1186 return pipe == PIPE_A && port == PORT_A;
1187 }
1188
1189 static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1191 struct intel_crtc_state *crtc_state)
1192 {
1193 struct intel_display *display = to_intel_display(intel_dp);
1194 struct drm_i915_private *dev_priv = to_i915(display->drm);
1195 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1196 struct i915_power_domains *power_domains = &display->power.domains;
1197 u32 exit_scanlines;
1198
1199 /*
1200 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1201 * disable DC3CO until the changed dc3co activating/deactivating sequence
1202 * is applied. B.Specs:49196
1203 */
1204 return;
1205
	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
1210 if (crtc_state->enable_psr2_sel_fetch)
1211 return;
1212
1213 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1214 return;
1215
1216 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1217 return;
1218
1219 /* Wa_16011303918:adl-p */
1220 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1221 return;
1222
1223 /*
1224 * DC3CO Exit time 200us B.Spec 49196
1225 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1226 */
1227 exit_scanlines =
1228 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
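	/*
	 * Illustrative numbers: a 1920x1080@60 mode (htotal 2200, 148.5 MHz
	 * pixel clock) has a ~14.8 us line time, so 200 us rounds up to
	 * 14 lines, +1 = 15 exit scanlines, i.e. dc3co_exitline = 1080 - 15.
	 */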
1229
1230 if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1231 return;
1232
1233 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1234 }
1235
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1237 struct intel_crtc_state *crtc_state)
1238 {
1239 struct intel_display *display = to_intel_display(intel_dp);
1240
1241 if (!display->params.enable_psr2_sel_fetch &&
1242 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1243 drm_dbg_kms(display->drm,
1244 "PSR2 sel fetch not enabled, disabled by parameter\n");
1245 return false;
1246 }
1247
1248 if (crtc_state->uapi.async_flip) {
1249 drm_dbg_kms(display->drm,
1250 "PSR2 sel fetch not enabled, async flip enabled\n");
1251 return false;
1252 }
1253
1254 return crtc_state->enable_psr2_sel_fetch = true;
1255 }
1256
static bool psr2_granularity_check(struct intel_dp *intel_dp,
1258 struct intel_crtc_state *crtc_state)
1259 {
1260 struct intel_display *display = to_intel_display(intel_dp);
1261 struct drm_i915_private *dev_priv = to_i915(display->drm);
1262 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1263 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1264 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1265 u16 y_granularity = 0;
1266
	/* PSR2 HW only sends full lines so we only need to validate the width */
1268 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1269 return false;
1270
1271 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1272 return false;
1273
1274 /* HW tracking is only aligned to 4 lines */
1275 if (!crtc_state->enable_psr2_sel_fetch)
1276 return intel_dp->psr.su_y_granularity == 4;
1277
	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match the sink requirement if it is a multiple of 4.
	 */
1283 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1284 y_granularity = intel_dp->psr.su_y_granularity;
1285 else if (intel_dp->psr.su_y_granularity <= 2)
1286 y_granularity = 4;
1287 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1288 y_granularity = intel_dp->psr.su_y_granularity;
1289
1290 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1291 return false;
1292
1293 if (crtc_state->dsc.compression_enable &&
1294 vdsc_cfg->slice_height % y_granularity)
1295 return false;
1296
1297 crtc_state->su_y_granularity = y_granularity;
1298 return true;
1299 }
1300
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1302 struct intel_crtc_state *crtc_state)
1303 {
1304 struct intel_display *display = to_intel_display(intel_dp);
1305 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1306 u32 hblank_total, hblank_ns, req_ns;
1307
1308 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1309 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1310
1311 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1312 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
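	/*
	 * Illustrative example: 4 lanes at HBR (port_clock = 270000 kHz) gives
	 * req_ns = ((60 / 4) + 11) * 1000 / 270 ~= 96 ns, which is then
	 * compared against the hblank duration computed above.
	 */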
1313
1314 if ((hblank_ns - req_ns) > 100)
1315 return true;
1316
1317 /* Not supported <13 / Wa_22012279113:adl-p */
1318 if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1319 return false;
1320
1321 crtc_state->req_psr2_sdp_prior_scanline = true;
1322 return true;
1323 }
1324
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1326 const struct drm_display_mode *adjusted_mode)
1327 {
1328 struct intel_display *display = to_intel_display(intel_dp);
1329 int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1330 int entry_setup_frames = 0;
1331
1332 if (psr_setup_time < 0) {
1333 drm_dbg_kms(display->drm,
1334 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1335 intel_dp->psr_dpcd[1]);
1336 return -ETIME;
1337 }
1338
1339 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1340 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1341 if (DISPLAY_VER(display) >= 20) {
1342 /* setup entry frames can be up to 3 frames */
1343 entry_setup_frames = 1;
1344 drm_dbg_kms(display->drm,
1345 "PSR setup entry frames %d\n",
1346 entry_setup_frames);
1347 } else {
1348 drm_dbg_kms(display->drm,
1349 "PSR condition failed: PSR setup time (%d us) too long\n",
1350 psr_setup_time);
1351 return -ETIME;
1352 }
1353 }
1354
1355 return entry_setup_frames;
1356 }
1357
static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1359 const struct intel_crtc_state *crtc_state,
1360 bool aux_less)
1361 {
1362 struct intel_display *display = to_intel_display(intel_dp);
1363 int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1364 crtc_state->hw.adjusted_mode.crtc_vblank_start;
1365 int wake_lines;
1366
1367 if (aux_less)
1368 wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1369 else
1370 wake_lines = DISPLAY_VER(display) < 20 ?
1371 psr2_block_count_lines(intel_dp) :
1372 intel_dp->alpm_parameters.io_wake_lines;
1373
1374 if (crtc_state->req_psr2_sdp_prior_scanline)
1375 vblank -= 1;
1376
1377 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1378 if (vblank < wake_lines)
1379 return false;
1380
1381 return true;
1382 }
1383
static bool alpm_config_valid(struct intel_dp *intel_dp,
1385 const struct intel_crtc_state *crtc_state,
1386 bool aux_less)
1387 {
1388 struct intel_display *display = to_intel_display(intel_dp);
1389
1390 if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1391 drm_dbg_kms(display->drm,
1392 "PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
1393 return false;
1394 }
1395
1396 if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1397 drm_dbg_kms(display->drm,
1398 "PSR2/Panel Replay not enabled, too short vblank time\n");
1399 return false;
1400 }
1401
1402 return true;
1403 }
1404
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1406 struct intel_crtc_state *crtc_state)
1407 {
1408 struct intel_display *display = to_intel_display(intel_dp);
1409 struct drm_i915_private *dev_priv = to_i915(display->drm);
1410 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1411 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1412 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1413
1414 if (!intel_dp->psr.sink_psr2_support)
1415 return false;
1416
	/* JSL and EHL only support eDP 1.3 */
1418 if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1419 drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1420 return false;
1421 }
1422
1423 /* Wa_16011181250 */
1424 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1425 IS_DG2(dev_priv)) {
1426 drm_dbg_kms(display->drm,
1427 "PSR2 is defeatured for this platform\n");
1428 return false;
1429 }
1430
1431 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1432 drm_dbg_kms(display->drm,
1433 "PSR2 not completely functional in this stepping\n");
1434 return false;
1435 }
1436
1437 if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1438 drm_dbg_kms(display->drm,
1439 "PSR2 not supported in transcoder %s\n",
1440 transcoder_name(crtc_state->cpu_transcoder));
1441 return false;
1442 }
1443
1444 /*
1445 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1446 * resolution requires DSC to be enabled, priority is given to DSC
1447 * over PSR2.
1448 */
1449 if (crtc_state->dsc.compression_enable &&
1450 (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1451 drm_dbg_kms(display->drm,
1452 "PSR2 cannot be enabled since DSC is enabled\n");
1453 return false;
1454 }
1455
1456 if (DISPLAY_VER(display) >= 20) {
1457 psr_max_h = crtc_hdisplay;
1458 psr_max_v = crtc_vdisplay;
1459 max_bpp = crtc_state->pipe_bpp;
1460 } else if (IS_DISPLAY_VER(display, 12, 14)) {
1461 psr_max_h = 5120;
1462 psr_max_v = 3200;
1463 max_bpp = 30;
1464 } else if (IS_DISPLAY_VER(display, 10, 11)) {
1465 psr_max_h = 4096;
1466 psr_max_v = 2304;
1467 max_bpp = 24;
1468 } else if (DISPLAY_VER(display) == 9) {
1469 psr_max_h = 3640;
1470 psr_max_v = 2304;
1471 max_bpp = 24;
1472 }
1473
1474 if (crtc_state->pipe_bpp > max_bpp) {
1475 drm_dbg_kms(display->drm,
1476 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1477 crtc_state->pipe_bpp, max_bpp);
1478 return false;
1479 }
1480
1481 /* Wa_16011303918:adl-p */
1482 if (crtc_state->vrr.enable &&
1483 IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1484 drm_dbg_kms(display->drm,
1485 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1486 return false;
1487 }
1488
1489 if (!alpm_config_valid(intel_dp, crtc_state, false))
1490 return false;
1491
1492 if (!crtc_state->enable_psr2_sel_fetch &&
1493 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1494 drm_dbg_kms(display->drm,
1495 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1496 crtc_hdisplay, crtc_vdisplay,
1497 psr_max_h, psr_max_v);
1498 return false;
1499 }
1500
1501 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1502
1503 return true;
1504 }
1505
static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1507 struct intel_crtc_state *crtc_state)
1508 {
1509 struct intel_display *display = to_intel_display(intel_dp);
1510
1511 if (HAS_PSR2_SEL_FETCH(display) &&
1512 !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1513 !HAS_PSR_HW_TRACKING(display)) {
1514 drm_dbg_kms(display->drm,
1515 "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1516 goto unsupported;
1517 }
1518
1519 if (!psr2_global_enabled(intel_dp)) {
1520 drm_dbg_kms(display->drm,
1521 "Selective update disabled by flag\n");
1522 goto unsupported;
1523 }
1524
1525 if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1526 goto unsupported;
1527
1528 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1529 drm_dbg_kms(display->drm,
1530 "Selective update not enabled, SDP indication do not fit in hblank\n");
1531 goto unsupported;
1532 }
1533
1534 if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1535 !intel_dp->psr.sink_panel_replay_su_support))
1536 goto unsupported;
1537
1538 if (crtc_state->crc_enabled) {
1539 drm_dbg_kms(display->drm,
1540 "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1541 goto unsupported;
1542 }
1543
1544 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1545 drm_dbg_kms(display->drm,
1546 "Selective update not enabled, SU granularity not compatible\n");
1547 goto unsupported;
1548 }
1549
1550 crtc_state->enable_psr2_su_region_et =
1551 psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1552
1553 return true;
1554
1555 unsupported:
1556 crtc_state->enable_psr2_sel_fetch = false;
1557 return false;
1558 }
1559
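/*
 * PSR-only (non Panel Replay) checks: VRR must be disabled, the source/sink
 * pair must support PSR and the panel's PSR entry setup time must be met for
 * the adjusted mode.
 */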
1560 static bool _psr_compute_config(struct intel_dp *intel_dp,
1561 struct intel_crtc_state *crtc_state)
1562 {
1563 struct intel_display *display = to_intel_display(intel_dp);
1564 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1565 int entry_setup_frames;
1566
1567 /*
1568 * Current PSR panels don't work reliably with VRR enabled
1569 * So if VRR is enabled, do not enable PSR.
1570 */
1571 if (crtc_state->vrr.enable)
1572 return false;
1573
1574 if (!CAN_PSR(intel_dp))
1575 return false;
1576
1577 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1578
1579 if (entry_setup_frames >= 0) {
1580 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1581 } else {
1582 drm_dbg_kms(display->drm,
1583 "PSR condition failed: PSR setup timing not met\n");
1584 return false;
1585 }
1586
1587 return true;
1588 }
1589
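/*
 * Check whether Panel Replay can be used. For eDP additional constraints
 * apply: pipe A/B only, no 128b/132b link, no HDCP, a valid AUX-less ALPM
 * configuration and no pipe CRC.
 */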
1590 static bool
1591 _panel_replay_compute_config(struct intel_dp *intel_dp,
1592 const struct intel_crtc_state *crtc_state,
1593 const struct drm_connector_state *conn_state)
1594 {
1595 struct intel_display *display = to_intel_display(intel_dp);
1596 struct intel_connector *connector =
1597 to_intel_connector(conn_state->connector);
1598 struct intel_hdcp *hdcp = &connector->hdcp;
1599
1600 if (!CAN_PANEL_REPLAY(intel_dp))
1601 return false;
1602
1603 if (!panel_replay_global_enabled(intel_dp)) {
1604 drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1605 return false;
1606 }
1607
1608 if (!intel_dp_is_edp(intel_dp))
1609 return true;
1610
1611 /* Remaining checks are for eDP only */
1612
1613 if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1614 to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1615 return false;
1616
1617 /* 128b/132b Panel Replay is not supported on eDP */
1618 if (intel_dp_is_uhbr(crtc_state)) {
1619 drm_dbg_kms(display->drm,
1620 "Panel Replay is not supported with 128b/132b\n");
1621 return false;
1622 }
1623
1624 /* HW will not allow Panel Replay on eDP when HDCP enabled */
1625 if (conn_state->content_protection ==
1626 DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1627 (conn_state->content_protection ==
1628 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1629 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1630 drm_dbg_kms(display->drm,
1631 "Panel Replay is not supported with HDCP\n");
1632 return false;
1633 }
1634
1635 if (!alpm_config_valid(intel_dp, crtc_state, true))
1636 return false;
1637
1638 if (crtc_state->crc_enabled) {
1639 drm_dbg_kms(display->drm,
1640 "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1641 return false;
1642 }
1643
1644 return true;
1645 }
1646
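/**
 * intel_psr_compute_config - Compute PSR/Panel Replay configuration
 * @intel_dp: Intel DP
 * @crtc_state: CRTC state
 * @conn_state: connector state
 *
 * Decides whether Panel Replay, PSR and selective update can be used with the
 * given state and records the result in @crtc_state.
 */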
1647 void intel_psr_compute_config(struct intel_dp *intel_dp,
1648 struct intel_crtc_state *crtc_state,
1649 struct drm_connector_state *conn_state)
1650 {
1651 struct intel_display *display = to_intel_display(intel_dp);
1652 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1653
1654 if (!psr_global_enabled(intel_dp)) {
1655 drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1656 return;
1657 }
1658
1659 if (intel_dp->psr.sink_not_reliable) {
1660 drm_dbg_kms(display->drm,
1661 "PSR sink implementation is not reliable\n");
1662 return;
1663 }
1664
1665 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1666 drm_dbg_kms(display->drm,
1667 "PSR condition failed: Interlaced mode enabled\n");
1668 return;
1669 }
1670
1671 /*
1672 * FIXME figure out what is wrong with PSR+joiner and
1673 * fix it. Presumably something related to the fact that
1674 * PSR is a transcoder level feature.
1675 */
1676 if (crtc_state->joiner_pipes) {
1677 drm_dbg_kms(display->drm,
1678 "PSR disabled due to joiner\n");
1679 return;
1680 }
1681
1682 crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1683 crtc_state,
1684 conn_state);
1685
1686 crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1687 _psr_compute_config(intel_dp, crtc_state);
1688
1689 if (!crtc_state->has_psr)
1690 return;
1691
1692 crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1693 }
1694
1695 void intel_psr_get_config(struct intel_encoder *encoder,
1696 struct intel_crtc_state *pipe_config)
1697 {
1698 struct intel_display *display = to_intel_display(encoder);
1699 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1700 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1701 struct intel_dp *intel_dp;
1702 u32 val;
1703
1704 if (!dig_port)
1705 return;
1706
1707 intel_dp = &dig_port->dp;
1708 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1709 return;
1710
1711 mutex_lock(&intel_dp->psr.lock);
1712 if (!intel_dp->psr.enabled)
1713 goto unlock;
1714
1715 if (intel_dp->psr.panel_replay_enabled) {
1716 pipe_config->has_psr = pipe_config->has_panel_replay = true;
1717 } else {
1718 /*
1719 * It is not possible to rely on the EDP_PSR/PSR2_CTL registers here, as
1720 * PSR gets enabled/disabled because of frontbuffer tracking and others.
1721 */
1722 pipe_config->has_psr = true;
1723 }
1724
1725 pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1726 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1727
1728 if (!intel_dp->psr.sel_update_enabled)
1729 goto unlock;
1730
1731 if (HAS_PSR2_SEL_FETCH(display)) {
1732 val = intel_de_read(display,
1733 PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1734 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1735 pipe_config->enable_psr2_sel_fetch = true;
1736 }
1737
1738 pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1739
1740 if (DISPLAY_VER(display) >= 12) {
1741 val = intel_de_read(display,
1742 TRANS_EXITLINE(display, cpu_transcoder));
1743 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1744 }
1745 unlock:
1746 mutex_unlock(&intel_dp->psr.lock);
1747 }
1748
1749 static void intel_psr_activate(struct intel_dp *intel_dp)
1750 {
1751 struct intel_display *display = to_intel_display(intel_dp);
1752 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1753
1754 drm_WARN_ON(display->drm,
1755 transcoder_has_psr2(display, cpu_transcoder) &&
1756 intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1757
1758 drm_WARN_ON(display->drm,
1759 intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1760
1761 drm_WARN_ON(display->drm, intel_dp->psr.active);
1762
1763 lockdep_assert_held(&intel_dp->psr.lock);
1764
1765 /* psr1, psr2 and panel-replay are mutually exclusive. */
1766 if (intel_dp->psr.panel_replay_enabled)
1767 dg2_activate_panel_replay(intel_dp);
1768 else if (intel_dp->psr.sel_update_enabled)
1769 hsw_activate_psr2(intel_dp);
1770 else
1771 hsw_activate_psr1(intel_dp);
1772
1773 intel_dp->psr.active = true;
1774 }
1775
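/* Per-pipe LATENCY_REPORTING_REMOVED bit used for Wa_16013835468. */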
1776 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1777 {
1778 switch (intel_dp->psr.pipe) {
1779 case PIPE_A:
1780 return LATENCY_REPORTING_REMOVED_PIPE_A;
1781 case PIPE_B:
1782 return LATENCY_REPORTING_REMOVED_PIPE_B;
1783 case PIPE_C:
1784 return LATENCY_REPORTING_REMOVED_PIPE_C;
1785 case PIPE_D:
1786 return LATENCY_REPORTING_REMOVED_PIPE_D;
1787 default:
1788 MISSING_CASE(intel_dp->psr.pipe);
1789 return 0;
1790 }
1791 }
1792
1793 /*
1794 * Wa_16013835468
1795 * Wa_14015648006
1796 */
1797 static void wm_optimization_wa(struct intel_dp *intel_dp,
1798 const struct intel_crtc_state *crtc_state)
1799 {
1800 struct intel_display *display = to_intel_display(intel_dp);
1801 bool set_wa_bit = false;
1802
1803 /* Wa_14015648006 */
1804 if (IS_DISPLAY_VER(display, 11, 14))
1805 set_wa_bit |= crtc_state->wm_level_disabled;
1806
1807 /* Wa_16013835468 */
1808 if (DISPLAY_VER(display) == 12)
1809 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1810 crtc_state->hw.adjusted_mode.crtc_vdisplay;
1811
1812 if (set_wa_bit)
1813 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1814 0, wa_16013835468_bit_get(intel_dp));
1815 else
1816 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1817 wa_16013835468_bit_get(intel_dp), 0);
1818 }
1819
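/* Program the source (display controller) side of PSR/Panel Replay. */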
1820 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1821 const struct intel_crtc_state *crtc_state)
1822 {
1823 struct intel_display *display = to_intel_display(intel_dp);
1824 struct drm_i915_private *dev_priv = to_i915(display->drm);
1825 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1826 u32 mask = 0;
1827
1828 /*
1829 * Only HSW and BDW have PSR AUX registers that need to be set up.
1830 * SKL+ use hardcoded values for PSR AUX transactions.
1831 */
1832 if (DISPLAY_VER(display) < 9)
1833 hsw_psr_setup_aux(intel_dp);
1834
1835 /*
1836 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1837 * mask LPSP to avoid a dependency on other drivers that might block
1838 * runtime_pm, besides preventing other HW tracking issues, now that we
1839 * can rely on frontbuffer tracking.
1840 *
1841 * From bspec prior LunarLake:
1842 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1843 * panel replay mode.
1844 *
1845 * From bspec beyond LunarLake:
1846 * Panel Replay on DP: No bits are applicable
1847 * Panel Replay on eDP: All bits are applicable
1848 */
1849 if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1850 mask = EDP_PSR_DEBUG_MASK_HPD;
1851
1852 if (intel_dp_is_edp(intel_dp)) {
1853 mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1854
1855 /*
1856 * For some unknown reason on HSW non-ULT (or at least on
1857 * Dell Latitude E6540) external displays start to flicker
1858 * when PSR is enabled on the eDP. SR/PC6 residency is much
1859 * higher than should be possible with an external display.
1860 * As a workaround leave LPSP unmasked to prevent PSR entry
1861 * when external displays are active.
1862 */
1863 if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
1864 mask |= EDP_PSR_DEBUG_MASK_LPSP;
1865
1866 if (DISPLAY_VER(display) < 20)
1867 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1868
1869 /*
1870 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1871 * registers in order to keep the CURSURFLIVE tricks working :(
1872 */
1873 if (IS_DISPLAY_VER(display, 9, 10))
1874 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1875
1876 /* allow PSR with sprite enabled */
1877 if (IS_HASWELL(dev_priv))
1878 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1879 }
1880
1881 intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1882
1883 psr_irq_control(intel_dp);
1884
1885 /*
1886 * TODO: if future platforms support DC3CO in more than one
1887 * transcoder, EXITLINE will need to be unset when disabling PSR
1888 */
1889 if (intel_dp->psr.dc3co_exitline)
1890 intel_de_rmw(display,
1891 TRANS_EXITLINE(display, cpu_transcoder),
1892 EXITLINE_MASK,
1893 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1894
1895 if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1896 intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1897 intel_dp->psr.psr2_sel_fetch_enabled ?
1898 IGNORE_PSR2_HW_TRACKING : 0);
1899
1900 if (intel_dp_is_edp(intel_dp))
1901 intel_alpm_configure(intel_dp, crtc_state);
1902
1903 /*
1904 * Wa_16013835468
1905 * Wa_14015648006
1906 */
1907 wm_optimization_wa(intel_dp, crtc_state);
1908
1909 if (intel_dp->psr.sel_update_enabled) {
1910 if (DISPLAY_VER(display) == 9)
1911 intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
1912 PSR2_VSC_ENABLE_PROG_HEADER |
1913 PSR2_ADD_VERTICAL_LINE_COUNT);
1914
1915 /*
1916 * Wa_16014451276:adlp,mtl[a0,b0]
1917 * All supported adlp panels have 1-based X granularity; this may
1918 * cause issues if non-supported panels are used.
1919 */
1920 if (!intel_dp->psr.panel_replay_enabled &&
1921 (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1922 IS_ALDERLAKE_P(dev_priv)))
1923 intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1924 0, ADLP_1_BASED_X_GRANULARITY);
1925
1926 /* Wa_16012604467:adlp,mtl[a0,b0] */
1927 if (!intel_dp->psr.panel_replay_enabled &&
1928 IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1929 intel_de_rmw(display,
1930 MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1931 0,
1932 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1933 else if (IS_ALDERLAKE_P(dev_priv))
1934 intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1935 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1936 }
1937 }
1938
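/*
 * Returns false when a stale PSR error interrupt is still pending, in which
 * case PSR must stay disabled. Panel Replay skips this check.
 */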
1939 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1940 {
1941 struct intel_display *display = to_intel_display(intel_dp);
1942 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1943 u32 val;
1944
1945 if (intel_dp->psr.panel_replay_enabled)
1946 goto no_err;
1947
1948 /*
1949 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1950 * will still keep the error set even after the reset done in the
1951 * irq_preinstall and irq_uninstall hooks.
1952 * Enabling PSR in this situation causes the screen to freeze the
1953 * first time the PSR HW tries to activate, so let's keep PSR disabled
1954 * to avoid any rendering problems.
1955 */
1956 val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1957 val &= psr_irq_psr_error_bit_get(intel_dp);
1958 if (val) {
1959 intel_dp->psr.sink_not_reliable = true;
1960 drm_dbg_kms(display->drm,
1961 "PSR interruption error set, not enabling PSR\n");
1962 return false;
1963 }
1964
1965 no_err:
1966 return true;
1967 }
1968
1969 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1970 const struct intel_crtc_state *crtc_state)
1971 {
1972 struct intel_display *display = to_intel_display(intel_dp);
1973 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1974 u32 val;
1975
1976 drm_WARN_ON(display->drm, intel_dp->psr.enabled);
1977
1978 intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
1979 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1980 intel_dp->psr.busy_frontbuffer_bits = 0;
1981 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1982 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1983 /* DC5/DC6 requires at least 6 idle frames */
1984 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1985 intel_dp->psr.dc3co_exit_delay = val;
1986 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1987 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1988 intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
1989 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1990 intel_dp->psr.req_psr2_sdp_prior_scanline =
1991 crtc_state->req_psr2_sdp_prior_scanline;
1992
1993 if (!psr_interrupt_error_check(intel_dp))
1994 return;
1995
1996 if (intel_dp->psr.panel_replay_enabled) {
1997 drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
1998 } else {
1999 drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2000 intel_dp->psr.sel_update_enabled ? "2" : "1");
2001
2002 /*
2003 * Panel Replay has to be enabled before link training, so here this
2004 * is done only for PSR.
2005 */
2006 intel_psr_enable_sink(intel_dp, crtc_state);
2007 }
2008
2009 if (intel_dp_is_edp(intel_dp))
2010 intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2011
2012 intel_psr_enable_source(intel_dp, crtc_state);
2013 intel_dp->psr.enabled = true;
2014 intel_dp->psr.paused = false;
2015
2016 /*
2017 * Link_ok is sticky and set here on PSR enable. We can assume link
2018 * training is complete as we never continue to PSR enable with an
2019 * untrained link. Link_ok is kept set until the first short pulse
2020 * interrupt. This works around panels that report a bad link after
2021 * PSR is enabled.
2022 */
2023 intel_dp->psr.link_ok = true;
2024
2025 intel_psr_activate(intel_dp);
2026 }
2027
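/*
 * Deactivate PSR1/PSR2/Panel Replay on the source side without touching the
 * sink configuration.
 */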
2028 static void intel_psr_exit(struct intel_dp *intel_dp)
2029 {
2030 struct intel_display *display = to_intel_display(intel_dp);
2031 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2032 u32 val;
2033
2034 if (!intel_dp->psr.active) {
2035 if (transcoder_has_psr2(display, cpu_transcoder)) {
2036 val = intel_de_read(display,
2037 EDP_PSR2_CTL(display, cpu_transcoder));
2038 drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2039 }
2040
2041 val = intel_de_read(display,
2042 psr_ctl_reg(display, cpu_transcoder));
2043 drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2044
2045 return;
2046 }
2047
2048 if (intel_dp->psr.panel_replay_enabled) {
2049 intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2050 TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2051 } else if (intel_dp->psr.sel_update_enabled) {
2052 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2053
2054 val = intel_de_rmw(display,
2055 EDP_PSR2_CTL(display, cpu_transcoder),
2056 EDP_PSR2_ENABLE, 0);
2057
2058 drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2059 } else {
2060 val = intel_de_rmw(display,
2061 psr_ctl_reg(display, cpu_transcoder),
2062 EDP_PSR_ENABLE, 0);
2063
2064 drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2065 }
2066 intel_dp->psr.active = false;
2067 }
2068
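/* Wait until the PSR status register reports the idle state. */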
2069 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2070 {
2071 struct intel_display *display = to_intel_display(intel_dp);
2072 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2073 i915_reg_t psr_status;
2074 u32 psr_status_mask;
2075
2076 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2077 intel_dp->psr.panel_replay_enabled)) {
2078 psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2079 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2080 } else {
2081 psr_status = psr_status_reg(display, cpu_transcoder);
2082 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2083 }
2084
2085 /* Wait till PSR is idle */
2086 if (intel_de_wait_for_clear(display, psr_status,
2087 psr_status_mask, 2000))
2088 drm_err(display->drm, "Timed out waiting PSR idle state\n");
2089 }
2090
2091 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2092 {
2093 struct intel_display *display = to_intel_display(intel_dp);
2094 struct drm_i915_private *dev_priv = to_i915(display->drm);
2095 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2096
2097 lockdep_assert_held(&intel_dp->psr.lock);
2098
2099 if (!intel_dp->psr.enabled)
2100 return;
2101
2102 if (intel_dp->psr.panel_replay_enabled)
2103 drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2104 else
2105 drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2106 intel_dp->psr.sel_update_enabled ? "2" : "1");
2107
2108 intel_psr_exit(intel_dp);
2109 intel_psr_wait_exit_locked(intel_dp);
2110
2111 /*
2112 * Wa_16013835468
2113 * Wa_14015648006
2114 */
2115 if (DISPLAY_VER(display) >= 11)
2116 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2117 wa_16013835468_bit_get(intel_dp), 0);
2118
2119 if (intel_dp->psr.sel_update_enabled) {
2120 /* Wa_16012604467:adlp,mtl[a0,b0] */
2121 if (!intel_dp->psr.panel_replay_enabled &&
2122 IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2123 intel_de_rmw(display,
2124 MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2125 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2126 else if (IS_ALDERLAKE_P(dev_priv))
2127 intel_de_rmw(display, CLKGATE_DIS_MISC,
2128 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2129 }
2130
2131 if (intel_dp_is_edp(intel_dp))
2132 intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2133
2134 /* Panel Replay on eDP is always using ALPM aux less. */
2135 if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
2136 intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
2137 ALPM_CTL_ALPM_ENABLE |
2138 ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2139
2140 intel_de_rmw(display,
2141 PORT_ALPM_CTL(cpu_transcoder),
2142 PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2143 }
2144
2145 /* Disable PSR on Sink */
2146 if (!intel_dp->psr.panel_replay_enabled) {
2147 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2148
2149 if (intel_dp->psr.sel_update_enabled)
2150 drm_dp_dpcd_writeb(&intel_dp->aux,
2151 DP_RECEIVER_ALPM_CONFIG, 0);
2152 }
2153
2154 intel_dp->psr.enabled = false;
2155 intel_dp->psr.panel_replay_enabled = false;
2156 intel_dp->psr.sel_update_enabled = false;
2157 intel_dp->psr.psr2_sel_fetch_enabled = false;
2158 intel_dp->psr.su_region_et_enabled = false;
2159 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2160 }
2161
2162 /**
2163 * intel_psr_disable - Disable PSR
2164 * @intel_dp: Intel DP
2165 * @old_crtc_state: old CRTC state
2166 *
2167 * This function needs to be called before disabling the pipe.
2168 */
2169 void intel_psr_disable(struct intel_dp *intel_dp,
2170 const struct intel_crtc_state *old_crtc_state)
2171 {
2172 struct intel_display *display = to_intel_display(intel_dp);
2173
2174 if (!old_crtc_state->has_psr)
2175 return;
2176
2177 if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
2178 return;
2179
2180 mutex_lock(&intel_dp->psr.lock);
2181
2182 intel_psr_disable_locked(intel_dp);
2183
2184 intel_dp->psr.link_ok = false;
2185
2186 mutex_unlock(&intel_dp->psr.lock);
2187 cancel_work_sync(&intel_dp->psr.work);
2188 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2189 }
2190
2191 /**
2192 * intel_psr_pause - Pause PSR
2193 * @intel_dp: Intel DP
2194 *
2195 * This function needs to be called after enabling PSR.
2196 */
2197 void intel_psr_pause(struct intel_dp *intel_dp)
2198 {
2199 struct intel_display *display = to_intel_display(intel_dp);
2200 struct intel_psr *psr = &intel_dp->psr;
2201
2202 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2203 return;
2204
2205 mutex_lock(&psr->lock);
2206
2207 if (!psr->enabled) {
2208 mutex_unlock(&psr->lock);
2209 return;
2210 }
2211
2212 /* If we ever hit this, we will need to add refcount to pause/resume */
2213 drm_WARN_ON(display->drm, psr->paused);
2214
2215 intel_psr_exit(intel_dp);
2216 intel_psr_wait_exit_locked(intel_dp);
2217 psr->paused = true;
2218
2219 mutex_unlock(&psr->lock);
2220
2221 cancel_work_sync(&psr->work);
2222 cancel_delayed_work_sync(&psr->dc3co_work);
2223 }
2224
2225 /**
2226 * intel_psr_resume - Resume PSR
2227 * @intel_dp: Intel DP
2228 *
2229 * This function needs to be called after pausing PSR.
2230 */
2231 void intel_psr_resume(struct intel_dp *intel_dp)
2232 {
2233 struct intel_psr *psr = &intel_dp->psr;
2234
2235 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2236 return;
2237
2238 mutex_lock(&psr->lock);
2239
2240 if (!psr->paused)
2241 goto unlock;
2242
2243 psr->paused = false;
2244 intel_psr_activate(intel_dp);
2245
2246 unlock:
2247 mutex_unlock(&psr->lock);
2248 }
2249
2250 /**
2251 * intel_psr_needs_block_dc_vblank - Check if blocking DC entry is needed
2252 * @crtc_state: CRTC state
2253 *
2254 * We need to block DC6 entry in case of Panel Replay, as enabling the VBI
2255 * doesn't prevent it when Panel Replay is used. Panel Replay switches the
2256 * main link off on DC entry, so vblank interrupts are not fired, which is a
2257 * problem if user-space is polling for vblank events.
2258 */
2259 bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
2260 {
2261 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2262 struct intel_encoder *encoder;
2263
2264 for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2265 struct intel_dp *intel_dp;
2266
2267 if (!intel_encoder_is_dp(encoder))
2268 continue;
2269
2270 intel_dp = enc_to_intel_dp(encoder);
2271
2272 if (intel_dp_is_edp(intel_dp) &&
2273 CAN_PANEL_REPLAY(intel_dp))
2274 return true;
2275 }
2276
2277 return false;
2278 }
2279
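/*
 * The PSR2 manual tracking (selective fetch) register layout differs between
 * ADL-P/display 14+ and older platforms; these helpers return the matching
 * bit definitions.
 */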
2280 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2281 {
2282 struct drm_i915_private *dev_priv = to_i915(display->drm);
2283
2284 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
2285 PSR2_MAN_TRK_CTL_ENABLE;
2286 }
2287
2288 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2289 {
2290 struct drm_i915_private *dev_priv = to_i915(display->drm);
2291
2292 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2293 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2294 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2295 }
2296
2297 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2298 {
2299 struct drm_i915_private *dev_priv = to_i915(display->drm);
2300
2301 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2302 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2303 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2304 }
2305
2306 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2307 {
2308 struct drm_i915_private *dev_priv = to_i915(display->drm);
2309
2310 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2311 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2312 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2313 }
2314
2315 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2316 {
2317 struct intel_display *display = to_intel_display(intel_dp);
2318 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2319
2320 if (intel_dp->psr.psr2_sel_fetch_enabled)
2321 intel_de_write(display,
2322 PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2323 man_trk_ctl_enable_bit_get(display) |
2324 man_trk_ctl_partial_frame_bit_get(display) |
2325 man_trk_ctl_single_full_frame_bit_get(display) |
2326 man_trk_ctl_continuos_full_frame(display));
2327
2328 /*
2329 * Display WA #0884: skl+
2330 * This documented WA for bxt can be safely applied
2331 * broadly so we can force HW tracking to exit PSR
2332 * instead of disabling and re-enabling.
2333 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
2334 * but it makes more sense to write to the currently
2335 * active pipe.
2336 *
2337 * This workaround does not exist for platforms with display 10 or newer,
2338 * but testing proved that it works up to display 13; for anything newer
2339 * than that, testing will be needed.
2340 */
2341 intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2342 }
2343
2344 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2345 {
2346 struct intel_display *display = to_intel_display(crtc_state);
2347 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2348 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2349 struct intel_encoder *encoder;
2350
2351 if (!crtc_state->enable_psr2_sel_fetch)
2352 return;
2353
2354 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2355 crtc_state->uapi.encoder_mask) {
2356 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2357
2358 lockdep_assert_held(&intel_dp->psr.lock);
2359 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2360 return;
2361 break;
2362 }
2363
2364 intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2365 crtc_state->psr2_man_track_ctl);
2366
2367 if (!crtc_state->enable_psr2_su_region_et)
2368 return;
2369
2370 intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2371 crtc_state->pipe_srcsz_early_tpt);
2372 }
2373
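/*
 * Compute the PSR2_MAN_TRK_CTL value for this commit: either a single +
 * continuous full frame update or the SU region taken from
 * crtc_state->psr2_su_area.
 */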
2374 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2375 bool full_update)
2376 {
2377 struct intel_display *display = to_intel_display(crtc_state);
2378 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2379 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2380 u32 val = man_trk_ctl_enable_bit_get(display);
2381
2382 /* SF partial frame enable has to be set even on full update */
2383 val |= man_trk_ctl_partial_frame_bit_get(display);
2384
2385 if (full_update) {
2386 val |= man_trk_ctl_single_full_frame_bit_get(display);
2387 val |= man_trk_ctl_continuos_full_frame(display);
2388 goto exit;
2389 }
2390
2391 if (crtc_state->psr2_su_area.y1 == -1)
2392 goto exit;
2393
2394 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
2395 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2396 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2397 } else {
2398 drm_WARN_ON(crtc_state->uapi.crtc->dev,
2399 crtc_state->psr2_su_area.y1 % 4 ||
2400 crtc_state->psr2_su_area.y2 % 4);
2401
2402 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2403 crtc_state->psr2_su_area.y1 / 4 + 1);
2404 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2405 crtc_state->psr2_su_area.y2 / 4 + 1);
2406 }
2407 exit:
2408 crtc_state->psr2_man_track_ctl = val;
2409 }
2410
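/*
 * Early transport needs PIPE_SRCSZ_ERLY_TPT programmed with the SU region
 * size; returns 0 when early transport is not used or a full update is done.
 */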
2411 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2412 bool full_update)
2413 {
2414 int width, height;
2415
2416 if (!crtc_state->enable_psr2_su_region_et || full_update)
2417 return 0;
2418
2419 width = drm_rect_width(&crtc_state->psr2_su_area);
2420 height = drm_rect_height(&crtc_state->psr2_su_area);
2421
2422 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2423 }
2424
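/*
 * Grow @overlap_damage_area vertically so that it also covers @damage_area,
 * after clipping @damage_area against @pipe_src.
 */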
2425 static void clip_area_update(struct drm_rect *overlap_damage_area,
2426 struct drm_rect *damage_area,
2427 struct drm_rect *pipe_src)
2428 {
2429 if (!drm_rect_intersect(damage_area, pipe_src))
2430 return;
2431
2432 if (overlap_damage_area->y1 == -1) {
2433 overlap_damage_area->y1 = damage_area->y1;
2434 overlap_damage_area->y2 = damage_area->y2;
2435 return;
2436 }
2437
2438 if (damage_area->y1 < overlap_damage_area->y1)
2439 overlap_damage_area->y1 = damage_area->y1;
2440
2441 if (damage_area->y2 > overlap_damage_area->y2)
2442 overlap_damage_area->y2 = damage_area->y2;
2443 }
2444
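/*
 * Align the SU region vertically to the required granularity (DSC slice
 * height when compression is enabled).
 */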
2445 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2446 {
2447 struct intel_display *display = to_intel_display(crtc_state);
2448 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2449 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2450 u16 y_alignment;
2451
2452 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2453 if (crtc_state->dsc.compression_enable &&
2454 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
2455 y_alignment = vdsc_cfg->slice_height;
2456 else
2457 y_alignment = crtc_state->su_y_granularity;
2458
2459 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2460 if (crtc_state->psr2_su_area.y2 % y_alignment)
2461 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2462 y_alignment) + 1) * y_alignment;
2463 }
2464
2465 /*
2466 * When early transport is in use we need to extend SU area to cover
2467 * cursor fully when cursor is in SU area.
2468 */
2469 static void
2470 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2471 struct intel_crtc *crtc,
2472 bool *cursor_in_su_area)
2473 {
2474 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2475 struct intel_plane_state *new_plane_state;
2476 struct intel_plane *plane;
2477 int i;
2478
2479 if (!crtc_state->enable_psr2_su_region_et)
2480 return;
2481
2482 for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2483 struct drm_rect inter;
2484
2485 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2486 continue;
2487
2488 if (plane->id != PLANE_CURSOR)
2489 continue;
2490
2491 if (!new_plane_state->uapi.visible)
2492 continue;
2493
2494 inter = crtc_state->psr2_su_area;
2495 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2496 continue;
2497
2498 clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2499 &crtc_state->pipe_src);
2500 *cursor_in_su_area = true;
2501 }
2502 }
2503
2504 /*
2505 * TODO: Not clear how to handle planes with a negative position;
2506 * also planes are not updated if they have a negative X
2507 * position, so for now do a full update in these cases.
2508 *
2509 * Plane scaling and rotation are not supported by selective fetch and both
2510 * properties can change without a modeset, so they need to be checked at
2511 * every atomic commit.
2512 */
2513 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2514 {
2515 if (plane_state->uapi.dst.y1 < 0 ||
2516 plane_state->uapi.dst.x1 < 0 ||
2517 plane_state->scaler_id >= 0 ||
2518 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2519 return false;
2520
2521 return true;
2522 }
2523
2524 /*
2525 * Check for pipe properties that are not supported by selective fetch.
2526 *
2527 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2528 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2529 * enabled and going to the full update path.
2530 */
2531 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2532 {
2533 if (crtc_state->scaler_state.scaler_id >= 0)
2534 return false;
2535
2536 return true;
2537 }
2538
2539 /* Wa 14019834836 */
2540 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2541 {
2542 struct intel_display *display = to_intel_display(crtc_state);
2543 struct intel_encoder *encoder;
2544 int hactive_limit;
2545
2546 if (crtc_state->psr2_su_area.y1 != 0 ||
2547 crtc_state->psr2_su_area.y2 != 0)
2548 return;
2549
2550 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2551 hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2552 else
2553 hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2554
2555 if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2556 return;
2557
2558 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2559 crtc_state->uapi.encoder_mask) {
2560 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2561
2562 if (!intel_dp_is_edp(intel_dp) &&
2563 intel_dp->psr.panel_replay_enabled &&
2564 intel_dp->psr.sel_update_enabled) {
2565 crtc_state->psr2_su_area.y2++;
2566 return;
2567 }
2568 }
2569 }
2570
2571 static void
2572 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2573 {
2574 struct intel_display *display = to_intel_display(crtc_state);
2575 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2576
2577 /* Wa_14014971492 */
2578 if (!crtc_state->has_panel_replay &&
2579 ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2580 IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
2581 crtc_state->splitter.enable)
2582 crtc_state->psr2_su_area.y1 = 0;
2583
2584 /* Wa 14019834836 */
2585 if (DISPLAY_VER(display) == 30)
2586 intel_psr_apply_pr_link_on_su_wa(crtc_state);
2587 }
2588
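/*
 * Compute the selective update region for this atomic commit and the
 * per-plane selective fetch areas, falling back to a full frame update
 * whenever the region cannot be computed.
 */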
2589 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2590 struct intel_crtc *crtc)
2591 {
2592 struct intel_display *display = to_intel_display(state);
2593 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2594 struct intel_plane_state *new_plane_state, *old_plane_state;
2595 struct intel_plane *plane;
2596 bool full_update = false, cursor_in_su_area = false;
2597 int i, ret;
2598
2599 if (!crtc_state->enable_psr2_sel_fetch)
2600 return 0;
2601
2602 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2603 full_update = true;
2604 goto skip_sel_fetch_set_loop;
2605 }
2606
2607 crtc_state->psr2_su_area.x1 = 0;
2608 crtc_state->psr2_su_area.y1 = -1;
2609 crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2610 crtc_state->psr2_su_area.y2 = -1;
2611
2612 /*
2613 * Calculate the minimal selective fetch area of each plane and the
2614 * pipe damaged area.
2615 * In the next loop the plane selective fetch area will actually be set
2616 * using the whole pipe damaged area.
2617 */
2618 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2619 new_plane_state, i) {
2620 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2621 .x2 = INT_MAX };
2622
2623 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2624 continue;
2625
2626 if (!new_plane_state->uapi.visible &&
2627 !old_plane_state->uapi.visible)
2628 continue;
2629
2630 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2631 full_update = true;
2632 break;
2633 }
2634
2635 /*
2636 * If the visibility changed or the plane moved, mark the whole plane
2637 * area as damaged, as it needs to be completely redrawn in both the
2638 * new and the old position.
2639 */
2640 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2641 !drm_rect_equals(&new_plane_state->uapi.dst,
2642 &old_plane_state->uapi.dst)) {
2643 if (old_plane_state->uapi.visible) {
2644 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2645 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2646 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2647 &crtc_state->pipe_src);
2648 }
2649
2650 if (new_plane_state->uapi.visible) {
2651 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2652 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2653 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2654 &crtc_state->pipe_src);
2655 }
2656 continue;
2657 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2658 /* If alpha changed mark the whole plane area as damaged */
2659 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2660 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2661 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2662 &crtc_state->pipe_src);
2663 continue;
2664 }
2665
2666 src = drm_plane_state_src(&new_plane_state->uapi);
2667 drm_rect_fp_to_int(&src, &src);
2668
2669 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2670 &new_plane_state->uapi, &damaged_area))
2671 continue;
2672
2673 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2674 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2675 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2676 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2677
2678 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2679 }
2680
2681 /*
2682 * TODO: For now we are just using full update in case
2683 * selective fetch area calculation fails. To optimize this we
2684 * should identify cases where this happens and fix the area
2685 * calculation for those.
2686 */
2687 if (crtc_state->psr2_su_area.y1 == -1) {
2688 drm_info_once(display->drm,
2689 "Selective fetch area calculation failed in pipe %c\n",
2690 pipe_name(crtc->pipe));
2691 full_update = true;
2692 }
2693
2694 if (full_update)
2695 goto skip_sel_fetch_set_loop;
2696
2697 intel_psr_apply_su_area_workarounds(crtc_state);
2698
2699 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2700 if (ret)
2701 return ret;
2702
2703 /*
2704 * Adjust su area to cover cursor fully as necessary (early
2705 * transport). This needs to be done after
2706 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2707 * affected planes even when cursor is not updated by itself.
2708 */
2709 intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2710
2711 intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2712
2713 /*
2714 * Now that we have the pipe damaged area, check if it intersects with
2715 * every plane; if it does, set the plane selective fetch area.
2716 */
2717 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2718 new_plane_state, i) {
2719 struct drm_rect *sel_fetch_area, inter;
2720 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2721
2722 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2723 !new_plane_state->uapi.visible)
2724 continue;
2725
2726 inter = crtc_state->psr2_su_area;
2727 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2728 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2729 sel_fetch_area->y1 = -1;
2730 sel_fetch_area->y2 = -1;
2731 /*
2732 * if plane sel fetch was previously enabled ->
2733 * disable it
2734 */
2735 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2736 crtc_state->update_planes |= BIT(plane->id);
2737
2738 continue;
2739 }
2740
2741 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2742 full_update = true;
2743 break;
2744 }
2745
2746 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2747 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2748 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2749 crtc_state->update_planes |= BIT(plane->id);
2750
2751 /*
2752 * Sel_fetch_area is calculated for UV plane. Use
2753 * same area for Y plane as well.
2754 */
2755 if (linked) {
2756 struct intel_plane_state *linked_new_plane_state;
2757 struct drm_rect *linked_sel_fetch_area;
2758
2759 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2760 if (IS_ERR(linked_new_plane_state))
2761 return PTR_ERR(linked_new_plane_state);
2762
2763 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2764 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2765 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2766 crtc_state->update_planes |= BIT(linked->id);
2767 }
2768 }
2769
2770 skip_sel_fetch_set_loop:
2771 psr2_man_trk_ctl_calc(crtc_state, full_update);
2772 crtc_state->pipe_srcsz_early_tpt =
2773 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2774 return 0;
2775 }
2776
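/*
 * Called before the plane updates of an atomic commit: disable PSR when the
 * new state is incompatible with the currently enabled configuration.
 */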
2777 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2778 struct intel_crtc *crtc)
2779 {
2780 struct intel_display *display = to_intel_display(state);
2781 struct drm_i915_private *i915 = to_i915(state->base.dev);
2782 const struct intel_crtc_state *old_crtc_state =
2783 intel_atomic_get_old_crtc_state(state, crtc);
2784 const struct intel_crtc_state *new_crtc_state =
2785 intel_atomic_get_new_crtc_state(state, crtc);
2786 struct intel_encoder *encoder;
2787
2788 if (!HAS_PSR(display))
2789 return;
2790
2791 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2792 old_crtc_state->uapi.encoder_mask) {
2793 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2794 struct intel_psr *psr = &intel_dp->psr;
2795 bool needs_to_disable = false;
2796
2797 mutex_lock(&psr->lock);
2798
2799 /*
2800 * Reasons to disable:
2801 * - PSR disabled in new state
2802 * - All planes will go inactive
2803 * - Changing between PSR versions
2804 * - Region Early Transport changing
2805 * - Display WA #1136: skl, bxt
2806 */
2807 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2808 needs_to_disable |= !new_crtc_state->has_psr;
2809 needs_to_disable |= !new_crtc_state->active_planes;
2810 needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
2811 needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
2812 psr->su_region_et_enabled;
2813 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2814 new_crtc_state->wm_level_disabled;
2815
2816 if (psr->enabled && needs_to_disable)
2817 intel_psr_disable_locked(intel_dp);
2818 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2819 /* Wa_14015648006 */
2820 wm_optimization_wa(intel_dp, new_crtc_state);
2821
2822 mutex_unlock(&psr->lock);
2823 }
2824 }
2825
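/*
 * Called after the plane updates of an atomic commit: (re)enable PSR when the
 * new state allows it and apply the relevant post-update workarounds.
 */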
2826 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2827 struct intel_crtc *crtc)
2828 {
2829 struct intel_display *display = to_intel_display(state);
2830 const struct intel_crtc_state *crtc_state =
2831 intel_atomic_get_new_crtc_state(state, crtc);
2832 struct intel_encoder *encoder;
2833
2834 if (!crtc_state->has_psr)
2835 return;
2836
2837 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2838 crtc_state->uapi.encoder_mask) {
2839 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2840 struct intel_psr *psr = &intel_dp->psr;
2841 bool keep_disabled = false;
2842
2843 mutex_lock(&psr->lock);
2844
2845 drm_WARN_ON(display->drm,
2846 psr->enabled && !crtc_state->active_planes);
2847
2848 keep_disabled |= psr->sink_not_reliable;
2849 keep_disabled |= !crtc_state->active_planes;
2850
2851 /* Display WA #1136: skl, bxt */
2852 keep_disabled |= DISPLAY_VER(display) < 11 &&
2853 crtc_state->wm_level_disabled;
2854
2855 if (!psr->enabled && !keep_disabled)
2856 intel_psr_enable_locked(intel_dp, crtc_state);
2857 else if (psr->enabled && !crtc_state->wm_level_disabled)
2858 /* Wa_14015648006 */
2859 wm_optimization_wa(intel_dp, crtc_state);
2860
2861 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2862 if (crtc_state->crc_enabled && psr->enabled)
2863 psr_force_hw_tracking_exit(intel_dp);
2864
2865 /*
2866 * Clear possible busy bits in case we have
2867 * invalidate -> flip -> flush sequence.
2868 */
2869 intel_dp->psr.busy_frontbuffer_bits = 0;
2870
2871 mutex_unlock(&psr->lock);
2872 }
2873 }
2874
2875 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2876 {
2877 struct intel_display *display = to_intel_display(intel_dp);
2878 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2879
2880 /*
2881 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2882 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2883 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2884 */
2885 return intel_de_wait_for_clear(display,
2886 EDP_PSR2_STATUS(display, cpu_transcoder),
2887 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2888 }
2889
2890 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2891 {
2892 struct intel_display *display = to_intel_display(intel_dp);
2893 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2894
2895 /*
2896 * From bspec: Panel Self Refresh (BDW+)
2897 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2898 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2899 * defensive enough to cover everything.
2900 */
2901 return intel_de_wait_for_clear(display,
2902 psr_status_reg(display, cpu_transcoder),
2903 EDP_PSR_STATUS_STATE_MASK, 50);
2904 }
2905
2906 /**
2907 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2908 * @new_crtc_state: new CRTC state
2909 *
2910 * This function is expected to be called from pipe_update_start() where it is
2911 * not expected to race with PSR enable or disable.
2912 */
2913 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2914 {
2915 struct intel_display *display = to_intel_display(new_crtc_state);
2916 struct intel_encoder *encoder;
2917
2918 if (!new_crtc_state->has_psr)
2919 return;
2920
2921 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2922 new_crtc_state->uapi.encoder_mask) {
2923 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2924 int ret;
2925
2926 lockdep_assert_held(&intel_dp->psr.lock);
2927
2928 if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
2929 continue;
2930
2931 if (intel_dp->psr.sel_update_enabled)
2932 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2933 else
2934 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2935
2936 if (ret)
2937 drm_err(display->drm,
2938 "PSR wait timed out, atomic update may fail\n");
2939 }
2940 }
2941
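/*
 * Drops psr.lock while waiting for the hardware to go idle, re-takes it and
 * returns whether PSR is still enabled and ready to be re-activated.
 */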
2942 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2943 {
2944 struct intel_display *display = to_intel_display(intel_dp);
2945 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2946 i915_reg_t reg;
2947 u32 mask;
2948 int err;
2949
2950 if (!intel_dp->psr.enabled)
2951 return false;
2952
2953 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2954 intel_dp->psr.panel_replay_enabled)) {
2955 reg = EDP_PSR2_STATUS(display, cpu_transcoder);
2956 mask = EDP_PSR2_STATUS_STATE_MASK;
2957 } else {
2958 reg = psr_status_reg(display, cpu_transcoder);
2959 mask = EDP_PSR_STATUS_STATE_MASK;
2960 }
2961
2962 mutex_unlock(&intel_dp->psr.lock);
2963
2964 err = intel_de_wait_for_clear(display, reg, mask, 50);
2965 if (err)
2966 drm_err(display->drm,
2967 "Timed out waiting for PSR Idle for re-enable\n");
2968
2969 /* After the unlocked wait, verify that PSR is still wanted! */
2970 mutex_lock(&intel_dp->psr.lock);
2971 return err == 0 && intel_dp->psr.enabled;
2972 }
2973
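/*
 * Force an atomic commit with mode_changed set on all eDP connectors so that
 * updated PSR debug settings are taken into account via compute_config.
 */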
2974 static int intel_psr_fastset_force(struct intel_display *display)
2975 {
2976 struct drm_connector_list_iter conn_iter;
2977 struct drm_modeset_acquire_ctx ctx;
2978 struct drm_atomic_state *state;
2979 struct drm_connector *conn;
2980 int err = 0;
2981
2982 state = drm_atomic_state_alloc(display->drm);
2983 if (!state)
2984 return -ENOMEM;
2985
2986 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2987
2988 state->acquire_ctx = &ctx;
2989 to_intel_atomic_state(state)->internal = true;
2990
2991 retry:
2992 drm_connector_list_iter_begin(display->drm, &conn_iter);
2993 drm_for_each_connector_iter(conn, &conn_iter) {
2994 struct drm_connector_state *conn_state;
2995 struct drm_crtc_state *crtc_state;
2996
2997 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2998 continue;
2999
3000 conn_state = drm_atomic_get_connector_state(state, conn);
3001 if (IS_ERR(conn_state)) {
3002 err = PTR_ERR(conn_state);
3003 break;
3004 }
3005
3006 if (!conn_state->crtc)
3007 continue;
3008
3009 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3010 if (IS_ERR(crtc_state)) {
3011 err = PTR_ERR(crtc_state);
3012 break;
3013 }
3014
3015 /* Mark mode as changed to trigger a pipe->update() */
3016 crtc_state->mode_changed = true;
3017 }
3018 drm_connector_list_iter_end(&conn_iter);
3019
3020 if (err == 0)
3021 err = drm_atomic_commit(state);
3022
3023 if (err == -EDEADLK) {
3024 drm_atomic_state_clear(state);
3025 err = drm_modeset_backoff(&ctx);
3026 if (!err)
3027 goto retry;
3028 }
3029
3030 drm_modeset_drop_locks(&ctx);
3031 drm_modeset_acquire_fini(&ctx);
3032 drm_atomic_state_put(state);
3033
3034 return err;
3035 }
3036
3037 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3038 {
3039 struct intel_display *display = to_intel_display(intel_dp);
3040 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3041 const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3042 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3043 u32 old_mode, old_disable_bits;
3044 int ret;
3045
3046 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3047 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3048 I915_PSR_DEBUG_MODE_MASK) ||
3049 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3050 drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3051 return -EINVAL;
3052 }
3053
3054 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3055 if (ret)
3056 return ret;
3057
3058 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3059 old_disable_bits = intel_dp->psr.debug &
3060 (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3061 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3062
3063 intel_dp->psr.debug = val;
3064
3065 /*
3066 * Do it right away if it's already enabled, otherwise it will be done
3067 * when enabling the source.
3068 */
3069 if (intel_dp->psr.enabled)
3070 psr_irq_control(intel_dp);
3071
3072 mutex_unlock(&intel_dp->psr.lock);
3073
3074 if (old_mode != mode || old_disable_bits != disable_bits)
3075 ret = intel_psr_fastset_force(display);
3076
3077 return ret;
3078 }
3079
3080 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3081 {
3082 struct intel_psr *psr = &intel_dp->psr;
3083
3084 intel_psr_disable_locked(intel_dp);
3085 psr->sink_not_reliable = true;
3086 /* let's make sure that the sink is awake */
3087 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3088 }
3089
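/*
 * Deferred work used to re-activate PSR after a frontbuffer flush, once the
 * hardware has gone idle and no frontbuffer bits are busy anymore.
 */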
3090 static void intel_psr_work(struct work_struct *work)
3091 {
3092 struct intel_dp *intel_dp =
3093 container_of(work, typeof(*intel_dp), psr.work);
3094
3095 mutex_lock(&intel_dp->psr.lock);
3096
3097 if (!intel_dp->psr.enabled)
3098 goto unlock;
3099
3100 if (READ_ONCE(intel_dp->psr.irq_aux_error))
3101 intel_psr_handle_irq(intel_dp);
3102
3103 /*
3104 * We have to make sure PSR is ready for re-enable,
3105 * otherwise it stays disabled until the next full enable/disable cycle.
3106 * PSR might take some time to get fully disabled
3107 * and be ready for re-enable.
3108 */
3109 if (!__psr_wait_for_idle_locked(intel_dp))
3110 goto unlock;
3111
3112 /*
3113 * The delayed work can race with an invalidate hence we need to
3114 * recheck. Since psr_flush first clears this and then reschedules we
3115 * won't ever miss a flush when bailing out here.
3116 */
3117 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3118 goto unlock;
3119
3120 intel_psr_activate(intel_dp);
3121 unlock:
3122 mutex_unlock(&intel_dp->psr.lock);
3123 }
3124
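/*
 * On invalidate either switch selective fetch to continuous full frame
 * updates or, without selective fetch, exit PSR entirely.
 */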
3125 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3126 {
3127 struct intel_display *display = to_intel_display(intel_dp);
3128 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3129
3130 if (intel_dp->psr.psr2_sel_fetch_enabled) {
3131 u32 val;
3132
3133 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3134 /* Send one update, otherwise lag is observed on screen */
3135 intel_de_write(display,
3136 CURSURFLIVE(display, intel_dp->psr.pipe),
3137 0);
3138 return;
3139 }
3140
3141 val = man_trk_ctl_enable_bit_get(display) |
3142 man_trk_ctl_partial_frame_bit_get(display) |
3143 man_trk_ctl_continuos_full_frame(display);
3144 intel_de_write(display,
3145 PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3146 val);
3147 intel_de_write(display,
3148 CURSURFLIVE(display, intel_dp->psr.pipe), 0);
3149 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3150 } else {
3151 intel_psr_exit(intel_dp);
3152 }
3153 }
3154
3155 /**
3156 * intel_psr_invalidate - Invalidate PSR
3157 * @display: display device
3158 * @frontbuffer_bits: frontbuffer plane tracking bits
3159 * @origin: which operation caused the invalidate
3160 *
3161 * Since the hardware frontbuffer tracking has gaps we need to integrate
3162 * with the software frontbuffer tracking. This function gets called every
3163 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3164 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3165 *
3166 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3167 */
3168 void intel_psr_invalidate(struct intel_display *display,
3169 unsigned frontbuffer_bits, enum fb_op_origin origin)
3170 {
3171 struct intel_encoder *encoder;
3172
3173 if (origin == ORIGIN_FLIP)
3174 return;
3175
3176 for_each_intel_encoder_with_psr(display->drm, encoder) {
3177 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3178 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3179
3180 mutex_lock(&intel_dp->psr.lock);
3181 if (!intel_dp->psr.enabled) {
3182 mutex_unlock(&intel_dp->psr.lock);
3183 continue;
3184 }
3185
3186 pipe_frontbuffer_bits &=
3187 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3188 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3189
3190 if (pipe_frontbuffer_bits)
3191 _psr_invalidate_handle(intel_dp);
3192
3193 mutex_unlock(&intel_dp->psr.lock);
3194 }
3195 }
3196 /*
3197 * Once we rely completely on PSR2 S/W tracking in the future,
3198 * intel_psr_flush() will also invalidate and flush the PSR for ORIGIN_FLIP
3199 * events, therefore tgl_dc3co_flush_locked() will need to be changed
3200 * accordingly.
3201 */
3202 static void
3203 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3204 enum fb_op_origin origin)
3205 {
3206 struct intel_display *display = to_intel_display(intel_dp);
3207 struct drm_i915_private *i915 = to_i915(display->drm);
3208
3209 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3210 !intel_dp->psr.active)
3211 return;
3212
3213 /*
3214 * Every frontbuffer flush/flip event modifies the delay of the delayed work;
3215 * when the delayed work finally runs it means the display has been idle.
3216 */
3217 if (!(frontbuffer_bits &
3218 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3219 return;
3220
3221 tgl_psr2_enable_dc3co(intel_dp);
3222 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3223 intel_dp->psr.dc3co_exit_delay);
3224 }
3225
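/*
 * Flush handling: with selective fetch, either allow selective updates again
 * once no frontbuffer bits are busy, or request a single full frame via HW
 * tracking exit; without selective fetch, force a HW tracking exit and
 * schedule the work to re-activate PSR.
 */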
3226 static void _psr_flush_handle(struct intel_dp *intel_dp)
3227 {
3228 struct intel_display *display = to_intel_display(intel_dp);
3229 struct drm_i915_private *dev_priv = to_i915(display->drm);
3230 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3231
3232 if (intel_dp->psr.psr2_sel_fetch_enabled) {
3233 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3234 /* can we turn CFF off? */
3235 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
3236 u32 val = man_trk_ctl_enable_bit_get(display) |
3237 man_trk_ctl_partial_frame_bit_get(display) |
3238 man_trk_ctl_single_full_frame_bit_get(display) |
3239 man_trk_ctl_continuos_full_frame(display);
3240
3241 /*
3242 * Set psr2_sel_fetch_cff_enabled to false to allow selective
3243 * updates. Still keep the CFF bit enabled, as we don't have a proper
3244 * SU configuration in case an update is sent for any reason after
3245 * the SFF bit gets cleared by the HW on the next vblank.
3246 */
3247 intel_de_write(display,
3248 PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3249 val);
3250 intel_de_write(display,
3251 CURSURFLIVE(display, intel_dp->psr.pipe),
3252 0);
3253 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3254 }
3255 } else {
3256 /*
3257 * continuous full frame is disabled, only a single full
3258 * frame is required
3259 */
3260 psr_force_hw_tracking_exit(intel_dp);
3261 }
3262 } else {
3263 psr_force_hw_tracking_exit(intel_dp);
3264
3265 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3266 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3267 }
3268 }
3269
3270 /**
3271 * intel_psr_flush - Flush PSR
3272 * @display: display device
3273 * @frontbuffer_bits: frontbuffer plane tracking bits
3274 * @origin: which operation caused the flush
3275 *
3276 * Since the hardware frontbuffer tracking has gaps we need to integrate
3277 * with the software frontbuffer tracking. This function gets called every
3278 * time frontbuffer rendering has completed and flushed out to memory. PSR
3279 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3280 *
3281 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3282 */
3283 void intel_psr_flush(struct intel_display *display,
3284 unsigned frontbuffer_bits, enum fb_op_origin origin)
3285 {
3286 struct intel_encoder *encoder;
3287
3288 for_each_intel_encoder_with_psr(display->drm, encoder) {
3289 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3290 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3291
3292 mutex_lock(&intel_dp->psr.lock);
3293 if (!intel_dp->psr.enabled) {
3294 mutex_unlock(&intel_dp->psr.lock);
3295 continue;
3296 }
3297
3298 pipe_frontbuffer_bits &=
3299 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3300 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3301
3302 /*
3303 * If the PSR is paused by an explicit intel_psr_paused() call,
3304 * we have to ensure that the PSR is not activated until
3305 * intel_psr_resume() is called.
3306 */
3307 if (intel_dp->psr.paused)
3308 goto unlock;
3309
3310 if (origin == ORIGIN_FLIP ||
3311 (origin == ORIGIN_CURSOR_UPDATE &&
3312 !intel_dp->psr.psr2_sel_fetch_enabled)) {
3313 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3314 goto unlock;
3315 }
3316
3317 if (pipe_frontbuffer_bits == 0)
3318 goto unlock;
3319
3320 /* By definition flush = invalidate + flush */
3321 _psr_flush_handle(intel_dp);
3322 unlock:
3323 mutex_unlock(&intel_dp->psr.lock);
3324 }
3325 }
3326
3327 /**
3328 * intel_psr_init - Init basic PSR work and mutex.
3329 * @intel_dp: Intel DP
3330 *
3331 * This function is called after connector initialization (which handles
3332 * the connector capabilities) and initializes the basic PSR state for
3333 * each DP encoder.
3334 */
3335 void intel_psr_init(struct intel_dp *intel_dp)
3336 {
3337 struct intel_display *display = to_intel_display(intel_dp);
3338 struct drm_i915_private *dev_priv = to_i915(display->drm);
3339 struct intel_connector *connector = intel_dp->attached_connector;
3340 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3341
3342 if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
3343 return;
3344
3345 /*
3346 * The HSW spec explicitly says PSR is tied to port A.
3347 * BDW+ platforms have an instance of the PSR registers per transcoder,
3348 * but BDW, GEN9 and GEN11 were only validated by the HW team on the eDP
3349 * transcoder.
3350 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3351 * so let's keep it hardcoded to PORT_A for those platforms.
3352 * GEN12 and later support an instance of the PSR registers per transcoder.
3353 */
3354 if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3355 drm_dbg_kms(display->drm,
3356 "PSR condition failed: Port not supported\n");
3357 return;
3358 }
3359
3360 if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
3361 DISPLAY_VER(display) >= 20)
3362 intel_dp->psr.source_panel_replay_support = true;
3363
3364 if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3365 intel_dp->psr.source_support = true;
3366
3367 /* Set link_standby vs. link_off defaults */
3368 if (DISPLAY_VER(display) < 12)
3369 /* For new platforms up to TGL let's respect the VBT again */
3370 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3371
3372 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3373 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3374 mutex_init(&intel_dp->psr.lock);
3375 }
3376
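/*
 * Read the sink status and error status DPCD registers, using the Panel
 * Replay variants of the registers when Panel Replay is enabled.
 */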
3377 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3378 u8 *status, u8 *error_status)
3379 {
3380 struct drm_dp_aux *aux = &intel_dp->aux;
3381 int ret;
3382 unsigned int offset;
3383
3384 offset = intel_dp->psr.panel_replay_enabled ?
3385 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3386
3387 ret = drm_dp_dpcd_readb(aux, offset, status);
3388 if (ret != 1)
3389 return ret;
3390
3391 offset = intel_dp->psr.panel_replay_enabled ?
3392 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3393
3394 ret = drm_dp_dpcd_readb(aux, offset, error_status);
3395 if (ret != 1)
3396 return ret;
3397
3398 *status = *status & DP_PSR_SINK_STATE_MASK;
3399
3400 return 0;
3401 }
3402
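/*
 * Check the sink ALPM status (only relevant with selective update enabled);
 * on a lock timeout error disable PSR, mark the sink as not reliable and
 * clear the error in the sink.
 */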
3403 static void psr_alpm_check(struct intel_dp *intel_dp)
3404 {
3405 struct intel_display *display = to_intel_display(intel_dp);
3406 struct drm_dp_aux *aux = &intel_dp->aux;
3407 struct intel_psr *psr = &intel_dp->psr;
3408 u8 val;
3409 int r;
3410
3411 if (!psr->sel_update_enabled)
3412 return;
3413
3414 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3415 if (r != 1) {
3416 drm_err(display->drm, "Error reading ALPM status\n");
3417 return;
3418 }
3419
3420 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3421 intel_psr_disable_locked(intel_dp);
3422 psr->sink_not_reliable = true;
3423 drm_dbg_kms(display->drm,
3424 "ALPM lock timeout error, disabling PSR\n");
3425
3426 /* Clearing error */
3427 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3428 }
3429 }
3430
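/*
 * Check whether the sink signalled a PSR capability change via the ESI
 * register; if so, disable PSR, mark the sink as not reliable and clear the
 * bit in the sink.
 */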
3431 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3432 {
3433 struct intel_display *display = to_intel_display(intel_dp);
3434 struct intel_psr *psr = &intel_dp->psr;
3435 u8 val;
3436 int r;
3437
3438 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3439 if (r != 1) {
3440 drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3441 return;
3442 }
3443
3444 if (val & DP_PSR_CAPS_CHANGE) {
3445 intel_psr_disable_locked(intel_dp);
3446 psr->sink_not_reliable = true;
3447 drm_dbg_kms(display->drm,
3448 "Sink PSR capability changed, disabling PSR\n");
3449
3450 /* Clearing it */
3451 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3452 }
3453 }
3454
3455 /*
3456 * On common bits:
3457 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3458 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3459 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3460 * this function relies on the PSR definitions
3461 */
3462 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3463 {
3464 struct intel_display *display = to_intel_display(intel_dp);
3465 struct intel_psr *psr = &intel_dp->psr;
3466 u8 status, error_status;
3467 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3468 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3469 DP_PSR_LINK_CRC_ERROR;
3470
3471 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3472 return;
3473
3474 mutex_lock(&psr->lock);
3475
3476 psr->link_ok = false;
3477
3478 if (!psr->enabled)
3479 goto exit;
3480
3481 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3482 drm_err(display->drm,
3483 "Error reading PSR status or error status\n");
3484 goto exit;
3485 }
3486
3487 if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3488 (error_status & errors)) {
3489 intel_psr_disable_locked(intel_dp);
3490 psr->sink_not_reliable = true;
3491 }
3492
3493 if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3494 !error_status)
3495 drm_dbg_kms(display->drm,
3496 "PSR sink internal error, disabling PSR\n");
3497 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3498 drm_dbg_kms(display->drm,
3499 "PSR RFB storage error, disabling PSR\n");
3500 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3501 drm_dbg_kms(display->drm,
3502 "PSR VSC SDP uncorrectable error, disabling PSR\n");
3503 if (error_status & DP_PSR_LINK_CRC_ERROR)
3504 drm_dbg_kms(display->drm,
3505 "PSR Link CRC error, disabling PSR\n");
3506
3507 if (error_status & ~errors)
3508 drm_err(display->drm,
3509 "PSR_ERROR_STATUS unhandled errors %x\n",
3510 error_status & ~errors);
3511 /* clear status register */
3512 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3513
3514 if (!psr->panel_replay_enabled) {
3515 psr_alpm_check(intel_dp);
3516 psr_capability_changed_check(intel_dp);
3517 }
3518
3519 exit:
3520 mutex_unlock(&psr->lock);
3521 }
3522
3523 bool intel_psr_enabled(struct intel_dp *intel_dp)
3524 {
3525 bool ret;
3526
3527 if (!CAN_PSR(intel_dp))
3528 return false;
3529
3530 mutex_lock(&intel_dp->psr.lock);
3531 ret = intel_dp->psr.enabled;
3532 mutex_unlock(&intel_dp->psr.lock);
3533
3534 return ret;
3535 }
3536
3537 /**
3538 * intel_psr_link_ok - return psr->link_ok
3539 * @intel_dp: struct intel_dp
3540 *
3541 * We are seeing unexpected link re-trainings with some panels. This is caused
3542 * by the panel reporting a bad link status after PSR is enabled. Code checking
3543 * the link status can call this to know whether it can ignore a bad link status
3544 * reported by the panel, i.e. if the panel reports a bad link but
3545 * intel_psr_link_ok() says the link is ok, the caller should rely on the latter.
3546 *
3547 * Returns the value of link_ok.
3548 */
3549 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3550 {
3551 bool ret;
3552
3553 if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3554 !intel_dp_is_edp(intel_dp))
3555 return false;
3556
3557 mutex_lock(&intel_dp->psr.lock);
3558 ret = intel_dp->psr.link_ok;
3559 mutex_unlock(&intel_dp->psr.lock);
3560
3561 return ret;
3562 }
3563
3564 /**
3565 * intel_psr_lock - grab PSR lock
3566 * @crtc_state: the crtc state
3567 *
3568 * This is initially meant to be used around CRTC updates, when
3569 * vblank-sensitive registers are updated and we need to grab the lock
3570 * beforehand to avoid vblank evasion.
3571 */
3572 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3573 {
3574 struct intel_display *display = to_intel_display(crtc_state);
3575 struct intel_encoder *encoder;
3576
3577 if (!crtc_state->has_psr)
3578 return;
3579
3580 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3581 crtc_state->uapi.encoder_mask) {
3582 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3583
3584 mutex_lock(&intel_dp->psr.lock);
3585 break;
3586 }
3587 }
3588
3589 /**
3590 * intel_psr_unlock - release PSR lock
3591 * @crtc_state: the crtc state
3592 *
3593 * Release the PSR lock that was held during pipe update.
3594 */
3595 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3596 {
3597 struct intel_display *display = to_intel_display(crtc_state);
3598 struct intel_encoder *encoder;
3599
3600 if (!crtc_state->has_psr)
3601 return;
3602
3603 for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3604 crtc_state->uapi.encoder_mask) {
3605 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3606
3607 mutex_unlock(&intel_dp->psr.lock);
3608 break;
3609 }
3610 }
3611
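/*
 * Decode the source side PSR/PSR2 live state from the hardware status
 * register into a human readable string for debugfs.
 */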
3612 static void
3613 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3614 {
3615 struct intel_display *display = to_intel_display(intel_dp);
3616 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3617 const char *status = "unknown";
3618 u32 val, status_val;
3619
3620 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3621 intel_dp->psr.panel_replay_enabled)) {
3622 static const char * const live_status[] = {
3623 "IDLE",
3624 "CAPTURE",
3625 "CAPTURE_FS",
3626 "SLEEP",
3627 "BUFON_FW",
3628 "ML_UP",
3629 "SU_STANDBY",
3630 "FAST_SLEEP",
3631 "DEEP_SLEEP",
3632 "BUF_ON",
3633 "TG_ON"
3634 };
3635 val = intel_de_read(display,
3636 EDP_PSR2_STATUS(display, cpu_transcoder));
3637 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3638 if (status_val < ARRAY_SIZE(live_status))
3639 status = live_status[status_val];
3640 } else {
3641 static const char * const live_status[] = {
3642 "IDLE",
3643 "SRDONACK",
3644 "SRDENT",
3645 "BUFOFF",
3646 "BUFON",
3647 "AUXACK",
3648 "SRDOFFACK",
3649 "SRDENT_ON",
3650 };
3651 val = intel_de_read(display,
3652 psr_status_reg(display, cpu_transcoder));
3653 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3654 if (status_val < ARRAY_SIZE(live_status))
3655 status = live_status[status_val];
3656 }
3657
3658 seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3659 }
3660
3661 static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3662 struct seq_file *m)
3663 {
3664 struct intel_psr *psr = &intel_dp->psr;
3665
3666 seq_printf(m, "Sink support: PSR = %s",
3667 str_yes_no(psr->sink_support));
3668
3669 if (psr->sink_support)
3670 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3671 if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3672 seq_printf(m, " (Early Transport)");
3673 seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3674 seq_printf(m, ", Panel Replay Selective Update = %s",
3675 str_yes_no(psr->sink_panel_replay_su_support));
3676 if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3677 seq_printf(m, " (Early Transport)");
3678 seq_printf(m, "\n");
3679 }
3680
3681 static void intel_psr_print_mode(struct intel_dp *intel_dp,
3682 struct seq_file *m)
3683 {
3684 struct intel_psr *psr = &intel_dp->psr;
3685 const char *status, *mode, *region_et;
3686
3687 if (psr->enabled)
3688 status = " enabled";
3689 else
3690 status = "disabled";
3691
3692 if (psr->panel_replay_enabled && psr->sel_update_enabled)
3693 mode = "Panel Replay Selective Update";
3694 else if (psr->panel_replay_enabled)
3695 mode = "Panel Replay";
3696 else if (psr->sel_update_enabled)
3697 mode = "PSR2";
3698 else if (psr->enabled)
3699 mode = "PSR1";
3700 else
3701 mode = "";
3702
3703 if (psr->su_region_et_enabled)
3704 region_et = " (Early Transport)";
3705 else
3706 region_et = "";
3707
3708 seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3709 }
3710
3711 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3712 {
3713 struct intel_display *display = to_intel_display(intel_dp);
3714 struct drm_i915_private *dev_priv = to_i915(display->drm);
3715 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3716 struct intel_psr *psr = &intel_dp->psr;
3717 intel_wakeref_t wakeref;
3718 bool enabled;
3719 u32 val, psr2_ctl;
3720
3721 intel_psr_sink_capability(intel_dp, m);
3722
3723 if (!(psr->sink_support || psr->sink_panel_replay_support))
3724 return 0;
3725
3726 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3727 mutex_lock(&psr->lock);
3728
3729 intel_psr_print_mode(intel_dp, m);
3730
3731 if (!psr->enabled) {
3732 seq_printf(m, "PSR sink not reliable: %s\n",
3733 str_yes_no(psr->sink_not_reliable));
3734
3735 goto unlock;
3736 }
3737
3738 if (psr->panel_replay_enabled) {
3739 val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
3740
3741 if (intel_dp_is_edp(intel_dp))
3742 psr2_ctl = intel_de_read(display,
3743 EDP_PSR2_CTL(display,
3744 cpu_transcoder));
3745
3746 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3747 } else if (psr->sel_update_enabled) {
3748 val = intel_de_read(display,
3749 EDP_PSR2_CTL(display, cpu_transcoder));
3750 enabled = val & EDP_PSR2_ENABLE;
3751 } else {
3752 val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
3753 enabled = val & EDP_PSR_ENABLE;
3754 }
3755 seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3756 str_enabled_disabled(enabled), val);
3757 if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
3758 seq_printf(m, "PSR2_CTL: 0x%08x\n",
3759 psr2_ctl);
3760 psr_source_status(intel_dp, m);
3761 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3762 psr->busy_frontbuffer_bits);
3763
3764 /*
3765 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3766 */
3767 val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
3768 seq_printf(m, "Performance counter: %u\n",
3769 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3770
3771 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3772 seq_printf(m, "Last attempted entry at: %lld\n",
3773 psr->last_entry_attempt);
3774 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3775 }
3776
3777 if (psr->sel_update_enabled) {
3778 u32 su_frames_val[3];
3779 int frame;
3780
3781 /*
3782 * Read all 3 registers beforehand to minimize the chance of crossing a
3783 * frame boundary between register reads
3784 */
3785 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3786 val = intel_de_read(display,
3787 PSR2_SU_STATUS(display, cpu_transcoder, frame));
3788 su_frames_val[frame / 3] = val;
3789 }
3790
3791 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3792
3793 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3794 u32 su_blocks;
3795
3796 su_blocks = su_frames_val[frame / 3] &
3797 PSR2_SU_STATUS_MASK(frame);
3798 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3799 seq_printf(m, "%d\t%d\n", frame, su_blocks);
3800 }
3801
3802 seq_printf(m, "PSR2 selective fetch: %s\n",
3803 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3804 }
3805
3806 unlock:
3807 mutex_unlock(&psr->lock);
3808 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3809
3810 return 0;
3811 }
3812
3813 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3814 {
3815 struct intel_display *display = m->private;
3816 struct intel_dp *intel_dp = NULL;
3817 struct intel_encoder *encoder;
3818
3819 if (!HAS_PSR(display))
3820 return -ENODEV;
3821
3822 /* Find the first eDP which supports PSR */
3823 for_each_intel_encoder_with_psr(display->drm, encoder) {
3824 intel_dp = enc_to_intel_dp(encoder);
3825 break;
3826 }
3827
3828 if (!intel_dp)
3829 return -ENODEV;
3830
3831 return intel_psr_status(m, intel_dp);
3832 }
3833 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3834
3835 static int
3836 i915_edp_psr_debug_set(void *data, u64 val)
3837 {
3838 struct intel_display *display = data;
3839 struct drm_i915_private *dev_priv = to_i915(display->drm);
3840 struct intel_encoder *encoder;
3841 intel_wakeref_t wakeref;
3842 int ret = -ENODEV;
3843
3844 if (!HAS_PSR(display))
3845 return ret;
3846
3847 for_each_intel_encoder_with_psr(display->drm, encoder) {
3848 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3849
3850 drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
3851
3852 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3853
3854 // TODO: split to each transcoder's PSR debug state
3855 ret = intel_psr_debug_set(intel_dp, val);
3856
3857 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3858 }
3859
3860 return ret;
3861 }
3862
3863 static int
3864 i915_edp_psr_debug_get(void *data, u64 *val)
3865 {
3866 struct intel_display *display = data;
3867 struct intel_encoder *encoder;
3868
3869 if (!HAS_PSR(display))
3870 return -ENODEV;
3871
3872 for_each_intel_encoder_with_psr(display->drm, encoder) {
3873 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3874
3875 // TODO: split to each transcoder's PSR debug state
3876 *val = READ_ONCE(intel_dp->psr.debug);
3877 return 0;
3878 }
3879
3880 return -ENODEV;
3881 }
3882
3883 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3884 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3885 "%llu\n");
3886
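/*
 * Register the device level PSR debugfs files. A minimal usage sketch,
 * assuming debugfs is mounted at the usual location and the device is DRM
 * minor 0 (hypothetical paths, adjust for your setup):
 *
 *   cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *   echo <mask> > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * where <mask> is a combination of the I915_PSR_DEBUG_* flags.
 */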
3887 void intel_psr_debugfs_register(struct intel_display *display)
3888 {
3889 struct drm_minor *minor = display->drm->primary;
3890
3891 debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3892 display, &i915_edp_psr_debug_fops);
3893
3894 debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3895 display, &i915_edp_psr_status_fops);
3896 }
3897
3898 static const char *psr_mode_str(struct intel_dp *intel_dp)
3899 {
3900 if (intel_dp->psr.panel_replay_enabled)
3901 return "PANEL-REPLAY";
3902 else if (intel_dp->psr.enabled)
3903 return "PSR";
3904
3905 return "unknown";
3906 }
3907
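/*
 * Dump the sink side PSR/Panel Replay status and error status as reported
 * over DPCD, decoding the sink state into a human readable string.
 */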
3908 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3909 {
3910 struct intel_connector *connector = m->private;
3911 struct intel_dp *intel_dp = intel_attached_dp(connector);
3912 static const char * const sink_status[] = {
3913 "inactive",
3914 "transition to active, capture and display",
3915 "active, display from RFB",
3916 "active, capture and display on sink device timings",
3917 "transition to inactive, capture and display, timing re-sync",
3918 "reserved",
3919 "reserved",
3920 "sink internal error",
3921 };
3922 const char *str;
3923 int ret;
3924 u8 status, error_status;
3925
3926 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3927 seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3928 return -ENODEV;
3929 }
3930
3931 if (connector->base.status != connector_status_connected)
3932 return -ENODEV;
3933
3934 ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3935 if (ret)
3936 return ret;
3937
3938 status &= DP_PSR_SINK_STATE_MASK;
3939 if (status < ARRAY_SIZE(sink_status))
3940 str = sink_status[status];
3941 else
3942 str = "unknown";
3943
3944 seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3945
3946 seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3947
3948 if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3949 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3950 DP_PSR_LINK_CRC_ERROR))
3951 seq_puts(m, ":\n");
3952 else
3953 seq_puts(m, "\n");
3954 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3955 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3956 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3957 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3958 if (error_status & DP_PSR_LINK_CRC_ERROR)
3959 seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3960
3961 return ret;
3962 }
3963 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3964
3965 static int i915_psr_status_show(struct seq_file *m, void *data)
3966 {
3967 struct intel_connector *connector = m->private;
3968 struct intel_dp *intel_dp = intel_attached_dp(connector);
3969
3970 return intel_psr_status(m, intel_dp);
3971 }
3972 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3973
3974 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3975 {
3976 struct intel_display *display = to_intel_display(connector);
3977 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3978 struct dentry *root = connector->base.debugfs_entry;
3979
3980 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3981 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3982 return;
3983
3984 debugfs_create_file("i915_psr_sink_status", 0444, root,
3985 connector, &i915_psr_sink_status_fops);
3986
3987 if (HAS_PSR(display) || HAS_DP20(i915))
3988 debugfs_create_file("i915_psr_status", 0444, root,
3989 connector, &i915_psr_status_fops);
3990 }
3991