1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include <linux/debugfs.h> 25 26 #include <drm/drm_atomic_helper.h> 27 #include <drm/drm_damage_helper.h> 28 #include <drm/drm_debugfs.h> 29 #include <drm/drm_print.h> 30 #include <drm/drm_vblank.h> 31 32 #include "i915_reg.h" 33 #include "intel_alpm.h" 34 #include "intel_atomic.h" 35 #include "intel_crtc.h" 36 #include "intel_cursor_regs.h" 37 #include "intel_ddi.h" 38 #include "intel_de.h" 39 #include "intel_display_irq.h" 40 #include "intel_display_regs.h" 41 #include "intel_display_rpm.h" 42 #include "intel_display_types.h" 43 #include "intel_display_utils.h" 44 #include "intel_dmc.h" 45 #include "intel_dp.h" 46 #include "intel_dp_aux.h" 47 #include "intel_dsb.h" 48 #include "intel_frontbuffer.h" 49 #include "intel_hdmi.h" 50 #include "intel_psr.h" 51 #include "intel_psr_regs.h" 52 #include "intel_snps_phy.h" 53 #include "intel_step.h" 54 #include "intel_vblank.h" 55 #include "intel_vdsc.h" 56 #include "intel_vrr.h" 57 #include "skl_universal_plane.h" 58 59 /** 60 * DOC: Panel Self Refresh (PSR/SRD) 61 * 62 * Since Haswell Display controller supports Panel Self-Refresh on display 63 * panels witch have a remote frame buffer (RFB) implemented according to PSR 64 * spec in eDP1.3. PSR feature allows the display to go to lower standby states 65 * when system is idle but display is on as it eliminates display refresh 66 * request to DDR memory completely as long as the frame buffer for that 67 * display is unchanged. 68 * 69 * Panel Self Refresh must be supported by both Hardware (source) and 70 * Panel (sink). 71 * 72 * PSR saves power by caching the framebuffer in the panel RFB, which allows us 73 * to power down the link and memory controller. For DSI panels the same idea 74 * is called "manual mode". 75 * 76 * The implementation uses the hardware-based PSR support which automatically 77 * enters/exits self-refresh mode. The hardware takes care of sending the 78 * required DP aux message and could even retrain the link (that part isn't 79 * enabled yet though). The hardware also keeps track of any frontbuffer 80 * changes to know when to exit self-refresh mode again. Unfortunately that 81 * part doesn't work too well, hence why the i915 PSR support uses the 82 * software frontbuffer tracking to make sure it doesn't miss a screen 83 * update. For this integration intel_psr_invalidate() and intel_psr_flush() 84 * get called by the frontbuffer tracking code. Note that because of locking 85 * issues the self-refresh re-enable code is done from a work queue, which 86 * must be correctly synchronized/cancelled when shutting down the pipe." 87 * 88 * DC3CO (DC3 clock off) 89 * 90 * On top of PSR2, GEN12 adds a intermediate power savings state that turns 91 * clock off automatically during PSR2 idle state. 92 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep 93 * entry/exit allows the HW to enter a low-power state even when page flipping 94 * periodically (for instance a 30fps video playback scenario). 95 * 96 * Every time a flips occurs PSR2 will get out of deep sleep state(if it was), 97 * so DC3CO is enabled and tgl_dc3co_disable_work is schedule to run after 6 98 * frames, if no other flip occurs and the function above is executed, DC3CO is 99 * disabled and PSR2 is configured to enter deep sleep, resetting again in case 100 * of another flip. 101 * Front buffer modifications do not trigger DC3CO activation on purpose as it 102 * would bring a lot of complexity and most of the moderns systems will only 103 * use page flips. 104 */ 105 106 /* 107 * Description of PSR mask bits: 108 * 109 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl): 110 * 111 * When unmasked (nearly) all display register writes (eg. even 112 * SWF) trigger a PSR exit. Some registers are excluded from this 113 * and they have a more specific mask (described below). On icl+ 114 * this bit no longer exists and is effectively always set. 115 * 116 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+): 117 * 118 * When unmasked (nearly) all pipe/plane register writes 119 * trigger a PSR exit. Some plane registers are excluded from this 120 * and they have a more specific mask (described below). 121 * 122 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+): 123 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw): 124 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw): 125 * 126 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit. 127 * SPR_SURF/CURBASE are not included in this and instead are 128 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or 129 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw). 130 * 131 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw): 132 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw): 133 * 134 * When unmasked PSR is blocked as long as the sprite 135 * plane is enabled. skl+ with their universal planes no 136 * longer have a mask bit like this, and no plane being 137 * enabledb blocks PSR. 138 * 139 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw): 140 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw): 141 * 142 * When umasked CURPOS writes trigger a PSR exit. On skl+ 143 * this doesn't exit but CURPOS is included in the 144 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask. 145 * 146 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+): 147 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw): 148 * 149 * When unmasked PSR is blocked as long as vblank and/or vsync 150 * interrupt is unmasked in IMR *and* enabled in IER. 151 * 152 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+): 153 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw): 154 * 155 * Selectcs whether PSR exit generates an extra vblank before 156 * the first frame is transmitted. Also note the opposite polarity 157 * if the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank, 158 * unmasked==do not generate the extra vblank). 159 * 160 * With DC states enabled the extra vblank happens after link training, 161 * with DC states disabled it happens immediately upuon PSR exit trigger. 162 * No idea as of now why there is a difference. HSW/BDW (which don't 163 * even have DMC) always generate it after link training. Go figure. 164 * 165 * Unfortunately CHICKEN_TRANS itself seems to be double buffered 166 * and thus won't latch until the first vblank. So with DC states 167 * enabled the register effectively uses the reset value during DC5 168 * exit+PSR exit sequence, and thus the bit does nothing until 169 * latched by the vblank that it was trying to prevent from being 170 * generated in the first place. So we should probably call this 171 * one a chicken/egg bit instead on skl+. 172 * 173 * In standby mode (as opposed to link-off) this makes no difference 174 * as the timing generator keeps running the whole time generating 175 * normal periodic vblanks. 176 * 177 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw, 178 * and doing so makes the behaviour match the skl+ reset value. 179 * 180 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw): 181 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw): 182 * 183 * On BDW without this bit is no vblanks whatsoever are 184 * generated after PSR exit. On HSW this has no apparent effect. 185 * WaPsrDPRSUnmaskVBlankInSRD says to set this. 186 * 187 * The rest of the bits are more self-explanatory and/or 188 * irrelevant for normal operation. 189 * 190 * Description of intel_crtc_state variables. has_psr, has_panel_replay and 191 * has_sel_update: 192 * 193 * has_psr (alone): PSR1 194 * has_psr + has_sel_update: PSR2 195 * has_psr + has_panel_replay: Panel Replay 196 * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update 197 * 198 * Description of some intel_psr variables. enabled, panel_replay_enabled, 199 * sel_update_enabled 200 * 201 * enabled (alone): PSR1 202 * enabled + sel_update_enabled: PSR2 203 * enabled + panel_replay_enabled: Panel Replay 204 * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU 205 */ 206 207 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ 208 (intel_dp)->psr.source_support) 209 210 bool intel_encoder_can_psr(struct intel_encoder *encoder) 211 { 212 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST) 213 return CAN_PSR(enc_to_intel_dp(encoder)) || 214 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder)); 215 else 216 return false; 217 } 218 219 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder, 220 const struct intel_crtc_state *crtc_state) 221 { 222 /* 223 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever 224 * the output is enabled. For non-eDP outputs the main link is always 225 * on, hence it doesn't require the HW initiated AUX wake-up signaling used 226 * for eDP. 227 * 228 * TODO: 229 * - Consider leaving AUX IO disabled for eDP / PR as well, in case 230 * the ALPM with main-link off mode is not enabled. 231 * - Leave AUX IO enabled for DP / PR, once support for ALPM with 232 * main-link off mode is added for it and this mode gets enabled. 233 */ 234 return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && 235 intel_encoder_can_psr(encoder); 236 } 237 238 static bool psr_global_enabled(struct intel_dp *intel_dp) 239 { 240 struct intel_connector *connector = intel_dp->attached_connector; 241 242 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 243 case I915_PSR_DEBUG_DEFAULT: 244 return intel_dp_is_edp(intel_dp) ? 245 connector->panel.vbt.psr.enable : true; 246 case I915_PSR_DEBUG_DISABLE: 247 return false; 248 default: 249 return true; 250 } 251 } 252 253 static bool sel_update_global_enabled(struct intel_dp *intel_dp) 254 { 255 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 256 case I915_PSR_DEBUG_DISABLE: 257 case I915_PSR_DEBUG_FORCE_PSR1: 258 return false; 259 default: 260 return true; 261 } 262 } 263 264 static bool panel_replay_global_enabled(struct intel_dp *intel_dp) 265 { 266 struct intel_display *display = to_intel_display(intel_dp); 267 268 return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) && 269 display->params.enable_panel_replay; 270 } 271 272 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp) 273 { 274 struct intel_display *display = to_intel_display(intel_dp); 275 276 return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR : 277 EDP_PSR_ERROR(intel_dp->psr.transcoder); 278 } 279 280 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp) 281 { 282 struct intel_display *display = to_intel_display(intel_dp); 283 284 return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT : 285 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder); 286 } 287 288 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp) 289 { 290 struct intel_display *display = to_intel_display(intel_dp); 291 292 return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY : 293 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder); 294 } 295 296 static u32 psr_irq_mask_get(struct intel_dp *intel_dp) 297 { 298 struct intel_display *display = to_intel_display(intel_dp); 299 300 return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK : 301 EDP_PSR_MASK(intel_dp->psr.transcoder); 302 } 303 304 static i915_reg_t psr_ctl_reg(struct intel_display *display, 305 enum transcoder cpu_transcoder) 306 { 307 if (DISPLAY_VER(display) >= 8) 308 return EDP_PSR_CTL(display, cpu_transcoder); 309 else 310 return HSW_SRD_CTL; 311 } 312 313 static i915_reg_t psr_debug_reg(struct intel_display *display, 314 enum transcoder cpu_transcoder) 315 { 316 if (DISPLAY_VER(display) >= 8) 317 return EDP_PSR_DEBUG(display, cpu_transcoder); 318 else 319 return HSW_SRD_DEBUG; 320 } 321 322 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display, 323 enum transcoder cpu_transcoder) 324 { 325 if (DISPLAY_VER(display) >= 8) 326 return EDP_PSR_PERF_CNT(display, cpu_transcoder); 327 else 328 return HSW_SRD_PERF_CNT; 329 } 330 331 static i915_reg_t psr_status_reg(struct intel_display *display, 332 enum transcoder cpu_transcoder) 333 { 334 if (DISPLAY_VER(display) >= 8) 335 return EDP_PSR_STATUS(display, cpu_transcoder); 336 else 337 return HSW_SRD_STATUS; 338 } 339 340 static i915_reg_t psr_imr_reg(struct intel_display *display, 341 enum transcoder cpu_transcoder) 342 { 343 if (DISPLAY_VER(display) >= 12) 344 return TRANS_PSR_IMR(display, cpu_transcoder); 345 else 346 return EDP_PSR_IMR; 347 } 348 349 static i915_reg_t psr_iir_reg(struct intel_display *display, 350 enum transcoder cpu_transcoder) 351 { 352 if (DISPLAY_VER(display) >= 12) 353 return TRANS_PSR_IIR(display, cpu_transcoder); 354 else 355 return EDP_PSR_IIR; 356 } 357 358 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display, 359 enum transcoder cpu_transcoder) 360 { 361 if (DISPLAY_VER(display) >= 8) 362 return EDP_PSR_AUX_CTL(display, cpu_transcoder); 363 else 364 return HSW_SRD_AUX_CTL; 365 } 366 367 static i915_reg_t psr_aux_data_reg(struct intel_display *display, 368 enum transcoder cpu_transcoder, int i) 369 { 370 if (DISPLAY_VER(display) >= 8) 371 return EDP_PSR_AUX_DATA(display, cpu_transcoder, i); 372 else 373 return HSW_SRD_AUX_DATA(i); 374 } 375 376 static void psr_irq_control(struct intel_dp *intel_dp) 377 { 378 struct intel_display *display = to_intel_display(intel_dp); 379 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 380 u32 mask; 381 382 if (intel_dp->psr.panel_replay_enabled) 383 return; 384 385 mask = psr_irq_psr_error_bit_get(intel_dp); 386 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ) 387 mask |= psr_irq_post_exit_bit_get(intel_dp) | 388 psr_irq_pre_entry_bit_get(intel_dp); 389 390 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder), 391 psr_irq_mask_get(intel_dp), ~mask); 392 } 393 394 static void psr_event_print(struct intel_display *display, 395 u32 val, bool sel_update_enabled) 396 { 397 drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val); 398 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) 399 drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n"); 400 if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled) 401 drm_dbg_kms(display->drm, "\tPSR2 disabled\n"); 402 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) 403 drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n"); 404 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) 405 drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n"); 406 if (val & PSR_EVENT_GRAPHICS_RESET) 407 drm_dbg_kms(display->drm, "\tGraphics reset\n"); 408 if (val & PSR_EVENT_PCH_INTERRUPT) 409 drm_dbg_kms(display->drm, "\tPCH interrupt\n"); 410 if (val & PSR_EVENT_MEMORY_UP) 411 drm_dbg_kms(display->drm, "\tMemory up\n"); 412 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) 413 drm_dbg_kms(display->drm, "\tFront buffer modification\n"); 414 if (val & PSR_EVENT_WD_TIMER_EXPIRE) 415 drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n"); 416 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) 417 drm_dbg_kms(display->drm, "\tPIPE registers updated\n"); 418 if (val & PSR_EVENT_REGISTER_UPDATE) 419 drm_dbg_kms(display->drm, "\tRegister updated\n"); 420 if (val & PSR_EVENT_HDCP_ENABLE) 421 drm_dbg_kms(display->drm, "\tHDCP enabled\n"); 422 if (val & PSR_EVENT_KVMR_SESSION_ENABLE) 423 drm_dbg_kms(display->drm, "\tKVMR session enabled\n"); 424 if (val & PSR_EVENT_VBI_ENABLE) 425 drm_dbg_kms(display->drm, "\tVBI enabled\n"); 426 if (val & PSR_EVENT_LPSP_MODE_EXIT) 427 drm_dbg_kms(display->drm, "\tLPSP mode exited\n"); 428 if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled) 429 drm_dbg_kms(display->drm, "\tPSR disabled\n"); 430 } 431 432 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) 433 { 434 struct intel_display *display = to_intel_display(intel_dp); 435 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 436 ktime_t time_ns = ktime_get(); 437 438 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) { 439 intel_dp->psr.last_entry_attempt = time_ns; 440 drm_dbg_kms(display->drm, 441 "[transcoder %s] PSR entry attempt in 2 vblanks\n", 442 transcoder_name(cpu_transcoder)); 443 } 444 445 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) { 446 intel_dp->psr.last_exit = time_ns; 447 drm_dbg_kms(display->drm, 448 "[transcoder %s] PSR exit completed\n", 449 transcoder_name(cpu_transcoder)); 450 451 if (DISPLAY_VER(display) >= 9) { 452 u32 val; 453 454 val = intel_de_rmw(display, 455 PSR_EVENT(display, cpu_transcoder), 456 0, 0); 457 458 psr_event_print(display, val, intel_dp->psr.sel_update_enabled); 459 } 460 } 461 462 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) { 463 drm_warn(display->drm, "[transcoder %s] PSR aux error\n", 464 transcoder_name(cpu_transcoder)); 465 466 intel_dp->psr.irq_aux_error = true; 467 468 /* 469 * If this interruption is not masked it will keep 470 * interrupting so fast that it prevents the scheduled 471 * work to run. 472 * Also after a PSR error, we don't want to arm PSR 473 * again so we don't care about unmask the interruption 474 * or unset irq_aux_error. 475 */ 476 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder), 477 0, psr_irq_psr_error_bit_get(intel_dp)); 478 479 queue_work(display->wq.unordered, &intel_dp->psr.work); 480 } 481 } 482 483 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) 484 { 485 struct intel_display *display = to_intel_display(intel_dp); 486 u8 val = 8; /* assume the worst if we can't read the value */ 487 488 if (drm_dp_dpcd_readb(&intel_dp->aux, 489 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) 490 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; 491 else 492 drm_dbg_kms(display->drm, 493 "Unable to get sink synchronization latency, assuming 8 frames\n"); 494 return val; 495 } 496 497 static void _psr_compute_su_granularity(struct intel_dp *intel_dp, 498 struct intel_connector *connector) 499 { 500 struct intel_display *display = to_intel_display(intel_dp); 501 ssize_t r; 502 __le16 w; 503 u8 y; 504 505 /* 506 * If sink don't have specific granularity requirements set legacy 507 * ones. 508 */ 509 if (!(connector->dp.psr_caps.dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) { 510 /* As PSR2 HW sends full lines, we do not care about x granularity */ 511 w = cpu_to_le16(4); 512 y = 4; 513 goto exit; 514 } 515 516 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, sizeof(w)); 517 if (r != sizeof(w)) 518 drm_dbg_kms(display->drm, 519 "Unable to read selective update x granularity\n"); 520 /* 521 * Spec says that if the value read is 0 the default granularity should 522 * be used instead. 523 */ 524 if (r != sizeof(w) || w == 0) 525 w = cpu_to_le16(4); 526 527 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1); 528 if (r != 1) { 529 drm_dbg_kms(display->drm, 530 "Unable to read selective update y granularity\n"); 531 y = 4; 532 } 533 if (y == 0) 534 y = 1; 535 536 exit: 537 connector->dp.psr_caps.su_w_granularity = le16_to_cpu(w); 538 connector->dp.psr_caps.su_y_granularity = y; 539 } 540 541 static enum intel_panel_replay_dsc_support 542 compute_pr_dsc_support(struct intel_connector *connector) 543 { 544 u8 pr_dsc_mode; 545 u8 val; 546 547 val = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)]; 548 pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val); 549 550 switch (pr_dsc_mode) { 551 case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY: 552 return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY; 553 case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED: 554 return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE; 555 default: 556 MISSING_CASE(pr_dsc_mode); 557 fallthrough; 558 case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED: 559 case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED: 560 return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED; 561 } 562 } 563 564 static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support) 565 { 566 switch (dsc_support) { 567 case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED: 568 return "not supported"; 569 case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY: 570 return "full frame only"; 571 case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE: 572 return "selective update"; 573 default: 574 MISSING_CASE(dsc_support); 575 return "n/a"; 576 }; 577 } 578 579 static void _panel_replay_compute_su_granularity(struct intel_connector *connector) 580 { 581 u16 w; 582 u8 y; 583 584 if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] & 585 DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED)) { 586 w = 4; 587 y = 4; 588 goto exit; 589 } 590 591 /* 592 * Spec says that if the value read is 0 the default granularity should 593 * be used instead. 594 */ 595 w = le16_to_cpu(*(__le16 *)&connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_X_GRANULARITY)]) ? : 4; 596 y = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_Y_GRANULARITY)] ? : 1; 597 598 exit: 599 connector->dp.panel_replay_caps.su_w_granularity = w; 600 connector->dp.panel_replay_caps.su_y_granularity = y; 601 } 602 603 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 604 { 605 struct intel_display *display = to_intel_display(intel_dp); 606 int ret; 607 608 /* TODO: Enable Panel Replay on MST once it's properly implemented. */ 609 if (intel_dp->mst_detect == DRM_DP_MST) 610 return; 611 612 ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT, 613 &connector->dp.panel_replay_caps.dpcd, 614 sizeof(connector->dp.panel_replay_caps.dpcd)); 615 if (ret < 0) 616 return; 617 618 if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & 619 DP_PANEL_REPLAY_SUPPORT)) 620 return; 621 622 if (intel_dp_is_edp(intel_dp)) { 623 if (!intel_alpm_aux_less_wake_supported(intel_dp)) { 624 drm_dbg_kms(display->drm, 625 "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n"); 626 return; 627 } 628 629 if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & 630 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) { 631 drm_dbg_kms(display->drm, 632 "Panel doesn't support early transport, eDP Panel Replay not possible\n"); 633 return; 634 } 635 } 636 637 connector->dp.panel_replay_caps.support = true; 638 intel_dp->psr.sink_panel_replay_support = true; 639 640 if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & 641 DP_PANEL_REPLAY_SU_SUPPORT) { 642 connector->dp.panel_replay_caps.su_support = true; 643 644 _panel_replay_compute_su_granularity(connector); 645 } 646 647 connector->dp.panel_replay_caps.dsc_support = compute_pr_dsc_support(connector); 648 649 drm_dbg_kms(display->drm, 650 "Panel replay %sis supported by panel (in DSC mode: %s)\n", 651 connector->dp.panel_replay_caps.su_support ? 652 "selective_update " : "", 653 panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support)); 654 } 655 656 static void _psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 657 { 658 struct intel_display *display = to_intel_display(intel_dp); 659 int ret; 660 661 ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, connector->dp.psr_caps.dpcd, 662 sizeof(connector->dp.psr_caps.dpcd)); 663 if (ret < 0) 664 return; 665 666 if (!connector->dp.psr_caps.dpcd[0]) 667 return; 668 669 drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n", 670 connector->dp.psr_caps.dpcd[0]); 671 672 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { 673 drm_dbg_kms(display->drm, 674 "PSR support not currently available for this panel\n"); 675 return; 676 } 677 678 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 679 drm_dbg_kms(display->drm, 680 "Panel lacks power state control, PSR cannot be enabled\n"); 681 return; 682 } 683 684 connector->dp.psr_caps.support = true; 685 intel_dp->psr.sink_support = true; 686 687 connector->dp.psr_caps.sync_latency = intel_dp_get_sink_sync_latency(intel_dp); 688 689 if (DISPLAY_VER(display) >= 9 && 690 connector->dp.psr_caps.dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) { 691 bool y_req = connector->dp.psr_caps.dpcd[1] & 692 DP_PSR2_SU_Y_COORDINATE_REQUIRED; 693 694 /* 695 * All panels that supports PSR version 03h (PSR2 + 696 * Y-coordinate) can handle Y-coordinates in VSC but we are 697 * only sure that it is going to be used when required by the 698 * panel. This way panel is capable to do selective update 699 * without a aux frame sync. 700 * 701 * To support PSR version 02h and PSR version 03h without 702 * Y-coordinate requirement panels we would need to enable 703 * GTC first. 704 */ 705 connector->dp.psr_caps.su_support = y_req && 706 intel_alpm_aux_wake_supported(intel_dp); 707 drm_dbg_kms(display->drm, "PSR2 %ssupported\n", 708 connector->dp.psr_caps.su_support ? "" : "not "); 709 } 710 711 if (connector->dp.psr_caps.su_support) 712 _psr_compute_su_granularity(intel_dp, connector); 713 } 714 715 void intel_psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 716 { 717 _psr_init_dpcd(intel_dp, connector); 718 719 _panel_replay_init_dpcd(intel_dp, connector); 720 } 721 722 static void hsw_psr_setup_aux(struct intel_dp *intel_dp) 723 { 724 struct intel_display *display = to_intel_display(intel_dp); 725 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 726 u32 aux_clock_divider, aux_ctl; 727 /* write DP_SET_POWER=D0 */ 728 static const u8 aux_msg[] = { 729 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf), 730 [1] = (DP_SET_POWER >> 8) & 0xff, 731 [2] = DP_SET_POWER & 0xff, 732 [3] = 1 - 1, 733 [4] = DP_SET_POWER_D0, 734 }; 735 int i; 736 737 BUILD_BUG_ON(sizeof(aux_msg) > 20); 738 for (i = 0; i < sizeof(aux_msg); i += 4) 739 intel_de_write(display, 740 psr_aux_data_reg(display, cpu_transcoder, i >> 2), 741 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i)); 742 743 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 744 745 /* Start with bits set for DDI_AUX_CTL register */ 746 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), 747 aux_clock_divider); 748 749 /* Select only valid bits for SRD_AUX_CTL */ 750 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK | 751 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | 752 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | 753 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; 754 755 intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder), 756 aux_ctl); 757 } 758 759 static bool psr2_su_region_et_valid(struct intel_connector *connector, bool panel_replay) 760 { 761 struct intel_dp *intel_dp = intel_attached_dp(connector); 762 struct intel_display *display = to_intel_display(intel_dp); 763 764 if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) || 765 intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE) 766 return false; 767 768 return panel_replay ? 769 connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & 770 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT : 771 connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED; 772 } 773 774 static void _panel_replay_enable_sink(struct intel_dp *intel_dp, 775 const struct intel_crtc_state *crtc_state) 776 { 777 u8 val = DP_PANEL_REPLAY_ENABLE | 778 DP_PANEL_REPLAY_VSC_SDP_CRC_EN | 779 DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN | 780 DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN | 781 DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN; 782 u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION; 783 784 if (crtc_state->has_sel_update) 785 val |= DP_PANEL_REPLAY_SU_ENABLE; 786 787 if (crtc_state->enable_psr2_su_region_et) 788 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET; 789 790 if (crtc_state->req_psr2_sdp_prior_scanline) 791 panel_replay_config2 |= 792 DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE; 793 794 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val); 795 796 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2, 797 panel_replay_config2); 798 } 799 800 static void _psr_enable_sink(struct intel_dp *intel_dp, 801 const struct intel_crtc_state *crtc_state) 802 { 803 struct intel_display *display = to_intel_display(intel_dp); 804 u8 val = 0; 805 806 if (crtc_state->has_sel_update) { 807 val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; 808 } else { 809 if (intel_dp->psr.link_standby) 810 val |= DP_PSR_MAIN_LINK_ACTIVE; 811 812 if (DISPLAY_VER(display) >= 8) 813 val |= DP_PSR_CRC_VERIFICATION; 814 } 815 816 if (crtc_state->req_psr2_sdp_prior_scanline) 817 val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE; 818 819 if (crtc_state->enable_psr2_su_region_et) 820 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET; 821 822 if (intel_dp->psr.entry_setup_frames > 0) 823 val |= DP_PSR_FRAME_CAPTURE; 824 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); 825 826 val |= DP_PSR_ENABLE; 827 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); 828 } 829 830 static void intel_psr_enable_sink(struct intel_dp *intel_dp, 831 const struct intel_crtc_state *crtc_state) 832 { 833 intel_alpm_enable_sink(intel_dp, crtc_state); 834 835 crtc_state->has_panel_replay ? 836 _panel_replay_enable_sink(intel_dp, crtc_state) : 837 _psr_enable_sink(intel_dp, crtc_state); 838 839 if (intel_dp_is_edp(intel_dp)) 840 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 841 } 842 843 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp) 844 { 845 /* 846 * NOTE: We might want to trigger mode set when 847 * disabling/enabling Panel Replay via debugfs interface to 848 * ensure this bit is cleared/set accordingly. 849 */ 850 if (CAN_PANEL_REPLAY(intel_dp) && panel_replay_global_enabled(intel_dp)) 851 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, 852 DP_PANEL_REPLAY_ENABLE); 853 } 854 855 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) 856 { 857 struct intel_display *display = to_intel_display(intel_dp); 858 struct intel_connector *connector = intel_dp->attached_connector; 859 u32 val = 0; 860 861 if (DISPLAY_VER(display) >= 11) 862 val |= EDP_PSR_TP4_TIME_0us; 863 864 if (display->params.psr_safest_params) { 865 val |= EDP_PSR_TP1_TIME_2500us; 866 val |= EDP_PSR_TP2_TP3_TIME_2500us; 867 goto check_tp3_sel; 868 } 869 870 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0) 871 val |= EDP_PSR_TP1_TIME_0us; 872 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100) 873 val |= EDP_PSR_TP1_TIME_100us; 874 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500) 875 val |= EDP_PSR_TP1_TIME_500us; 876 else 877 val |= EDP_PSR_TP1_TIME_2500us; 878 879 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) 880 val |= EDP_PSR_TP2_TP3_TIME_0us; 881 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100) 882 val |= EDP_PSR_TP2_TP3_TIME_100us; 883 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500) 884 val |= EDP_PSR_TP2_TP3_TIME_500us; 885 else 886 val |= EDP_PSR_TP2_TP3_TIME_2500us; 887 888 /* 889 * WA 0479: hsw,bdw 890 * "Do not skip both TP1 and TP2/TP3" 891 */ 892 if (DISPLAY_VER(display) < 9 && 893 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 && 894 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) 895 val |= EDP_PSR_TP2_TP3_TIME_100us; 896 897 check_tp3_sel: 898 if (intel_dp_source_supports_tps3(display) && 899 drm_dp_tps3_supported(intel_dp->dpcd)) 900 val |= EDP_PSR_TP_TP1_TP3; 901 else 902 val |= EDP_PSR_TP_TP1_TP2; 903 904 return val; 905 } 906 907 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) 908 { 909 struct intel_display *display = to_intel_display(intel_dp); 910 struct intel_connector *connector = intel_dp->attached_connector; 911 int idle_frames; 912 913 /* Let's use 6 as the minimum to cover all known cases including the 914 * off-by-one issue that HW has in some cases. 915 */ 916 idle_frames = max(6, connector->panel.vbt.psr.idle_frames); 917 idle_frames = max(idle_frames, connector->dp.psr_caps.sync_latency + 1); 918 919 if (drm_WARN_ON(display->drm, idle_frames > 0xf)) 920 idle_frames = 0xf; 921 922 return idle_frames; 923 } 924 925 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp) 926 { 927 struct intel_display *display = to_intel_display(intel_dp); 928 u32 current_dc_state = intel_display_power_get_current_dc_state(display); 929 struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe); 930 struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base); 931 932 return (current_dc_state != DC_STATE_EN_UPTO_DC5 && 933 current_dc_state != DC_STATE_EN_UPTO_DC6) || 934 intel_dp->psr.active_non_psr_pipes || 935 READ_ONCE(vblank->enabled); 936 } 937 938 static void hsw_activate_psr1(struct intel_dp *intel_dp) 939 { 940 struct intel_display *display = to_intel_display(intel_dp); 941 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 942 u32 max_sleep_time = 0x1f; 943 u32 val = EDP_PSR_ENABLE; 944 945 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); 946 947 if (DISPLAY_VER(display) < 20) 948 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time); 949 950 if (display->platform.haswell) 951 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 952 953 if (intel_dp->psr.link_standby) 954 val |= EDP_PSR_LINK_STANDBY; 955 956 val |= intel_psr1_get_tp_time(intel_dp); 957 958 if (DISPLAY_VER(display) >= 8) 959 val |= EDP_PSR_CRC_ENABLE; 960 961 if (DISPLAY_VER(display) >= 20) 962 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); 963 964 intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder), 965 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); 966 967 /* Wa_16025596647 */ 968 if ((DISPLAY_VER(display) == 20 || 969 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 970 is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used) 971 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, 972 intel_dp->psr.pipe, 973 true); 974 } 975 976 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) 977 { 978 struct intel_display *display = to_intel_display(intel_dp); 979 struct intel_connector *connector = intel_dp->attached_connector; 980 u32 val = 0; 981 982 if (display->params.psr_safest_params) 983 return EDP_PSR2_TP2_TIME_2500us; 984 985 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && 986 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) 987 val |= EDP_PSR2_TP2_TIME_50us; 988 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) 989 val |= EDP_PSR2_TP2_TIME_100us; 990 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) 991 val |= EDP_PSR2_TP2_TIME_500us; 992 else 993 val |= EDP_PSR2_TP2_TIME_2500us; 994 995 return val; 996 } 997 998 static int 999 psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines) 1000 { 1001 return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12; 1002 } 1003 1004 static int psr2_block_count(struct intel_dp *intel_dp) 1005 { 1006 return psr2_block_count_lines(intel_dp->psr.io_wake_lines, 1007 intel_dp->psr.fast_wake_lines) / 4; 1008 } 1009 1010 static u8 frames_before_su_entry(struct intel_dp *intel_dp) 1011 { 1012 struct intel_connector *connector = intel_dp->attached_connector; 1013 u8 frames_before_su_entry; 1014 1015 frames_before_su_entry = max_t(u8, 1016 connector->dp.psr_caps.sync_latency + 1, 1017 2); 1018 1019 /* Entry setup frames must be at least 1 less than frames before SU entry */ 1020 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry) 1021 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1; 1022 1023 return frames_before_su_entry; 1024 } 1025 1026 static void dg2_activate_panel_replay(struct intel_dp *intel_dp) 1027 { 1028 struct intel_display *display = to_intel_display(intel_dp); 1029 struct intel_psr *psr = &intel_dp->psr; 1030 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 1031 1032 if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) { 1033 u32 val = psr->su_region_et_enabled ? 1034 LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0; 1035 1036 if (intel_dp->psr.req_psr2_sdp_prior_scanline) 1037 val |= EDP_PSR2_SU_SDP_SCANLINE; 1038 1039 intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), 1040 val); 1041 } 1042 1043 intel_de_rmw(display, 1044 PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder), 1045 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME); 1046 1047 intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0, 1048 TRANS_DP2_PANEL_REPLAY_ENABLE); 1049 } 1050 1051 static void hsw_activate_psr2(struct intel_dp *intel_dp) 1052 { 1053 struct intel_display *display = to_intel_display(intel_dp); 1054 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 1055 u32 val = EDP_PSR2_ENABLE; 1056 u32 psr_val = 0; 1057 u8 idle_frames; 1058 1059 /* Wa_16025596647 */ 1060 if ((DISPLAY_VER(display) == 20 || 1061 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 1062 is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used) 1063 idle_frames = 0; 1064 else 1065 idle_frames = psr_compute_idle_frames(intel_dp); 1066 val |= EDP_PSR2_IDLE_FRAMES(idle_frames); 1067 1068 if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p) 1069 val |= EDP_SU_TRACK_ENABLE; 1070 1071 if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13) 1072 val |= EDP_Y_COORDINATE_ENABLE; 1073 1074 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp)); 1075 1076 val |= intel_psr2_get_tp_time(intel_dp); 1077 1078 if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) { 1079 if (psr2_block_count(intel_dp) > 2) 1080 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; 1081 else 1082 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 1083 } 1084 1085 /* Wa_22012278275:adl-p */ 1086 if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) { 1087 static const u8 map[] = { 1088 2, /* 5 lines */ 1089 1, /* 6 lines */ 1090 0, /* 7 lines */ 1091 3, /* 8 lines */ 1092 6, /* 9 lines */ 1093 5, /* 10 lines */ 1094 4, /* 11 lines */ 1095 7, /* 12 lines */ 1096 }; 1097 /* 1098 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see 1099 * comments below for more information 1100 */ 1101 int tmp; 1102 1103 tmp = map[intel_dp->psr.io_wake_lines - 1104 TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; 1105 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES); 1106 1107 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; 1108 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES); 1109 } else if (DISPLAY_VER(display) >= 20) { 1110 val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); 1111 } else if (DISPLAY_VER(display) >= 12) { 1112 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); 1113 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); 1114 } else if (DISPLAY_VER(display) >= 9) { 1115 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); 1116 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); 1117 } 1118 1119 if (intel_dp->psr.req_psr2_sdp_prior_scanline) 1120 val |= EDP_PSR2_SU_SDP_SCANLINE; 1121 1122 if (DISPLAY_VER(display) >= 20) 1123 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); 1124 1125 if (intel_dp->psr.psr2_sel_fetch_enabled) { 1126 u32 tmp; 1127 1128 tmp = intel_de_read(display, 1129 PSR2_MAN_TRK_CTL(display, cpu_transcoder)); 1130 drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); 1131 } else if (HAS_PSR2_SEL_FETCH(display)) { 1132 intel_de_write(display, 1133 PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0); 1134 } 1135 1136 if (intel_dp->psr.su_region_et_enabled) 1137 val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE; 1138 1139 /* 1140 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is 1141 * recommending keep this bit unset while PSR2 is enabled. 1142 */ 1143 intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val); 1144 1145 intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val); 1146 } 1147 1148 static bool 1149 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder) 1150 { 1151 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) 1152 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B; 1153 else if (DISPLAY_VER(display) >= 12) 1154 return cpu_transcoder == TRANSCODER_A; 1155 else if (DISPLAY_VER(display) >= 9) 1156 return cpu_transcoder == TRANSCODER_EDP; 1157 else 1158 return false; 1159 } 1160 1161 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state) 1162 { 1163 if (!crtc_state->hw.active) 1164 return 0; 1165 1166 return DIV_ROUND_UP(1000 * 1000, 1167 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode)); 1168 } 1169 1170 static void psr2_program_idle_frames(struct intel_dp *intel_dp, 1171 u32 idle_frames) 1172 { 1173 struct intel_display *display = to_intel_display(intel_dp); 1174 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 1175 1176 intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder), 1177 EDP_PSR2_IDLE_FRAMES_MASK, 1178 EDP_PSR2_IDLE_FRAMES(idle_frames)); 1179 } 1180 1181 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) 1182 { 1183 struct intel_display *display = to_intel_display(intel_dp); 1184 1185 psr2_program_idle_frames(intel_dp, 0); 1186 intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO); 1187 } 1188 1189 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp) 1190 { 1191 struct intel_display *display = to_intel_display(intel_dp); 1192 1193 intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6); 1194 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp)); 1195 } 1196 1197 static void tgl_dc3co_disable_work(struct work_struct *work) 1198 { 1199 struct intel_dp *intel_dp = 1200 container_of(work, typeof(*intel_dp), psr.dc3co_work.work); 1201 1202 mutex_lock(&intel_dp->psr.lock); 1203 /* If delayed work is pending, it is not idle */ 1204 if (delayed_work_pending(&intel_dp->psr.dc3co_work)) 1205 goto unlock; 1206 1207 tgl_psr2_disable_dc3co(intel_dp); 1208 unlock: 1209 mutex_unlock(&intel_dp->psr.lock); 1210 } 1211 1212 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp) 1213 { 1214 if (!intel_dp->psr.dc3co_exitline) 1215 return; 1216 1217 cancel_delayed_work(&intel_dp->psr.dc3co_work); 1218 /* Before PSR2 exit disallow dc3co*/ 1219 tgl_psr2_disable_dc3co(intel_dp); 1220 } 1221 1222 static bool 1223 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp, 1224 struct intel_crtc_state *crtc_state) 1225 { 1226 struct intel_display *display = to_intel_display(intel_dp); 1227 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1228 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; 1229 enum port port = dig_port->base.port; 1230 1231 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) 1232 return pipe <= PIPE_B && port <= PORT_B; 1233 else 1234 return pipe == PIPE_A && port == PORT_A; 1235 } 1236 1237 static void 1238 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, 1239 struct intel_crtc_state *crtc_state) 1240 { 1241 struct intel_display *display = to_intel_display(intel_dp); 1242 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; 1243 struct i915_power_domains *power_domains = &display->power.domains; 1244 u32 exit_scanlines; 1245 1246 /* 1247 * FIXME: Due to the changed sequence of activating/deactivating DC3CO, 1248 * disable DC3CO until the changed dc3co activating/deactivating sequence 1249 * is applied. B.Specs:49196 1250 */ 1251 return; 1252 1253 /* 1254 * DMC's DC3CO exit mechanism has an issue with Selective Fecth 1255 * TODO: when the issue is addressed, this restriction should be removed. 1256 */ 1257 if (crtc_state->enable_psr2_sel_fetch) 1258 return; 1259 1260 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO)) 1261 return; 1262 1263 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) 1264 return; 1265 1266 /* Wa_16011303918:adl-p */ 1267 if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) 1268 return; 1269 1270 /* 1271 * DC3CO Exit time 200us B.Spec 49196 1272 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 1273 */ 1274 exit_scanlines = 1275 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1; 1276 1277 if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay)) 1278 return; 1279 1280 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; 1281 } 1282 1283 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, 1284 struct intel_crtc_state *crtc_state) 1285 { 1286 struct intel_display *display = to_intel_display(intel_dp); 1287 1288 if (!display->params.enable_psr2_sel_fetch && 1289 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { 1290 drm_dbg_kms(display->drm, 1291 "PSR2 sel fetch not enabled, disabled by parameter\n"); 1292 return false; 1293 } 1294 1295 return crtc_state->enable_psr2_sel_fetch = true; 1296 } 1297 1298 static bool psr2_granularity_check(struct intel_crtc_state *crtc_state, 1299 struct intel_connector *connector) 1300 { 1301 struct intel_dp *intel_dp = intel_attached_dp(connector); 1302 struct intel_display *display = to_intel_display(intel_dp); 1303 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1304 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; 1305 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; 1306 u16 y_granularity = 0; 1307 u16 sink_y_granularity = crtc_state->has_panel_replay ? 1308 connector->dp.panel_replay_caps.su_y_granularity : 1309 connector->dp.psr_caps.su_y_granularity; 1310 u16 sink_w_granularity; 1311 1312 if (crtc_state->has_panel_replay) 1313 sink_w_granularity = connector->dp.panel_replay_caps.su_w_granularity == 1314 DP_PANEL_REPLAY_FULL_LINE_GRANULARITY ? 1315 crtc_hdisplay : connector->dp.panel_replay_caps.su_w_granularity; 1316 else 1317 sink_w_granularity = connector->dp.psr_caps.su_w_granularity; 1318 1319 /* PSR2 HW only send full lines so we only need to validate the width */ 1320 if (crtc_hdisplay % sink_w_granularity) 1321 return false; 1322 1323 if (crtc_vdisplay % sink_y_granularity) 1324 return false; 1325 1326 /* HW tracking is only aligned to 4 lines */ 1327 if (!crtc_state->enable_psr2_sel_fetch) 1328 return sink_y_granularity == 4; 1329 1330 /* 1331 * adl_p and mtl platforms have 1 line granularity. 1332 * For other platforms with SW tracking we can adjust the y coordinates 1333 * to match sink requirement if multiple of 4. 1334 */ 1335 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) 1336 y_granularity = sink_y_granularity; 1337 else if (sink_y_granularity <= 2) 1338 y_granularity = 4; 1339 else if ((sink_y_granularity % 4) == 0) 1340 y_granularity = sink_y_granularity; 1341 1342 if (y_granularity == 0 || crtc_vdisplay % y_granularity) 1343 return false; 1344 1345 if (crtc_state->dsc.compression_enable && 1346 vdsc_cfg->slice_height % y_granularity) 1347 return false; 1348 1349 crtc_state->su_y_granularity = y_granularity; 1350 return true; 1351 } 1352 1353 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp, 1354 struct intel_crtc_state *crtc_state) 1355 { 1356 struct intel_display *display = to_intel_display(intel_dp); 1357 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode; 1358 u32 hblank_total, hblank_ns, req_ns; 1359 1360 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; 1361 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock); 1362 1363 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */ 1364 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000); 1365 1366 if ((hblank_ns - req_ns) > 100) 1367 return true; 1368 1369 /* Not supported <13 / Wa_22012279113:adl-p */ 1370 if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b) 1371 return false; 1372 1373 crtc_state->req_psr2_sdp_prior_scanline = true; 1374 return true; 1375 } 1376 1377 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp, 1378 struct drm_connector_state *conn_state, 1379 const struct drm_display_mode *adjusted_mode) 1380 { 1381 struct intel_display *display = to_intel_display(intel_dp); 1382 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1383 int psr_setup_time = drm_dp_psr_setup_time(connector->dp.psr_caps.dpcd); 1384 int entry_setup_frames = 0; 1385 1386 if (psr_setup_time < 0) { 1387 drm_dbg_kms(display->drm, 1388 "PSR condition failed: Invalid PSR setup time (0x%02x)\n", 1389 connector->dp.psr_caps.dpcd[1]); 1390 return -ETIME; 1391 } 1392 1393 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > 1394 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { 1395 if (DISPLAY_VER(display) >= 20) { 1396 /* setup entry frames can be up to 3 frames */ 1397 entry_setup_frames = 1; 1398 drm_dbg_kms(display->drm, 1399 "PSR setup entry frames %d\n", 1400 entry_setup_frames); 1401 } else { 1402 drm_dbg_kms(display->drm, 1403 "PSR condition failed: PSR setup time (%d us) too long\n", 1404 psr_setup_time); 1405 return -ETIME; 1406 } 1407 } 1408 1409 return entry_setup_frames; 1410 } 1411 1412 static 1413 int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state, 1414 bool needs_panel_replay, 1415 bool needs_sel_update) 1416 { 1417 struct intel_display *display = to_intel_display(crtc_state); 1418 1419 if (!crtc_state->has_psr) 1420 return 0; 1421 1422 /* Wa_14015401596 */ 1423 if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14)) 1424 return 1; 1425 1426 /* Rest is for SRD_STATUS needed on LunarLake and onwards */ 1427 if (DISPLAY_VER(display) < 20) 1428 return 0; 1429 1430 /* 1431 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards: 1432 * 1433 * To deterministically capture the transition of the state machine 1434 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least 1435 * one line after the non-delayed V. Blank. 1436 * 1437 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0 1438 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ] 1439 * - TRANS_VTOTAL[ Vertical Active ]) 1440 * 1441 * SRD_STATUS is used only by PSR1 on PantherLake. 1442 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake. 1443 */ 1444 1445 if (DISPLAY_VER(display) >= 30 && (needs_panel_replay || 1446 needs_sel_update)) 1447 return 0; 1448 else if (DISPLAY_VER(display) < 30 && (needs_sel_update || 1449 intel_crtc_has_type(crtc_state, 1450 INTEL_OUTPUT_EDP))) 1451 return 0; 1452 else 1453 return 1; 1454 } 1455 1456 static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state, 1457 int vblank, 1458 int wake_lines) 1459 { 1460 if (crtc_state->req_psr2_sdp_prior_scanline) 1461 vblank -= 1; 1462 1463 /* Vblank >= PSR2_CTL Block Count Number maximum line count */ 1464 if (vblank < wake_lines) 1465 return false; 1466 1467 return true; 1468 } 1469 1470 static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp, 1471 const struct intel_crtc_state *crtc_state, 1472 bool aux_less, 1473 bool needs_panel_replay, 1474 bool needs_sel_update) 1475 { 1476 struct intel_display *display = to_intel_display(intel_dp); 1477 int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end - 1478 crtc_state->hw.adjusted_mode.crtc_vblank_start; 1479 int wake_lines; 1480 int scl = _intel_psr_min_set_context_latency(crtc_state, 1481 needs_panel_replay, 1482 needs_sel_update); 1483 vblank -= scl; 1484 1485 if (aux_less) 1486 wake_lines = crtc_state->alpm_state.aux_less_wake_lines; 1487 else 1488 wake_lines = DISPLAY_VER(display) < 20 ? 1489 psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines, 1490 crtc_state->alpm_state.fast_wake_lines) : 1491 crtc_state->alpm_state.io_wake_lines; 1492 1493 /* 1494 * Guardband has not been computed yet, so we conservatively check if the 1495 * full vblank duration is sufficient to accommodate wake line requirements 1496 * for PSR features like Panel Replay and Selective Update. 1497 * 1498 * Once the actual guardband is available, a more accurate validation is 1499 * performed in intel_psr_compute_config_late(), and PSR features are 1500 * disabled if wake lines exceed the available guardband. 1501 */ 1502 return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines); 1503 } 1504 1505 static bool alpm_config_valid(struct intel_dp *intel_dp, 1506 struct intel_crtc_state *crtc_state, 1507 bool aux_less, 1508 bool needs_panel_replay, 1509 bool needs_sel_update) 1510 { 1511 struct intel_display *display = to_intel_display(intel_dp); 1512 1513 if (!intel_alpm_compute_params(intel_dp, crtc_state)) { 1514 drm_dbg_kms(display->drm, 1515 "PSR2/Panel Replay not enabled, Unable to use long enough wake times\n"); 1516 return false; 1517 } 1518 1519 if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less, 1520 needs_panel_replay, needs_sel_update)) { 1521 drm_dbg_kms(display->drm, 1522 "PSR2/Panel Replay not enabled, too short vblank time\n"); 1523 return false; 1524 } 1525 1526 return true; 1527 } 1528 1529 static bool intel_psr2_config_valid(struct intel_dp *intel_dp, 1530 struct intel_crtc_state *crtc_state, 1531 struct drm_connector_state *conn_state) 1532 { 1533 struct intel_display *display = to_intel_display(intel_dp); 1534 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1535 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; 1536 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; 1537 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; 1538 1539 if (!connector->dp.psr_caps.su_support || display->params.enable_psr == 1) 1540 return false; 1541 1542 /* JSL and EHL only supports eDP 1.3 */ 1543 if (display->platform.jasperlake || display->platform.elkhartlake) { 1544 drm_dbg_kms(display->drm, "PSR2 not supported by phy\n"); 1545 return false; 1546 } 1547 1548 /* Wa_16011181250 */ 1549 if (display->platform.rocketlake || display->platform.alderlake_s || 1550 display->platform.dg2) { 1551 drm_dbg_kms(display->drm, 1552 "PSR2 is defeatured for this platform\n"); 1553 return false; 1554 } 1555 1556 if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { 1557 drm_dbg_kms(display->drm, 1558 "PSR2 not completely functional in this stepping\n"); 1559 return false; 1560 } 1561 1562 if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) { 1563 drm_dbg_kms(display->drm, 1564 "PSR2 not supported in transcoder %s\n", 1565 transcoder_name(crtc_state->cpu_transcoder)); 1566 return false; 1567 } 1568 1569 /* 1570 * DSC and PSR2 cannot be enabled simultaneously. If a requested 1571 * resolution requires DSC to be enabled, priority is given to DSC 1572 * over PSR2. 1573 */ 1574 if (crtc_state->dsc.compression_enable && 1575 (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) { 1576 drm_dbg_kms(display->drm, 1577 "PSR2 cannot be enabled since DSC is enabled\n"); 1578 return false; 1579 } 1580 1581 if (DISPLAY_VER(display) >= 20) { 1582 psr_max_h = crtc_hdisplay; 1583 psr_max_v = crtc_vdisplay; 1584 max_bpp = crtc_state->pipe_bpp; 1585 } else if (IS_DISPLAY_VER(display, 12, 14)) { 1586 psr_max_h = 5120; 1587 psr_max_v = 3200; 1588 max_bpp = 30; 1589 } else if (IS_DISPLAY_VER(display, 10, 11)) { 1590 psr_max_h = 4096; 1591 psr_max_v = 2304; 1592 max_bpp = 24; 1593 } else if (DISPLAY_VER(display) == 9) { 1594 psr_max_h = 3640; 1595 psr_max_v = 2304; 1596 max_bpp = 24; 1597 } 1598 1599 if (crtc_state->pipe_bpp > max_bpp) { 1600 drm_dbg_kms(display->drm, 1601 "PSR2 not enabled, pipe bpp %d > max supported %d\n", 1602 crtc_state->pipe_bpp, max_bpp); 1603 return false; 1604 } 1605 1606 /* Wa_16011303918:adl-p */ 1607 if (crtc_state->vrr.enable && 1608 display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { 1609 drm_dbg_kms(display->drm, 1610 "PSR2 not enabled, not compatible with HW stepping + VRR\n"); 1611 return false; 1612 } 1613 1614 if (!alpm_config_valid(intel_dp, crtc_state, false, false, true)) 1615 return false; 1616 1617 if (!crtc_state->enable_psr2_sel_fetch && 1618 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { 1619 drm_dbg_kms(display->drm, 1620 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", 1621 crtc_hdisplay, crtc_vdisplay, 1622 psr_max_h, psr_max_v); 1623 return false; 1624 } 1625 1626 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); 1627 1628 return true; 1629 } 1630 1631 static bool intel_sel_update_config_valid(struct intel_crtc_state *crtc_state, 1632 struct drm_connector_state *conn_state) 1633 { 1634 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1635 struct intel_dp *intel_dp = intel_attached_dp(connector); 1636 struct intel_display *display = to_intel_display(intel_dp); 1637 1638 if (HAS_PSR2_SEL_FETCH(display) && 1639 !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && 1640 !HAS_PSR_HW_TRACKING(display)) { 1641 drm_dbg_kms(display->drm, 1642 "Selective update not enabled, selective fetch not valid and no HW tracking available\n"); 1643 goto unsupported; 1644 } 1645 1646 if (!sel_update_global_enabled(intel_dp)) { 1647 drm_dbg_kms(display->drm, 1648 "Selective update disabled by flag\n"); 1649 goto unsupported; 1650 } 1651 1652 if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state, 1653 conn_state)) 1654 goto unsupported; 1655 1656 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { 1657 drm_dbg_kms(display->drm, 1658 "Selective update not enabled, SDP indication do not fit in hblank\n"); 1659 goto unsupported; 1660 } 1661 1662 if (crtc_state->has_panel_replay) { 1663 if (DISPLAY_VER(display) < 14) 1664 goto unsupported; 1665 1666 if (!connector->dp.panel_replay_caps.su_support) 1667 goto unsupported; 1668 1669 if (intel_dsc_enabled_on_link(crtc_state) && 1670 connector->dp.panel_replay_caps.dsc_support != 1671 INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) { 1672 drm_dbg_kms(display->drm, 1673 "Selective update with Panel Replay not enabled because it's not supported with DSC\n"); 1674 goto unsupported; 1675 } 1676 } 1677 1678 if (crtc_state->crc_enabled) { 1679 drm_dbg_kms(display->drm, 1680 "Selective update not enabled because it would inhibit pipe CRC calculation\n"); 1681 goto unsupported; 1682 } 1683 1684 if (!psr2_granularity_check(crtc_state, connector)) { 1685 drm_dbg_kms(display->drm, 1686 "Selective update not enabled, SU granularity not compatible\n"); 1687 goto unsupported; 1688 } 1689 1690 crtc_state->enable_psr2_su_region_et = psr2_su_region_et_valid(connector, 1691 crtc_state->has_panel_replay); 1692 1693 return true; 1694 1695 unsupported: 1696 crtc_state->enable_psr2_sel_fetch = false; 1697 return false; 1698 } 1699 1700 static bool _psr_compute_config(struct intel_dp *intel_dp, 1701 struct intel_crtc_state *crtc_state, 1702 struct drm_connector_state *conn_state) 1703 { 1704 struct intel_display *display = to_intel_display(intel_dp); 1705 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1706 int entry_setup_frames; 1707 1708 if (!CAN_PSR(intel_dp) || !display->params.enable_psr) 1709 return false; 1710 1711 /* 1712 * Currently PSR doesn't work reliably with VRR enabled. 1713 */ 1714 if (crtc_state->vrr.enable) 1715 return false; 1716 1717 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode); 1718 1719 if (entry_setup_frames >= 0) { 1720 crtc_state->entry_setup_frames = entry_setup_frames; 1721 } else { 1722 crtc_state->no_psr_reason = "PSR setup timing not met"; 1723 drm_dbg_kms(display->drm, 1724 "PSR condition failed: PSR setup timing not met\n"); 1725 return false; 1726 } 1727 1728 return true; 1729 } 1730 1731 static inline bool compute_link_off_after_as_sdp_when_pr_active(struct intel_connector *connector) 1732 { 1733 return (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] & 1734 DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP); 1735 } 1736 1737 static inline bool compute_disable_as_sdp_when_pr_active(struct intel_connector *connector) 1738 { 1739 return !(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] & 1740 DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR); 1741 } 1742 1743 static bool _panel_replay_compute_config(struct intel_crtc_state *crtc_state, 1744 const struct drm_connector_state *conn_state) 1745 { 1746 struct intel_connector *connector = 1747 to_intel_connector(conn_state->connector); 1748 struct intel_dp *intel_dp = intel_attached_dp(connector); 1749 struct intel_display *display = to_intel_display(intel_dp); 1750 struct intel_hdcp *hdcp = &connector->hdcp; 1751 1752 if (!CAN_PANEL_REPLAY(intel_dp)) 1753 return false; 1754 1755 if (!connector->dp.panel_replay_caps.support) 1756 return false; 1757 1758 if (!panel_replay_global_enabled(intel_dp)) { 1759 drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n"); 1760 return false; 1761 } 1762 1763 if (crtc_state->crc_enabled) { 1764 drm_dbg_kms(display->drm, 1765 "Panel Replay not enabled because it would inhibit pipe CRC calculation\n"); 1766 return false; 1767 } 1768 1769 if (intel_dsc_enabled_on_link(crtc_state) && 1770 connector->dp.panel_replay_caps.dsc_support == 1771 INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) { 1772 drm_dbg_kms(display->drm, 1773 "Panel Replay not enabled because it's not supported with DSC\n"); 1774 return false; 1775 } 1776 1777 crtc_state->link_off_after_as_sdp_when_pr_active = compute_link_off_after_as_sdp_when_pr_active(connector); 1778 crtc_state->disable_as_sdp_when_pr_active = compute_disable_as_sdp_when_pr_active(connector); 1779 1780 if (!intel_dp_is_edp(intel_dp)) 1781 return true; 1782 1783 /* Remaining checks are for eDP only */ 1784 1785 if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A && 1786 to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B) 1787 return false; 1788 1789 /* 128b/132b Panel Replay is not supported on eDP */ 1790 if (intel_dp_is_uhbr(crtc_state)) { 1791 drm_dbg_kms(display->drm, 1792 "Panel Replay is not supported with 128b/132b\n"); 1793 return false; 1794 } 1795 1796 /* HW will not allow Panel Replay on eDP when HDCP enabled */ 1797 if (conn_state->content_protection == 1798 DRM_MODE_CONTENT_PROTECTION_DESIRED || 1799 (conn_state->content_protection == 1800 DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == 1801 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) { 1802 drm_dbg_kms(display->drm, 1803 "Panel Replay is not supported with HDCP\n"); 1804 return false; 1805 } 1806 1807 if (!alpm_config_valid(intel_dp, crtc_state, true, true, false)) 1808 return false; 1809 1810 return true; 1811 } 1812 1813 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp, 1814 struct intel_crtc_state *crtc_state) 1815 { 1816 struct intel_display *display = to_intel_display(intel_dp); 1817 1818 return (DISPLAY_VER(display) == 20 && crtc_state->entry_setup_frames > 0 && 1819 !crtc_state->has_sel_update); 1820 } 1821 1822 static 1823 void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp, 1824 struct intel_crtc_state *crtc_state) 1825 { 1826 struct intel_display *display = to_intel_display(intel_dp); 1827 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 1828 struct intel_crtc *crtc; 1829 u8 active_pipes = 0; 1830 1831 /* Wa_16025596647 */ 1832 if (DISPLAY_VER(display) != 20 && 1833 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) 1834 return; 1835 1836 /* Not needed by Panel Replay */ 1837 if (crtc_state->has_panel_replay) 1838 return; 1839 1840 /* We ignore possible secondary PSR/Panel Replay capable eDP */ 1841 for_each_intel_crtc(display->drm, crtc) 1842 active_pipes |= crtc->active ? BIT(crtc->pipe) : 0; 1843 1844 active_pipes = intel_calc_active_pipes(state, active_pipes); 1845 1846 crtc_state->active_non_psr_pipes = active_pipes & 1847 ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe); 1848 } 1849 1850 void intel_psr_compute_config(struct intel_dp *intel_dp, 1851 struct intel_crtc_state *crtc_state, 1852 struct drm_connector_state *conn_state) 1853 { 1854 struct intel_display *display = to_intel_display(intel_dp); 1855 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1856 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1857 1858 if (!psr_global_enabled(intel_dp)) { 1859 drm_dbg_kms(display->drm, "PSR disabled by flag\n"); 1860 return; 1861 } 1862 1863 if (intel_dp->psr.sink_not_reliable) { 1864 drm_dbg_kms(display->drm, 1865 "PSR sink implementation is not reliable\n"); 1866 return; 1867 } 1868 1869 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 1870 drm_dbg_kms(display->drm, 1871 "PSR condition failed: Interlaced mode enabled\n"); 1872 return; 1873 } 1874 1875 /* 1876 * FIXME figure out what is wrong with PSR+joiner and 1877 * fix it. Presumably something related to the fact that 1878 * PSR is a transcoder level feature. 1879 */ 1880 if (crtc_state->joiner_pipes) { 1881 drm_dbg_kms(display->drm, 1882 "PSR disabled due to joiner\n"); 1883 return; 1884 } 1885 1886 /* Only used for state verification. */ 1887 crtc_state->panel_replay_dsc_support = connector->dp.panel_replay_caps.dsc_support; 1888 crtc_state->has_panel_replay = _panel_replay_compute_config(crtc_state, conn_state); 1889 1890 crtc_state->has_psr = crtc_state->has_panel_replay ? true : 1891 _psr_compute_config(intel_dp, crtc_state, conn_state); 1892 1893 if (!crtc_state->has_psr) 1894 return; 1895 1896 crtc_state->has_sel_update = intel_sel_update_config_valid(crtc_state, conn_state); 1897 } 1898 1899 void intel_psr_get_config(struct intel_encoder *encoder, 1900 struct intel_crtc_state *pipe_config) 1901 { 1902 struct intel_display *display = to_intel_display(encoder); 1903 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 1904 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 1905 struct intel_dp *intel_dp; 1906 u32 val; 1907 1908 if (!dig_port) 1909 return; 1910 1911 intel_dp = &dig_port->dp; 1912 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) 1913 return; 1914 1915 mutex_lock(&intel_dp->psr.lock); 1916 if (!intel_dp->psr.enabled) 1917 goto unlock; 1918 1919 if (intel_dp->psr.panel_replay_enabled) { 1920 pipe_config->has_psr = pipe_config->has_panel_replay = true; 1921 } else { 1922 /* 1923 * Not possible to read EDP_PSR/PSR2_CTL registers as it is 1924 * enabled/disabled because of frontbuffer tracking and others. 1925 */ 1926 pipe_config->has_psr = true; 1927 } 1928 1929 pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled; 1930 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 1931 1932 if (!intel_dp->psr.sel_update_enabled) 1933 goto unlock; 1934 1935 if (HAS_PSR2_SEL_FETCH(display)) { 1936 val = intel_de_read(display, 1937 PSR2_MAN_TRK_CTL(display, cpu_transcoder)); 1938 if (val & PSR2_MAN_TRK_CTL_ENABLE) 1939 pipe_config->enable_psr2_sel_fetch = true; 1940 } 1941 1942 pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled; 1943 1944 if (DISPLAY_VER(display) >= 12) { 1945 val = intel_de_read(display, 1946 TRANS_EXITLINE(display, cpu_transcoder)); 1947 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val); 1948 } 1949 unlock: 1950 mutex_unlock(&intel_dp->psr.lock); 1951 } 1952 1953 static void intel_psr_activate(struct intel_dp *intel_dp) 1954 { 1955 struct intel_display *display = to_intel_display(intel_dp); 1956 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 1957 1958 drm_WARN_ON(display->drm, 1959 transcoder_has_psr2(display, cpu_transcoder) && 1960 intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE); 1961 1962 drm_WARN_ON(display->drm, 1963 intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE); 1964 1965 drm_WARN_ON(display->drm, intel_dp->psr.active); 1966 1967 drm_WARN_ON(display->drm, !intel_dp->psr.enabled); 1968 1969 lockdep_assert_held(&intel_dp->psr.lock); 1970 1971 /* psr1, psr2 and panel-replay are mutually exclusive.*/ 1972 if (intel_dp->psr.panel_replay_enabled) 1973 dg2_activate_panel_replay(intel_dp); 1974 else if (intel_dp->psr.sel_update_enabled) 1975 hsw_activate_psr2(intel_dp); 1976 else 1977 hsw_activate_psr1(intel_dp); 1978 1979 intel_dp->psr.active = true; 1980 intel_dp->psr.no_psr_reason = NULL; 1981 } 1982 1983 /* 1984 * Wa_16013835468 1985 * Wa_14015648006 1986 */ 1987 static void wm_optimization_wa(struct intel_dp *intel_dp, 1988 const struct intel_crtc_state *crtc_state) 1989 { 1990 struct intel_display *display = to_intel_display(intel_dp); 1991 enum pipe pipe = intel_dp->psr.pipe; 1992 bool activate = false; 1993 1994 /* Wa_14015648006 */ 1995 if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled) 1996 activate = true; 1997 1998 /* Wa_16013835468 */ 1999 if (DISPLAY_VER(display) == 12 && 2000 crtc_state->hw.adjusted_mode.crtc_vblank_start != 2001 crtc_state->hw.adjusted_mode.crtc_vdisplay) 2002 activate = true; 2003 2004 if (activate) 2005 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 2006 0, LATENCY_REPORTING_REMOVED(pipe)); 2007 else 2008 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 2009 LATENCY_REPORTING_REMOVED(pipe), 0); 2010 } 2011 2012 static void intel_psr_enable_source(struct intel_dp *intel_dp, 2013 const struct intel_crtc_state *crtc_state) 2014 { 2015 struct intel_display *display = to_intel_display(intel_dp); 2016 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 2017 u32 mask = 0; 2018 2019 /* 2020 * Only HSW and BDW have PSR AUX registers that need to be setup. 2021 * SKL+ use hardcoded values PSR AUX transactions 2022 */ 2023 if (DISPLAY_VER(display) < 9) 2024 hsw_psr_setup_aux(intel_dp); 2025 2026 /* 2027 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also 2028 * mask LPSP to avoid dependency on other drivers that might block 2029 * runtime_pm besides preventing other hw tracking issues now we 2030 * can rely on frontbuffer tracking. 2031 * 2032 * From bspec prior LunarLake: 2033 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in 2034 * panel replay mode. 2035 * 2036 * From bspec beyod LunarLake: 2037 * Panel Replay on DP: No bits are applicable 2038 * Panel Replay on eDP: All bits are applicable 2039 */ 2040 if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp)) 2041 mask = EDP_PSR_DEBUG_MASK_HPD; 2042 2043 if (intel_dp_is_edp(intel_dp)) { 2044 mask |= EDP_PSR_DEBUG_MASK_MEMUP; 2045 2046 /* 2047 * For some unknown reason on HSW non-ULT (or at least on 2048 * Dell Latitude E6540) external displays start to flicker 2049 * when PSR is enabled on the eDP. SR/PC6 residency is much 2050 * higher than should be possible with an external display. 2051 * As a workaround leave LPSP unmasked to prevent PSR entry 2052 * when external displays are active. 2053 */ 2054 if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult) 2055 mask |= EDP_PSR_DEBUG_MASK_LPSP; 2056 2057 if (DISPLAY_VER(display) < 20) 2058 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP; 2059 2060 /* 2061 * No separate pipe reg write mask on hsw/bdw, so have to unmask all 2062 * registers in order to keep the CURSURFLIVE tricks working :( 2063 */ 2064 if (IS_DISPLAY_VER(display, 9, 10)) 2065 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; 2066 2067 /* allow PSR with sprite enabled */ 2068 if (display->platform.haswell) 2069 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE; 2070 } 2071 2072 intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask); 2073 2074 psr_irq_control(intel_dp); 2075 2076 /* 2077 * TODO: if future platforms supports DC3CO in more than one 2078 * transcoder, EXITLINE will need to be unset when disabling PSR 2079 */ 2080 if (intel_dp->psr.dc3co_exitline) 2081 intel_de_rmw(display, 2082 TRANS_EXITLINE(display, cpu_transcoder), 2083 EXITLINE_MASK, 2084 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE); 2085 2086 if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display)) 2087 intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, 2088 intel_dp->psr.psr2_sel_fetch_enabled ? 2089 IGNORE_PSR2_HW_TRACKING : 0); 2090 2091 /* 2092 * Wa_16013835468 2093 * Wa_14015648006 2094 */ 2095 wm_optimization_wa(intel_dp, crtc_state); 2096 2097 if (intel_dp->psr.sel_update_enabled) { 2098 if (DISPLAY_VER(display) == 9) 2099 intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0, 2100 PSR2_VSC_ENABLE_PROG_HEADER | 2101 PSR2_ADD_VERTICAL_LINE_COUNT); 2102 2103 /* 2104 * Wa_16014451276:adlp,mtl[a0,b0] 2105 * All supported adlp panels have 1-based X granularity, this may 2106 * cause issues if non-supported panels are used. 2107 */ 2108 if (!intel_dp->psr.panel_replay_enabled && 2109 (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || 2110 display->platform.alderlake_p)) 2111 intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 2112 0, ADLP_1_BASED_X_GRANULARITY); 2113 2114 /* Wa_16012604467:adlp,mtl[a0,b0] */ 2115 if (!intel_dp->psr.panel_replay_enabled && 2116 IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0)) 2117 intel_de_rmw(display, 2118 MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), 2119 0, 2120 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS); 2121 else if (display->platform.alderlake_p) 2122 intel_de_rmw(display, CLKGATE_DIS_MISC, 0, 2123 CLKGATE_DIS_MISC_DMASC_GATING_DIS); 2124 } 2125 2126 /* Wa_16025596647 */ 2127 if ((DISPLAY_VER(display) == 20 || 2128 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 2129 !intel_dp->psr.panel_replay_enabled) 2130 intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true); 2131 2132 intel_alpm_configure(intel_dp, crtc_state); 2133 } 2134 2135 static bool psr_interrupt_error_check(struct intel_dp *intel_dp) 2136 { 2137 struct intel_display *display = to_intel_display(intel_dp); 2138 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 2139 u32 val; 2140 2141 if (intel_dp->psr.panel_replay_enabled) 2142 goto no_err; 2143 2144 /* 2145 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR 2146 * will still keep the error set even after the reset done in the 2147 * irq_preinstall and irq_uninstall hooks. 2148 * And enabling in this situation cause the screen to freeze in the 2149 * first time that PSR HW tries to activate so lets keep PSR disabled 2150 * to avoid any rendering problems. 2151 */ 2152 val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder)); 2153 val &= psr_irq_psr_error_bit_get(intel_dp); 2154 if (val) { 2155 intel_dp->psr.sink_not_reliable = true; 2156 drm_dbg_kms(display->drm, 2157 "PSR interruption error set, not enabling PSR\n"); 2158 return false; 2159 } 2160 2161 no_err: 2162 return true; 2163 } 2164 2165 static void intel_psr_enable_locked(struct intel_dp *intel_dp, 2166 const struct intel_crtc_state *crtc_state) 2167 { 2168 struct intel_display *display = to_intel_display(intel_dp); 2169 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2170 u32 val; 2171 2172 drm_WARN_ON(display->drm, intel_dp->psr.enabled); 2173 2174 intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update; 2175 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay; 2176 intel_dp->psr.busy_frontbuffer_bits = 0; 2177 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; 2178 intel_dp->psr.transcoder = crtc_state->cpu_transcoder; 2179 /* DC5/DC6 requires at least 6 idle frames */ 2180 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); 2181 intel_dp->psr.dc3co_exit_delay = val; 2182 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline; 2183 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; 2184 intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et; 2185 intel_dp->psr.psr2_sel_fetch_cff_enabled = false; 2186 intel_dp->psr.req_psr2_sdp_prior_scanline = 2187 crtc_state->req_psr2_sdp_prior_scanline; 2188 intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes; 2189 intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used; 2190 intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines; 2191 intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines; 2192 intel_dp->psr.entry_setup_frames = crtc_state->entry_setup_frames; 2193 2194 if (!psr_interrupt_error_check(intel_dp)) 2195 return; 2196 2197 if (intel_dp->psr.panel_replay_enabled) 2198 drm_dbg_kms(display->drm, "Enabling Panel Replay\n"); 2199 else 2200 drm_dbg_kms(display->drm, "Enabling PSR%s\n", 2201 intel_dp->psr.sel_update_enabled ? "2" : "1"); 2202 2203 /* 2204 * Enabling sink PSR/Panel Replay here only for PSR. Panel Replay enable 2205 * bit is already written at this point. Sink ALPM is enabled here for 2206 * PSR and Panel Replay. See 2207 * intel_psr_panel_replay_enable_sink. Modifiers/options: 2208 * - Selective Update 2209 * - Region Early Transport 2210 * - Selective Update Region Scanline Capture 2211 * - VSC_SDP_CRC 2212 * - HPD on different Errors 2213 * - CRC verification 2214 * are written for PSR and Panel Replay here. 2215 */ 2216 intel_psr_enable_sink(intel_dp, crtc_state); 2217 2218 if (intel_dp_is_edp(intel_dp)) 2219 intel_snps_phy_update_psr_power_state(&dig_port->base, true); 2220 2221 intel_psr_enable_source(intel_dp, crtc_state); 2222 intel_dp->psr.enabled = true; 2223 intel_dp->psr.pause_counter = 0; 2224 2225 /* 2226 * Link_ok is sticky and set here on PSR enable. We can assume link 2227 * training is complete as we never continue to PSR enable with 2228 * untrained link. Link_ok is kept as set until first short pulse 2229 * interrupt. This is targeted to workaround panels stating bad link 2230 * after PSR is enabled. 2231 */ 2232 intel_dp->psr.link_ok = true; 2233 2234 intel_psr_activate(intel_dp); 2235 } 2236 2237 static void intel_psr_exit(struct intel_dp *intel_dp) 2238 { 2239 struct intel_display *display = to_intel_display(intel_dp); 2240 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 2241 u32 val; 2242 2243 if (!intel_dp->psr.active) { 2244 if (transcoder_has_psr2(display, cpu_transcoder)) { 2245 val = intel_de_read(display, 2246 EDP_PSR2_CTL(display, cpu_transcoder)); 2247 drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE); 2248 } 2249 2250 val = intel_de_read(display, 2251 psr_ctl_reg(display, cpu_transcoder)); 2252 drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE); 2253 2254 return; 2255 } 2256 2257 if (intel_dp->psr.panel_replay_enabled) { 2258 intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 2259 TRANS_DP2_PANEL_REPLAY_ENABLE, 0); 2260 } else if (intel_dp->psr.sel_update_enabled) { 2261 tgl_disallow_dc3co_on_psr2_exit(intel_dp); 2262 2263 val = intel_de_rmw(display, 2264 EDP_PSR2_CTL(display, cpu_transcoder), 2265 EDP_PSR2_ENABLE, 0); 2266 2267 drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE)); 2268 } else { 2269 if ((DISPLAY_VER(display) == 20 || 2270 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 2271 intel_dp->psr.pkg_c_latency_used) 2272 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, 2273 intel_dp->psr.pipe, 2274 false); 2275 2276 val = intel_de_rmw(display, 2277 psr_ctl_reg(display, cpu_transcoder), 2278 EDP_PSR_ENABLE, 0); 2279 2280 drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE)); 2281 } 2282 intel_dp->psr.active = false; 2283 } 2284 2285 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) 2286 { 2287 struct intel_display *display = to_intel_display(intel_dp); 2288 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 2289 i915_reg_t psr_status; 2290 u32 psr_status_mask; 2291 2292 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled || 2293 intel_dp->psr.panel_replay_enabled)) { 2294 psr_status = EDP_PSR2_STATUS(display, cpu_transcoder); 2295 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; 2296 } else { 2297 psr_status = psr_status_reg(display, cpu_transcoder); 2298 psr_status_mask = EDP_PSR_STATUS_STATE_MASK; 2299 } 2300 2301 /* Wait till PSR is idle */ 2302 if (intel_de_wait_for_clear_ms(display, psr_status, 2303 psr_status_mask, 2000)) 2304 drm_err(display->drm, "Timed out waiting PSR idle state\n"); 2305 } 2306 2307 static void intel_psr_disable_locked(struct intel_dp *intel_dp) 2308 { 2309 struct intel_display *display = to_intel_display(intel_dp); 2310 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 2311 2312 lockdep_assert_held(&intel_dp->psr.lock); 2313 2314 if (!intel_dp->psr.enabled) 2315 return; 2316 2317 if (intel_dp->psr.panel_replay_enabled) 2318 drm_dbg_kms(display->drm, "Disabling Panel Replay\n"); 2319 else 2320 drm_dbg_kms(display->drm, "Disabling PSR%s\n", 2321 intel_dp->psr.sel_update_enabled ? "2" : "1"); 2322 2323 intel_psr_exit(intel_dp); 2324 intel_psr_wait_exit_locked(intel_dp); 2325 2326 /* 2327 * Wa_16013835468 2328 * Wa_14015648006 2329 */ 2330 if (DISPLAY_VER(display) >= 11) 2331 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 2332 LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0); 2333 2334 if (intel_dp->psr.sel_update_enabled) { 2335 /* Wa_16012604467:adlp,mtl[a0,b0] */ 2336 if (!intel_dp->psr.panel_replay_enabled && 2337 IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0)) 2338 intel_de_rmw(display, 2339 MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), 2340 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0); 2341 else if (display->platform.alderlake_p) 2342 intel_de_rmw(display, CLKGATE_DIS_MISC, 2343 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0); 2344 } 2345 2346 if (intel_dp_is_edp(intel_dp)) 2347 intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false); 2348 2349 if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) 2350 intel_alpm_disable(intel_dp); 2351 2352 /* Disable PSR on Sink */ 2353 if (!intel_dp->psr.panel_replay_enabled) { 2354 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); 2355 2356 if (intel_dp->psr.sel_update_enabled) 2357 drm_dp_dpcd_writeb(&intel_dp->aux, 2358 DP_RECEIVER_ALPM_CONFIG, 0); 2359 } 2360 2361 /* Wa_16025596647 */ 2362 if ((DISPLAY_VER(display) == 20 || 2363 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 2364 !intel_dp->psr.panel_replay_enabled) 2365 intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false); 2366 2367 intel_dp->psr.enabled = false; 2368 intel_dp->psr.panel_replay_enabled = false; 2369 intel_dp->psr.sel_update_enabled = false; 2370 intel_dp->psr.psr2_sel_fetch_enabled = false; 2371 intel_dp->psr.su_region_et_enabled = false; 2372 intel_dp->psr.psr2_sel_fetch_cff_enabled = false; 2373 intel_dp->psr.active_non_psr_pipes = 0; 2374 intel_dp->psr.pkg_c_latency_used = 0; 2375 } 2376 2377 /** 2378 * intel_psr_disable - Disable PSR 2379 * @intel_dp: Intel DP 2380 * @old_crtc_state: old CRTC state 2381 * 2382 * This function needs to be called before disabling pipe. 2383 */ 2384 void intel_psr_disable(struct intel_dp *intel_dp, 2385 const struct intel_crtc_state *old_crtc_state) 2386 { 2387 struct intel_display *display = to_intel_display(intel_dp); 2388 2389 if (!old_crtc_state->has_psr) 2390 return; 2391 2392 if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) && 2393 !CAN_PANEL_REPLAY(intel_dp))) 2394 return; 2395 2396 mutex_lock(&intel_dp->psr.lock); 2397 2398 intel_psr_disable_locked(intel_dp); 2399 2400 intel_dp->psr.link_ok = false; 2401 2402 mutex_unlock(&intel_dp->psr.lock); 2403 cancel_work_sync(&intel_dp->psr.work); 2404 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work); 2405 } 2406 2407 /** 2408 * intel_psr_pause - Pause PSR 2409 * @intel_dp: Intel DP 2410 * 2411 * This function need to be called after enabling psr. 2412 */ 2413 void intel_psr_pause(struct intel_dp *intel_dp) 2414 { 2415 struct intel_psr *psr = &intel_dp->psr; 2416 2417 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) 2418 return; 2419 2420 mutex_lock(&psr->lock); 2421 2422 if (!psr->enabled) { 2423 mutex_unlock(&psr->lock); 2424 return; 2425 } 2426 2427 if (intel_dp->psr.pause_counter++ == 0) { 2428 intel_psr_exit(intel_dp); 2429 intel_psr_wait_exit_locked(intel_dp); 2430 } 2431 2432 mutex_unlock(&psr->lock); 2433 2434 cancel_work_sync(&psr->work); 2435 cancel_delayed_work_sync(&psr->dc3co_work); 2436 } 2437 2438 /** 2439 * intel_psr_resume - Resume PSR 2440 * @intel_dp: Intel DP 2441 * 2442 * This function need to be called after pausing psr. 2443 */ 2444 void intel_psr_resume(struct intel_dp *intel_dp) 2445 { 2446 struct intel_display *display = to_intel_display(intel_dp); 2447 struct intel_psr *psr = &intel_dp->psr; 2448 2449 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) 2450 return; 2451 2452 mutex_lock(&psr->lock); 2453 2454 if (!psr->enabled) 2455 goto out; 2456 2457 if (!psr->pause_counter) { 2458 drm_warn(display->drm, "Unbalanced PSR pause/resume!\n"); 2459 goto out; 2460 } 2461 2462 if (--intel_dp->psr.pause_counter == 0) 2463 intel_psr_activate(intel_dp); 2464 2465 out: 2466 mutex_unlock(&psr->lock); 2467 } 2468 2469 /** 2470 * intel_psr_needs_vblank_notification - Check if PSR need vblank enable/disable 2471 * notification. 2472 * @crtc_state: CRTC status 2473 * 2474 * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't 2475 * prevent it in case of Panel Replay. Panel Replay switches main link off on 2476 * DC entry. This means vblank interrupts are not fired and is a problem if 2477 * user-space is polling for vblank events. Also Wa_16025596647 needs 2478 * information when vblank is enabled/disabled. 2479 */ 2480 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state) 2481 { 2482 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2483 struct intel_display *display = to_intel_display(crtc_state); 2484 struct intel_encoder *encoder; 2485 2486 for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) { 2487 struct intel_dp *intel_dp; 2488 2489 if (!intel_encoder_is_dp(encoder)) 2490 continue; 2491 2492 intel_dp = enc_to_intel_dp(encoder); 2493 2494 if (!intel_dp_is_edp(intel_dp)) 2495 continue; 2496 2497 if (CAN_PANEL_REPLAY(intel_dp)) 2498 return true; 2499 2500 if ((DISPLAY_VER(display) == 20 || 2501 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 2502 CAN_PSR(intel_dp)) 2503 return true; 2504 } 2505 2506 return false; 2507 } 2508 2509 /** 2510 * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event 2511 * @dsb: DSB context 2512 * @state: the atomic state 2513 * @crtc: the CRTC 2514 * 2515 * Generate PSR "Frame Change" event. 2516 */ 2517 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb, 2518 struct intel_atomic_state *state, 2519 struct intel_crtc *crtc) 2520 { 2521 const struct intel_crtc_state *crtc_state = 2522 intel_pre_commit_crtc_state(state, crtc); 2523 struct intel_display *display = to_intel_display(crtc); 2524 2525 if (crtc_state->has_psr) 2526 intel_de_write_dsb(display, dsb, 2527 CURSURFLIVE(display, crtc->pipe), 0); 2528 } 2529 2530 /** 2531 * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR 2532 * @crtc_state: the crtc state 2533 * 2534 * Return minimum SCL lines/delay needed by PSR. 2535 */ 2536 int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state) 2537 { 2538 2539 return _intel_psr_min_set_context_latency(crtc_state, 2540 crtc_state->has_panel_replay, 2541 crtc_state->has_sel_update); 2542 } 2543 2544 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display) 2545 { 2546 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 : 2547 PSR2_MAN_TRK_CTL_ENABLE; 2548 } 2549 2550 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display) 2551 { 2552 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 2553 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME : 2554 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; 2555 } 2556 2557 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display) 2558 { 2559 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 2560 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE : 2561 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; 2562 } 2563 2564 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display) 2565 { 2566 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 2567 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME : 2568 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME; 2569 } 2570 2571 static void intel_psr_force_update(struct intel_dp *intel_dp) 2572 { 2573 struct intel_display *display = to_intel_display(intel_dp); 2574 2575 /* 2576 * Display WA #0884: skl+ 2577 * This documented WA for bxt can be safely applied 2578 * broadly so we can force HW tracking to exit PSR 2579 * instead of disabling and re-enabling. 2580 * Workaround tells us to write 0 to CUR_SURFLIVE_A, 2581 * but it makes more sense write to the current active 2582 * pipe. 2583 * 2584 * This workaround do not exist for platforms with display 10 or newer 2585 * but testing proved that it works for up display 13, for newer 2586 * than that testing will be needed. 2587 */ 2588 intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0); 2589 } 2590 2591 void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb, 2592 const struct intel_crtc_state *crtc_state) 2593 { 2594 struct intel_display *display = to_intel_display(crtc_state); 2595 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2596 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2597 struct intel_encoder *encoder; 2598 2599 if (!crtc_state->enable_psr2_sel_fetch) 2600 return; 2601 2602 for_each_intel_encoder_mask_with_psr(display->drm, encoder, 2603 crtc_state->uapi.encoder_mask) { 2604 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2605 2606 if (!dsb) 2607 lockdep_assert_held(&intel_dp->psr.lock); 2608 2609 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled) 2610 return; 2611 break; 2612 } 2613 2614 intel_de_write_dsb(display, dsb, 2615 PSR2_MAN_TRK_CTL(display, cpu_transcoder), 2616 crtc_state->psr2_man_track_ctl); 2617 2618 if (!crtc_state->enable_psr2_su_region_et) 2619 return; 2620 2621 intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 2622 crtc_state->pipe_srcsz_early_tpt); 2623 2624 if (!crtc_state->dsc.compression_enable) 2625 return; 2626 2627 intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state, 2628 drm_rect_height(&crtc_state->psr2_su_area)); 2629 } 2630 2631 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, 2632 bool full_update) 2633 { 2634 struct intel_display *display = to_intel_display(crtc_state); 2635 u32 val = man_trk_ctl_enable_bit_get(display); 2636 2637 /* SF partial frame enable has to be set even on full update */ 2638 val |= man_trk_ctl_partial_frame_bit_get(display); 2639 2640 if (full_update) { 2641 val |= man_trk_ctl_continuos_full_frame(display); 2642 goto exit; 2643 } 2644 2645 if (crtc_state->psr2_su_area.y1 == -1) 2646 goto exit; 2647 2648 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) { 2649 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1); 2650 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1); 2651 } else { 2652 drm_WARN_ON(crtc_state->uapi.crtc->dev, 2653 crtc_state->psr2_su_area.y1 % 4 || 2654 crtc_state->psr2_su_area.y2 % 4); 2655 2656 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR( 2657 crtc_state->psr2_su_area.y1 / 4 + 1); 2658 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR( 2659 crtc_state->psr2_su_area.y2 / 4 + 1); 2660 } 2661 exit: 2662 crtc_state->psr2_man_track_ctl = val; 2663 } 2664 2665 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state, 2666 bool full_update) 2667 { 2668 int width, height; 2669 2670 if (!crtc_state->enable_psr2_su_region_et || full_update) 2671 return 0; 2672 2673 width = drm_rect_width(&crtc_state->psr2_su_area); 2674 height = drm_rect_height(&crtc_state->psr2_su_area); 2675 2676 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1); 2677 } 2678 2679 static void clip_area_update(struct drm_rect *overlap_damage_area, 2680 struct drm_rect *damage_area, 2681 struct drm_rect *pipe_src) 2682 { 2683 if (!drm_rect_intersect(damage_area, pipe_src)) 2684 return; 2685 2686 if (overlap_damage_area->y1 == -1) { 2687 overlap_damage_area->y1 = damage_area->y1; 2688 overlap_damage_area->y2 = damage_area->y2; 2689 return; 2690 } 2691 2692 if (damage_area->y1 < overlap_damage_area->y1) 2693 overlap_damage_area->y1 = damage_area->y1; 2694 2695 if (damage_area->y2 > overlap_damage_area->y2) 2696 overlap_damage_area->y2 = damage_area->y2; 2697 } 2698 2699 static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) 2700 { 2701 struct intel_display *display = to_intel_display(crtc_state); 2702 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2703 u16 y_alignment; 2704 bool su_area_changed = false; 2705 2706 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */ 2707 if (crtc_state->dsc.compression_enable && 2708 (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)) 2709 y_alignment = vdsc_cfg->slice_height; 2710 else 2711 y_alignment = crtc_state->su_y_granularity; 2712 2713 if (crtc_state->psr2_su_area.y1 % y_alignment) { 2714 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment; 2715 su_area_changed = true; 2716 } 2717 2718 if (crtc_state->psr2_su_area.y2 % y_alignment) { 2719 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 / 2720 y_alignment) + 1) * y_alignment; 2721 su_area_changed = true; 2722 } 2723 2724 return su_area_changed; 2725 } 2726 2727 /* 2728 * When early transport is in use we need to extend SU area to cover 2729 * cursor fully when cursor is in SU area. 2730 */ 2731 static void 2732 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state, 2733 struct intel_crtc *crtc, 2734 bool *cursor_in_su_area) 2735 { 2736 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 2737 struct intel_plane_state *new_plane_state; 2738 struct intel_plane *plane; 2739 int i; 2740 2741 if (!crtc_state->enable_psr2_su_region_et) 2742 return; 2743 2744 for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { 2745 struct drm_rect inter; 2746 2747 if (new_plane_state->hw.crtc != crtc_state->uapi.crtc) 2748 continue; 2749 2750 if (plane->id != PLANE_CURSOR) 2751 continue; 2752 2753 if (!new_plane_state->uapi.visible) 2754 continue; 2755 2756 inter = crtc_state->psr2_su_area; 2757 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) 2758 continue; 2759 2760 clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst, 2761 &crtc_state->pipe_src); 2762 *cursor_in_su_area = true; 2763 } 2764 } 2765 2766 /* 2767 * TODO: Not clear how to handle planes with negative position, 2768 * also planes are not updated if they have a negative X 2769 * position so for now doing a full update in this cases 2770 * 2771 * Plane scaling and rotation is not supported by selective fetch and both 2772 * properties can change without a modeset, so need to be check at every 2773 * atomic commit. 2774 */ 2775 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state) 2776 { 2777 if (plane_state->uapi.dst.y1 < 0 || 2778 plane_state->uapi.dst.x1 < 0 || 2779 plane_state->scaler_id >= 0 || 2780 plane_state->hw.rotation != DRM_MODE_ROTATE_0) 2781 return false; 2782 2783 return true; 2784 } 2785 2786 /* 2787 * Check for pipe properties that is not supported by selective fetch. 2788 * 2789 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed 2790 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch 2791 * enabled and going to the full update path. 2792 */ 2793 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state) 2794 { 2795 if (crtc_state->scaler_state.scaler_id >= 0 || 2796 crtc_state->async_flip_planes) 2797 return false; 2798 2799 return true; 2800 } 2801 2802 /* Wa 14019834836 */ 2803 static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state) 2804 { 2805 struct intel_display *display = to_intel_display(crtc_state); 2806 struct intel_encoder *encoder; 2807 int hactive_limit; 2808 2809 if (crtc_state->psr2_su_area.y1 != 0 || 2810 crtc_state->psr2_su_area.y2 != 0) 2811 return; 2812 2813 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2814 hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546; 2815 else 2816 hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273; 2817 2818 if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit) 2819 return; 2820 2821 for_each_intel_encoder_mask_with_psr(display->drm, encoder, 2822 crtc_state->uapi.encoder_mask) { 2823 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2824 2825 if (!intel_dp_is_edp(intel_dp) && 2826 intel_dp->psr.panel_replay_enabled && 2827 intel_dp->psr.sel_update_enabled) { 2828 crtc_state->psr2_su_area.y2++; 2829 return; 2830 } 2831 } 2832 } 2833 2834 static void 2835 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state) 2836 { 2837 struct intel_display *display = to_intel_display(crtc_state); 2838 2839 /* Wa_14014971492 */ 2840 if (!crtc_state->has_panel_replay && 2841 ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || 2842 display->platform.alderlake_p || display->platform.tigerlake)) && 2843 crtc_state->splitter.enable) 2844 crtc_state->psr2_su_area.y1 = 0; 2845 2846 /* Wa 14019834836 */ 2847 if (DISPLAY_VER(display) == 30) 2848 intel_psr_apply_pr_link_on_su_wa(crtc_state); 2849 } 2850 2851 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, 2852 struct intel_crtc *crtc) 2853 { 2854 struct intel_display *display = to_intel_display(state); 2855 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 2856 struct intel_plane_state *new_plane_state, *old_plane_state; 2857 struct intel_plane *plane; 2858 bool full_update = false, su_area_changed; 2859 int i, ret; 2860 2861 if (!crtc_state->enable_psr2_sel_fetch) 2862 return 0; 2863 2864 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) { 2865 full_update = true; 2866 goto skip_sel_fetch_set_loop; 2867 } 2868 2869 crtc_state->psr2_su_area.x1 = 0; 2870 crtc_state->psr2_su_area.y1 = -1; 2871 crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src); 2872 crtc_state->psr2_su_area.y2 = -1; 2873 2874 /* 2875 * Calculate minimal selective fetch area of each plane and calculate 2876 * the pipe damaged area. 2877 * In the next loop the plane selective fetch area will actually be set 2878 * using whole pipe damaged area. 2879 */ 2880 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 2881 new_plane_state, i) { 2882 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1, 2883 .x2 = INT_MAX }; 2884 2885 if (new_plane_state->hw.crtc != crtc_state->uapi.crtc) 2886 continue; 2887 2888 if (!new_plane_state->uapi.visible && 2889 !old_plane_state->uapi.visible) 2890 continue; 2891 2892 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) { 2893 full_update = true; 2894 break; 2895 } 2896 2897 /* 2898 * If visibility or plane moved, mark the whole plane area as 2899 * damaged as it needs to be complete redraw in the new and old 2900 * position. 2901 */ 2902 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible || 2903 !drm_rect_equals(&new_plane_state->uapi.dst, 2904 &old_plane_state->uapi.dst)) { 2905 if (old_plane_state->uapi.visible) { 2906 damaged_area.y1 = old_plane_state->uapi.dst.y1; 2907 damaged_area.y2 = old_plane_state->uapi.dst.y2; 2908 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, 2909 &crtc_state->pipe_src); 2910 } 2911 2912 if (new_plane_state->uapi.visible) { 2913 damaged_area.y1 = new_plane_state->uapi.dst.y1; 2914 damaged_area.y2 = new_plane_state->uapi.dst.y2; 2915 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, 2916 &crtc_state->pipe_src); 2917 } 2918 continue; 2919 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) { 2920 /* If alpha changed mark the whole plane area as damaged */ 2921 damaged_area.y1 = new_plane_state->uapi.dst.y1; 2922 damaged_area.y2 = new_plane_state->uapi.dst.y2; 2923 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, 2924 &crtc_state->pipe_src); 2925 continue; 2926 } 2927 2928 src = drm_plane_state_src(&new_plane_state->uapi); 2929 drm_rect_fp_to_int(&src, &src); 2930 2931 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi, 2932 &new_plane_state->uapi, &damaged_area)) 2933 continue; 2934 2935 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1; 2936 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1; 2937 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1; 2938 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1; 2939 2940 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src); 2941 } 2942 2943 /* 2944 * TODO: For now we are just using full update in case 2945 * selective fetch area calculation fails. To optimize this we 2946 * should identify cases where this happens and fix the area 2947 * calculation for those. 2948 */ 2949 if (crtc_state->psr2_su_area.y1 == -1) { 2950 drm_info_once(display->drm, 2951 "Selective fetch area calculation failed in pipe %c\n", 2952 pipe_name(crtc->pipe)); 2953 full_update = true; 2954 } 2955 2956 if (full_update) 2957 goto skip_sel_fetch_set_loop; 2958 2959 intel_psr_apply_su_area_workarounds(crtc_state); 2960 2961 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 2962 if (ret) 2963 return ret; 2964 2965 do { 2966 bool cursor_in_su_area; 2967 2968 /* 2969 * Adjust su area to cover cursor fully as necessary 2970 * (early transport). This needs to be done after 2971 * drm_atomic_add_affected_planes to ensure visible 2972 * cursor is added into affected planes even when 2973 * cursor is not updated by itself. 2974 */ 2975 intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area); 2976 2977 su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state); 2978 2979 /* 2980 * If the cursor was outside the SU area before 2981 * alignment, the alignment step (which only expands 2982 * SU) may pull the cursor partially inside, so we 2983 * must run ET alignment again to fully cover it. But 2984 * if the cursor was already fully inside before 2985 * alignment, expanding the SU area won't change that, 2986 * so no further work is needed. 2987 */ 2988 if (cursor_in_su_area) 2989 break; 2990 } while (su_area_changed); 2991 2992 /* 2993 * Now that we have the pipe damaged area check if it intersect with 2994 * every plane, if it does set the plane selective fetch area. 2995 */ 2996 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 2997 new_plane_state, i) { 2998 struct drm_rect *sel_fetch_area, inter; 2999 struct intel_plane *linked = new_plane_state->planar_linked_plane; 3000 3001 if (new_plane_state->hw.crtc != crtc_state->uapi.crtc || 3002 !new_plane_state->uapi.visible) 3003 continue; 3004 3005 inter = crtc_state->psr2_su_area; 3006 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; 3007 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) { 3008 sel_fetch_area->y1 = -1; 3009 sel_fetch_area->y2 = -1; 3010 /* 3011 * if plane sel fetch was previously enabled -> 3012 * disable it 3013 */ 3014 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0) 3015 crtc_state->update_planes |= BIT(plane->id); 3016 3017 continue; 3018 } 3019 3020 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) { 3021 full_update = true; 3022 break; 3023 } 3024 3025 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; 3026 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; 3027 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; 3028 crtc_state->update_planes |= BIT(plane->id); 3029 3030 /* 3031 * Sel_fetch_area is calculated for UV plane. Use 3032 * same area for Y plane as well. 3033 */ 3034 if (linked) { 3035 struct intel_plane_state *linked_new_plane_state; 3036 struct drm_rect *linked_sel_fetch_area; 3037 3038 linked_new_plane_state = intel_atomic_get_plane_state(state, linked); 3039 if (IS_ERR(linked_new_plane_state)) 3040 return PTR_ERR(linked_new_plane_state); 3041 3042 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area; 3043 linked_sel_fetch_area->y1 = sel_fetch_area->y1; 3044 linked_sel_fetch_area->y2 = sel_fetch_area->y2; 3045 crtc_state->update_planes |= BIT(linked->id); 3046 } 3047 } 3048 3049 skip_sel_fetch_set_loop: 3050 if (full_update) 3051 clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src, 3052 &crtc_state->pipe_src); 3053 3054 psr2_man_trk_ctl_calc(crtc_state, full_update); 3055 crtc_state->pipe_srcsz_early_tpt = 3056 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update); 3057 return 0; 3058 } 3059 3060 void intel_psr2_panic_force_full_update(const struct intel_crtc_state *crtc_state) 3061 { 3062 struct intel_display *display = to_intel_display(crtc_state); 3063 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3064 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3065 u32 val = man_trk_ctl_enable_bit_get(display); 3066 3067 /* SF partial frame enable has to be set even on full update */ 3068 val |= man_trk_ctl_partial_frame_bit_get(display); 3069 val |= man_trk_ctl_continuos_full_frame(display); 3070 3071 /* Directly write the register */ 3072 intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val); 3073 3074 if (!crtc_state->enable_psr2_su_region_et) 3075 return; 3076 3077 intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0); 3078 } 3079 3080 void intel_psr_pre_plane_update(struct intel_atomic_state *state, 3081 struct intel_crtc *crtc) 3082 { 3083 struct intel_display *display = to_intel_display(state); 3084 const struct intel_crtc_state *old_crtc_state = 3085 intel_atomic_get_old_crtc_state(state, crtc); 3086 const struct intel_crtc_state *new_crtc_state = 3087 intel_atomic_get_new_crtc_state(state, crtc); 3088 struct intel_encoder *encoder; 3089 3090 if (!HAS_PSR(display)) 3091 return; 3092 3093 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, 3094 old_crtc_state->uapi.encoder_mask) { 3095 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3096 struct intel_psr *psr = &intel_dp->psr; 3097 3098 mutex_lock(&psr->lock); 3099 3100 if (!new_crtc_state->has_psr) 3101 psr->no_psr_reason = new_crtc_state->no_psr_reason; 3102 3103 if (psr->enabled) { 3104 /* 3105 * Reasons to disable: 3106 * - PSR disabled in new state 3107 * - All planes will go inactive 3108 * - Changing between PSR versions 3109 * - Region Early Transport changing 3110 * - Display WA #1136: skl, bxt 3111 */ 3112 if (intel_crtc_needs_modeset(new_crtc_state) || 3113 new_crtc_state->update_m_n || 3114 new_crtc_state->update_lrr || 3115 !new_crtc_state->has_psr || 3116 !new_crtc_state->active_planes || 3117 new_crtc_state->has_sel_update != psr->sel_update_enabled || 3118 new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled || 3119 new_crtc_state->has_panel_replay != psr->panel_replay_enabled || 3120 (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled)) 3121 intel_psr_disable_locked(intel_dp); 3122 else if (new_crtc_state->wm_level_disabled) 3123 /* Wa_14015648006 */ 3124 wm_optimization_wa(intel_dp, new_crtc_state); 3125 } 3126 3127 mutex_unlock(&psr->lock); 3128 } 3129 } 3130 3131 static void 3132 verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state) 3133 { 3134 struct intel_display *display = to_intel_display(crtc_state); 3135 3136 if (!crtc_state->has_panel_replay) 3137 return; 3138 3139 drm_WARN_ON(display->drm, 3140 intel_dsc_enabled_on_link(crtc_state) && 3141 crtc_state->panel_replay_dsc_support == 3142 INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED); 3143 } 3144 3145 void intel_psr_post_plane_update(struct intel_atomic_state *state, 3146 struct intel_crtc *crtc) 3147 { 3148 struct intel_display *display = to_intel_display(state); 3149 const struct intel_crtc_state *crtc_state = 3150 intel_atomic_get_new_crtc_state(state, crtc); 3151 struct intel_encoder *encoder; 3152 3153 if (!crtc_state->has_psr) 3154 return; 3155 3156 verify_panel_replay_dsc_state(crtc_state); 3157 3158 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, 3159 crtc_state->uapi.encoder_mask) { 3160 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3161 struct intel_psr *psr = &intel_dp->psr; 3162 bool keep_disabled = false; 3163 3164 mutex_lock(&psr->lock); 3165 3166 drm_WARN_ON(display->drm, 3167 psr->enabled && !crtc_state->active_planes); 3168 3169 if (psr->sink_not_reliable) 3170 keep_disabled = true; 3171 3172 if (!crtc_state->active_planes) { 3173 psr->no_psr_reason = "All planes inactive"; 3174 keep_disabled = true; 3175 } 3176 3177 /* Display WA #1136: skl, bxt */ 3178 if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) { 3179 psr->no_psr_reason = "Workaround #1136 for skl, bxt"; 3180 keep_disabled = true; 3181 } 3182 3183 if (!psr->enabled && !keep_disabled) 3184 intel_psr_enable_locked(intel_dp, crtc_state); 3185 else if (psr->enabled && !crtc_state->wm_level_disabled) 3186 /* Wa_14015648006 */ 3187 wm_optimization_wa(intel_dp, crtc_state); 3188 3189 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ 3190 if (crtc_state->crc_enabled && psr->enabled) 3191 intel_psr_force_update(intel_dp); 3192 3193 /* 3194 * Clear possible busy bits in case we have 3195 * invalidate -> flip -> flush sequence. 3196 */ 3197 intel_dp->psr.busy_frontbuffer_bits = 0; 3198 3199 mutex_unlock(&psr->lock); 3200 } 3201 } 3202 3203 /* 3204 * From bspec: Panel Self Refresh (BDW+) 3205 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of 3206 * exit training time + 1.5 ms of aux channel handshake. 50 ms is 3207 * defensive enough to cover everything. 3208 */ 3209 #define PSR_IDLE_TIMEOUT_MS 50 3210 3211 static int 3212 _psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state, 3213 struct intel_dsb *dsb) 3214 { 3215 struct intel_display *display = to_intel_display(new_crtc_state); 3216 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 3217 3218 /* 3219 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough. 3220 * As all higher states has bit 4 of PSR2 state set we can just wait for 3221 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. 3222 */ 3223 if (dsb) { 3224 intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder), 3225 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200, 3226 PSR_IDLE_TIMEOUT_MS * 1000 / 200); 3227 return true; 3228 } 3229 3230 return intel_de_wait_for_clear_ms(display, 3231 EDP_PSR2_STATUS(display, cpu_transcoder), 3232 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 3233 PSR_IDLE_TIMEOUT_MS); 3234 } 3235 3236 static int 3237 _psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state, 3238 struct intel_dsb *dsb) 3239 { 3240 struct intel_display *display = to_intel_display(new_crtc_state); 3241 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 3242 3243 if (dsb) { 3244 intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder), 3245 EDP_PSR_STATUS_STATE_MASK, 0, 200, 3246 PSR_IDLE_TIMEOUT_MS * 1000 / 200); 3247 return true; 3248 } 3249 3250 return intel_de_wait_for_clear_ms(display, 3251 psr_status_reg(display, cpu_transcoder), 3252 EDP_PSR_STATUS_STATE_MASK, 3253 PSR_IDLE_TIMEOUT_MS); 3254 } 3255 3256 /** 3257 * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update 3258 * @new_crtc_state: new CRTC state 3259 * 3260 * This function is expected to be called from pipe_update_start() where it is 3261 * not expected to race with PSR enable or disable. 3262 */ 3263 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state) 3264 { 3265 struct intel_display *display = to_intel_display(new_crtc_state); 3266 struct intel_encoder *encoder; 3267 3268 if (!new_crtc_state->has_psr) 3269 return; 3270 3271 for_each_intel_encoder_mask_with_psr(display->drm, encoder, 3272 new_crtc_state->uapi.encoder_mask) { 3273 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3274 int ret; 3275 3276 lockdep_assert_held(&intel_dp->psr.lock); 3277 3278 if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled) 3279 continue; 3280 3281 if (intel_dp->psr.sel_update_enabled) 3282 ret = _psr2_ready_for_pipe_update_locked(new_crtc_state, 3283 NULL); 3284 else 3285 ret = _psr1_ready_for_pipe_update_locked(new_crtc_state, 3286 NULL); 3287 3288 if (ret) 3289 drm_err(display->drm, 3290 "PSR wait timed out, atomic update may fail\n"); 3291 } 3292 } 3293 3294 void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb, 3295 const struct intel_crtc_state *new_crtc_state) 3296 { 3297 if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay) 3298 return; 3299 3300 if (new_crtc_state->has_sel_update) 3301 _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb); 3302 else 3303 _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb); 3304 } 3305 3306 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) 3307 { 3308 struct intel_display *display = to_intel_display(intel_dp); 3309 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 3310 i915_reg_t reg; 3311 u32 mask; 3312 int err; 3313 3314 if (!intel_dp->psr.enabled) 3315 return false; 3316 3317 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled || 3318 intel_dp->psr.panel_replay_enabled)) { 3319 reg = EDP_PSR2_STATUS(display, cpu_transcoder); 3320 mask = EDP_PSR2_STATUS_STATE_MASK; 3321 } else { 3322 reg = psr_status_reg(display, cpu_transcoder); 3323 mask = EDP_PSR_STATUS_STATE_MASK; 3324 } 3325 3326 mutex_unlock(&intel_dp->psr.lock); 3327 3328 err = intel_de_wait_for_clear_ms(display, reg, mask, 50); 3329 if (err) 3330 drm_err(display->drm, 3331 "Timed out waiting for PSR Idle for re-enable\n"); 3332 3333 /* After the unlocked wait, verify that PSR is still wanted! */ 3334 mutex_lock(&intel_dp->psr.lock); 3335 return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter; 3336 } 3337 3338 static int intel_psr_fastset_force(struct intel_display *display) 3339 { 3340 struct drm_connector_list_iter conn_iter; 3341 struct drm_modeset_acquire_ctx ctx; 3342 struct drm_atomic_state *state; 3343 struct drm_connector *conn; 3344 int err = 0; 3345 3346 state = drm_atomic_state_alloc(display->drm); 3347 if (!state) 3348 return -ENOMEM; 3349 3350 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 3351 3352 state->acquire_ctx = &ctx; 3353 to_intel_atomic_state(state)->internal = true; 3354 3355 retry: 3356 drm_connector_list_iter_begin(display->drm, &conn_iter); 3357 drm_for_each_connector_iter(conn, &conn_iter) { 3358 struct drm_connector_state *conn_state; 3359 struct drm_crtc_state *crtc_state; 3360 3361 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP) 3362 continue; 3363 3364 conn_state = drm_atomic_get_connector_state(state, conn); 3365 if (IS_ERR(conn_state)) { 3366 err = PTR_ERR(conn_state); 3367 break; 3368 } 3369 3370 if (!conn_state->crtc) 3371 continue; 3372 3373 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc); 3374 if (IS_ERR(crtc_state)) { 3375 err = PTR_ERR(crtc_state); 3376 break; 3377 } 3378 3379 /* Mark mode as changed to trigger a pipe->update() */ 3380 crtc_state->mode_changed = true; 3381 } 3382 drm_connector_list_iter_end(&conn_iter); 3383 3384 if (err == 0) 3385 err = drm_atomic_commit(state); 3386 3387 if (err == -EDEADLK) { 3388 drm_atomic_state_clear(state); 3389 err = drm_modeset_backoff(&ctx); 3390 if (!err) 3391 goto retry; 3392 } 3393 3394 drm_modeset_drop_locks(&ctx); 3395 drm_modeset_acquire_fini(&ctx); 3396 drm_atomic_state_put(state); 3397 3398 return err; 3399 } 3400 3401 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val) 3402 { 3403 struct intel_display *display = to_intel_display(intel_dp); 3404 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; 3405 const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE | 3406 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE); 3407 u32 old_mode, old_disable_bits; 3408 int ret; 3409 3410 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE | 3411 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE | 3412 I915_PSR_DEBUG_MODE_MASK) || 3413 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) { 3414 drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val); 3415 return -EINVAL; 3416 } 3417 3418 ret = mutex_lock_interruptible(&intel_dp->psr.lock); 3419 if (ret) 3420 return ret; 3421 3422 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK; 3423 old_disable_bits = intel_dp->psr.debug & 3424 (I915_PSR_DEBUG_SU_REGION_ET_DISABLE | 3425 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE); 3426 3427 intel_dp->psr.debug = val; 3428 3429 /* 3430 * Do it right away if it's already enabled, otherwise it will be done 3431 * when enabling the source. 3432 */ 3433 if (intel_dp->psr.enabled) 3434 psr_irq_control(intel_dp); 3435 3436 mutex_unlock(&intel_dp->psr.lock); 3437 3438 if (old_mode != mode || old_disable_bits != disable_bits) 3439 ret = intel_psr_fastset_force(display); 3440 3441 return ret; 3442 } 3443 3444 static void intel_psr_handle_irq(struct intel_dp *intel_dp) 3445 { 3446 struct intel_psr *psr = &intel_dp->psr; 3447 3448 intel_psr_disable_locked(intel_dp); 3449 psr->sink_not_reliable = true; 3450 /* let's make sure that sink is awaken */ 3451 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 3452 } 3453 3454 static void intel_psr_work(struct work_struct *work) 3455 { 3456 struct intel_dp *intel_dp = 3457 container_of(work, typeof(*intel_dp), psr.work); 3458 3459 mutex_lock(&intel_dp->psr.lock); 3460 3461 if (!intel_dp->psr.enabled) 3462 goto unlock; 3463 3464 if (READ_ONCE(intel_dp->psr.irq_aux_error)) { 3465 intel_psr_handle_irq(intel_dp); 3466 goto unlock; 3467 } 3468 3469 if (intel_dp->psr.pause_counter) 3470 goto unlock; 3471 3472 /* 3473 * We have to make sure PSR is ready for re-enable 3474 * otherwise it keeps disabled until next full enable/disable cycle. 3475 * PSR might take some time to get fully disabled 3476 * and be ready for re-enable. 3477 */ 3478 if (!__psr_wait_for_idle_locked(intel_dp)) 3479 goto unlock; 3480 3481 /* 3482 * The delayed work can race with an invalidate hence we need to 3483 * recheck. Since psr_flush first clears this and then reschedules we 3484 * won't ever miss a flush when bailing out here. 3485 */ 3486 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active) 3487 goto unlock; 3488 3489 intel_psr_activate(intel_dp); 3490 unlock: 3491 mutex_unlock(&intel_dp->psr.lock); 3492 } 3493 3494 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp) 3495 { 3496 struct intel_display *display = to_intel_display(intel_dp); 3497 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 3498 3499 if (!intel_dp->psr.psr2_sel_fetch_enabled) 3500 return; 3501 3502 if (DISPLAY_VER(display) >= 20) 3503 intel_de_write(display, LNL_SFF_CTL(cpu_transcoder), 3504 LNL_SFF_CTL_SF_SINGLE_FULL_FRAME); 3505 else 3506 intel_de_write(display, 3507 PSR2_MAN_TRK_CTL(display, cpu_transcoder), 3508 man_trk_ctl_enable_bit_get(display) | 3509 man_trk_ctl_partial_frame_bit_get(display) | 3510 man_trk_ctl_single_full_frame_bit_get(display) | 3511 man_trk_ctl_continuos_full_frame(display)); 3512 } 3513 3514 static void _psr_invalidate_handle(struct intel_dp *intel_dp) 3515 { 3516 struct intel_display *display = to_intel_display(intel_dp); 3517 3518 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { 3519 if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) { 3520 intel_dp->psr.psr2_sel_fetch_cff_enabled = true; 3521 intel_psr_configure_full_frame_update(intel_dp); 3522 } 3523 3524 intel_psr_force_update(intel_dp); 3525 } else { 3526 intel_psr_exit(intel_dp); 3527 } 3528 } 3529 3530 /** 3531 * intel_psr_invalidate - Invalidate PSR 3532 * @display: display device 3533 * @frontbuffer_bits: frontbuffer plane tracking bits 3534 * @origin: which operation caused the invalidate 3535 * 3536 * Since the hardware frontbuffer tracking has gaps we need to integrate 3537 * with the software frontbuffer tracking. This function gets called every 3538 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be 3539 * disabled if the frontbuffer mask contains a buffer relevant to PSR. 3540 * 3541 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits." 3542 */ 3543 void intel_psr_invalidate(struct intel_display *display, 3544 unsigned frontbuffer_bits, enum fb_op_origin origin) 3545 { 3546 struct intel_encoder *encoder; 3547 3548 if (origin == ORIGIN_FLIP) 3549 return; 3550 3551 for_each_intel_encoder_with_psr(display->drm, encoder) { 3552 unsigned int pipe_frontbuffer_bits = frontbuffer_bits; 3553 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3554 3555 mutex_lock(&intel_dp->psr.lock); 3556 if (!intel_dp->psr.enabled) { 3557 mutex_unlock(&intel_dp->psr.lock); 3558 continue; 3559 } 3560 3561 pipe_frontbuffer_bits &= 3562 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe); 3563 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits; 3564 3565 if (pipe_frontbuffer_bits) 3566 _psr_invalidate_handle(intel_dp); 3567 3568 mutex_unlock(&intel_dp->psr.lock); 3569 } 3570 } 3571 /* 3572 * When we will be completely rely on PSR2 S/W tracking in future, 3573 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP 3574 * event also therefore tgl_dc3co_flush_locked() require to be changed 3575 * accordingly in future. 3576 */ 3577 static void 3578 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, 3579 enum fb_op_origin origin) 3580 { 3581 struct intel_display *display = to_intel_display(intel_dp); 3582 3583 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled || 3584 !intel_dp->psr.active) 3585 return; 3586 3587 /* 3588 * At every frontbuffer flush flip event modified delay of delayed work, 3589 * when delayed work schedules that means display has been idle. 3590 */ 3591 if (!(frontbuffer_bits & 3592 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe))) 3593 return; 3594 3595 tgl_psr2_enable_dc3co(intel_dp); 3596 mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work, 3597 intel_dp->psr.dc3co_exit_delay); 3598 } 3599 3600 static void _psr_flush_handle(struct intel_dp *intel_dp) 3601 { 3602 struct intel_display *display = to_intel_display(intel_dp); 3603 3604 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { 3605 /* Selective fetch prior LNL */ 3606 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { 3607 /* can we turn CFF off? */ 3608 if (intel_dp->psr.busy_frontbuffer_bits == 0) 3609 intel_dp->psr.psr2_sel_fetch_cff_enabled = false; 3610 } 3611 3612 /* 3613 * Still keep cff bit enabled as we don't have proper SU 3614 * configuration in case update is sent for any reason after 3615 * sff bit gets cleared by the HW on next vblank. 3616 * 3617 * NOTE: Setting cff bit is not needed for LunarLake onwards as 3618 * we have own register for SFF bit and we are not overwriting 3619 * existing SU configuration 3620 */ 3621 intel_psr_configure_full_frame_update(intel_dp); 3622 3623 intel_psr_force_update(intel_dp); 3624 } else if (!intel_dp->psr.psr2_sel_fetch_enabled) { 3625 /* 3626 * PSR1 on all platforms 3627 * PSR2 HW tracking 3628 * Panel Replay Full frame update 3629 */ 3630 intel_psr_force_update(intel_dp); 3631 } else { 3632 /* Selective update LNL onwards */ 3633 intel_psr_exit(intel_dp); 3634 } 3635 3636 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits) 3637 queue_work(display->wq.unordered, &intel_dp->psr.work); 3638 } 3639 3640 /** 3641 * intel_psr_flush - Flush PSR 3642 * @display: display device 3643 * @frontbuffer_bits: frontbuffer plane tracking bits 3644 * @origin: which operation caused the flush 3645 * 3646 * Since the hardware frontbuffer tracking has gaps we need to integrate 3647 * with the software frontbuffer tracking. This function gets called every 3648 * time frontbuffer rendering has completed and flushed out to memory. PSR 3649 * can be enabled again if no other frontbuffer relevant to PSR is dirty. 3650 * 3651 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. 3652 */ 3653 void intel_psr_flush(struct intel_display *display, 3654 unsigned frontbuffer_bits, enum fb_op_origin origin) 3655 { 3656 struct intel_encoder *encoder; 3657 3658 for_each_intel_encoder_with_psr(display->drm, encoder) { 3659 unsigned int pipe_frontbuffer_bits = frontbuffer_bits; 3660 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3661 3662 mutex_lock(&intel_dp->psr.lock); 3663 if (!intel_dp->psr.enabled) { 3664 mutex_unlock(&intel_dp->psr.lock); 3665 continue; 3666 } 3667 3668 pipe_frontbuffer_bits &= 3669 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe); 3670 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits; 3671 3672 /* 3673 * If the PSR is paused by an explicit intel_psr_paused() call, 3674 * we have to ensure that the PSR is not activated until 3675 * intel_psr_resume() is called. 3676 */ 3677 if (intel_dp->psr.pause_counter) 3678 goto unlock; 3679 3680 if (origin == ORIGIN_FLIP || 3681 (origin == ORIGIN_CURSOR_UPDATE && 3682 !intel_dp->psr.psr2_sel_fetch_enabled)) { 3683 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin); 3684 goto unlock; 3685 } 3686 3687 if (pipe_frontbuffer_bits == 0) 3688 goto unlock; 3689 3690 /* By definition flush = invalidate + flush */ 3691 _psr_flush_handle(intel_dp); 3692 unlock: 3693 mutex_unlock(&intel_dp->psr.lock); 3694 } 3695 } 3696 3697 /** 3698 * intel_psr_init - Init basic PSR work and mutex. 3699 * @intel_dp: Intel DP 3700 * 3701 * This function is called after the initializing connector. 3702 * (the initializing of connector treats the handling of connector capabilities) 3703 * And it initializes basic PSR stuff for each DP Encoder. 3704 */ 3705 void intel_psr_init(struct intel_dp *intel_dp) 3706 { 3707 struct intel_display *display = to_intel_display(intel_dp); 3708 struct intel_connector *connector = intel_dp->attached_connector; 3709 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3710 3711 if (!(HAS_PSR(display) || HAS_DP20(display))) 3712 return; 3713 3714 /* 3715 * HSW spec explicitly says PSR is tied to port A. 3716 * BDW+ platforms have a instance of PSR registers per transcoder but 3717 * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder 3718 * than eDP one. 3719 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11. 3720 * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11. 3721 * But GEN12 supports a instance of PSR registers per transcoder. 3722 */ 3723 if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) { 3724 drm_dbg_kms(display->drm, 3725 "PSR condition failed: Port not supported\n"); 3726 return; 3727 } 3728 3729 if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) || 3730 DISPLAY_VER(display) >= 20) 3731 intel_dp->psr.source_panel_replay_support = true; 3732 3733 if (HAS_PSR(display) && intel_dp_is_edp(intel_dp)) 3734 intel_dp->psr.source_support = true; 3735 3736 /* Set link_standby x link_off defaults */ 3737 if (DISPLAY_VER(display) < 12) 3738 /* For new platforms up to TGL let's respect VBT back again */ 3739 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link; 3740 3741 INIT_WORK(&intel_dp->psr.work, intel_psr_work); 3742 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work); 3743 mutex_init(&intel_dp->psr.lock); 3744 } 3745 3746 static int psr_get_status_and_error_status(struct intel_dp *intel_dp, 3747 u8 *status, u8 *error_status) 3748 { 3749 struct drm_dp_aux *aux = &intel_dp->aux; 3750 int ret; 3751 unsigned int offset; 3752 3753 offset = intel_dp->psr.panel_replay_enabled ? 3754 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS; 3755 3756 ret = drm_dp_dpcd_readb(aux, offset, status); 3757 if (ret != 1) 3758 return ret; 3759 3760 offset = intel_dp->psr.panel_replay_enabled ? 3761 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS; 3762 3763 ret = drm_dp_dpcd_readb(aux, offset, error_status); 3764 if (ret != 1) 3765 return ret; 3766 3767 *status = *status & DP_PSR_SINK_STATE_MASK; 3768 3769 return 0; 3770 } 3771 3772 static void psr_alpm_check(struct intel_dp *intel_dp) 3773 { 3774 struct intel_psr *psr = &intel_dp->psr; 3775 3776 if (!psr->sel_update_enabled) 3777 return; 3778 3779 if (intel_alpm_get_error(intel_dp)) { 3780 intel_psr_disable_locked(intel_dp); 3781 psr->sink_not_reliable = true; 3782 } 3783 } 3784 3785 static void psr_capability_changed_check(struct intel_dp *intel_dp) 3786 { 3787 struct intel_display *display = to_intel_display(intel_dp); 3788 struct intel_psr *psr = &intel_dp->psr; 3789 u8 val; 3790 int r; 3791 3792 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); 3793 if (r != 1) { 3794 drm_err(display->drm, "Error reading DP_PSR_ESI\n"); 3795 return; 3796 } 3797 3798 if (val & DP_PSR_CAPS_CHANGE) { 3799 intel_psr_disable_locked(intel_dp); 3800 psr->sink_not_reliable = true; 3801 drm_dbg_kms(display->drm, 3802 "Sink PSR capability changed, disabling PSR\n"); 3803 3804 /* Clearing it */ 3805 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); 3806 } 3807 } 3808 3809 /* 3810 * On common bits: 3811 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR 3812 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR 3813 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR 3814 * this function is relying on PSR definitions 3815 */ 3816 void intel_psr_short_pulse(struct intel_dp *intel_dp) 3817 { 3818 struct intel_display *display = to_intel_display(intel_dp); 3819 struct intel_psr *psr = &intel_dp->psr; 3820 u8 status, error_status; 3821 const u8 errors = DP_PSR_RFB_STORAGE_ERROR | 3822 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | 3823 DP_PSR_LINK_CRC_ERROR; 3824 3825 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) 3826 return; 3827 3828 mutex_lock(&psr->lock); 3829 3830 psr->link_ok = false; 3831 3832 if (!psr->enabled) 3833 goto exit; 3834 3835 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { 3836 drm_err(display->drm, 3837 "Error reading PSR status or error status\n"); 3838 goto exit; 3839 } 3840 3841 if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) || 3842 (error_status & errors)) { 3843 intel_psr_disable_locked(intel_dp); 3844 psr->sink_not_reliable = true; 3845 } 3846 3847 if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR && 3848 !error_status) 3849 drm_dbg_kms(display->drm, 3850 "PSR sink internal error, disabling PSR\n"); 3851 if (error_status & DP_PSR_RFB_STORAGE_ERROR) 3852 drm_dbg_kms(display->drm, 3853 "PSR RFB storage error, disabling PSR\n"); 3854 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) 3855 drm_dbg_kms(display->drm, 3856 "PSR VSC SDP uncorrectable error, disabling PSR\n"); 3857 if (error_status & DP_PSR_LINK_CRC_ERROR) 3858 drm_dbg_kms(display->drm, 3859 "PSR Link CRC error, disabling PSR\n"); 3860 3861 if (error_status & ~errors) 3862 drm_err(display->drm, 3863 "PSR_ERROR_STATUS unhandled errors %x\n", 3864 error_status & ~errors); 3865 /* clear status register */ 3866 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); 3867 3868 if (!psr->panel_replay_enabled) { 3869 psr_alpm_check(intel_dp); 3870 psr_capability_changed_check(intel_dp); 3871 } 3872 3873 exit: 3874 mutex_unlock(&psr->lock); 3875 } 3876 3877 bool intel_psr_enabled(struct intel_dp *intel_dp) 3878 { 3879 bool ret; 3880 3881 if (!CAN_PSR(intel_dp)) 3882 return false; 3883 3884 mutex_lock(&intel_dp->psr.lock); 3885 ret = intel_dp->psr.enabled; 3886 mutex_unlock(&intel_dp->psr.lock); 3887 3888 return ret; 3889 } 3890 3891 /** 3892 * intel_psr_link_ok - return psr->link_ok 3893 * @intel_dp: struct intel_dp 3894 * 3895 * We are seeing unexpected link re-trainings with some panels. This is caused 3896 * by panel stating bad link status after PSR is enabled. Code checking link 3897 * status can call this to ensure it can ignore bad link status stated by the 3898 * panel I.e. if panel is stating bad link and intel_psr_link_ok is stating link 3899 * is ok caller should rely on latter. 3900 * 3901 * Return value of link_ok 3902 */ 3903 bool intel_psr_link_ok(struct intel_dp *intel_dp) 3904 { 3905 bool ret; 3906 3907 if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) || 3908 !intel_dp_is_edp(intel_dp)) 3909 return false; 3910 3911 mutex_lock(&intel_dp->psr.lock); 3912 ret = intel_dp->psr.link_ok; 3913 mutex_unlock(&intel_dp->psr.lock); 3914 3915 return ret; 3916 } 3917 3918 /** 3919 * intel_psr_lock - grab PSR lock 3920 * @crtc_state: the crtc state 3921 * 3922 * This is initially meant to be used by around CRTC update, when 3923 * vblank sensitive registers are updated and we need grab the lock 3924 * before it to avoid vblank evasion. 3925 */ 3926 void intel_psr_lock(const struct intel_crtc_state *crtc_state) 3927 { 3928 struct intel_display *display = to_intel_display(crtc_state); 3929 struct intel_encoder *encoder; 3930 3931 if (!crtc_state->has_psr) 3932 return; 3933 3934 for_each_intel_encoder_mask_with_psr(display->drm, encoder, 3935 crtc_state->uapi.encoder_mask) { 3936 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3937 3938 mutex_lock(&intel_dp->psr.lock); 3939 break; 3940 } 3941 } 3942 3943 /** 3944 * intel_psr_unlock - release PSR lock 3945 * @crtc_state: the crtc state 3946 * 3947 * Release the PSR lock that was held during pipe update. 3948 */ 3949 void intel_psr_unlock(const struct intel_crtc_state *crtc_state) 3950 { 3951 struct intel_display *display = to_intel_display(crtc_state); 3952 struct intel_encoder *encoder; 3953 3954 if (!crtc_state->has_psr) 3955 return; 3956 3957 for_each_intel_encoder_mask_with_psr(display->drm, encoder, 3958 crtc_state->uapi.encoder_mask) { 3959 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3960 3961 mutex_unlock(&intel_dp->psr.lock); 3962 break; 3963 } 3964 } 3965 3966 /* Wa_16025596647 */ 3967 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp) 3968 { 3969 struct intel_display *display = to_intel_display(intel_dp); 3970 bool dc5_dc6_blocked; 3971 3972 if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used) 3973 return; 3974 3975 dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp); 3976 3977 if (intel_dp->psr.sel_update_enabled) 3978 psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 : 3979 psr_compute_idle_frames(intel_dp)); 3980 else 3981 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, 3982 intel_dp->psr.pipe, 3983 dc5_dc6_blocked); 3984 } 3985 3986 static void psr_dc5_dc6_wa_work(struct work_struct *work) 3987 { 3988 struct intel_display *display = container_of(work, typeof(*display), 3989 psr_dc5_dc6_wa_work); 3990 struct intel_encoder *encoder; 3991 3992 for_each_intel_encoder_with_psr(display->drm, encoder) { 3993 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3994 3995 mutex_lock(&intel_dp->psr.lock); 3996 3997 if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled && 3998 !intel_dp->psr.pkg_c_latency_used) 3999 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); 4000 4001 mutex_unlock(&intel_dp->psr.lock); 4002 } 4003 } 4004 4005 /** 4006 * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6 4007 * @display: intel atomic state 4008 * 4009 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule 4010 * psr_dc5_dc6_wa_work used for applying/removing the workaround. 4011 */ 4012 void intel_psr_notify_dc5_dc6(struct intel_display *display) 4013 { 4014 if (DISPLAY_VER(display) != 20 && 4015 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) 4016 return; 4017 4018 schedule_work(&display->psr_dc5_dc6_wa_work); 4019 } 4020 4021 /** 4022 * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa 4023 * @display: intel atomic state 4024 * 4025 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init 4026 * psr_dc5_dc6_wa_work used for applying the workaround. 4027 */ 4028 void intel_psr_dc5_dc6_wa_init(struct intel_display *display) 4029 { 4030 if (DISPLAY_VER(display) != 20 && 4031 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) 4032 return; 4033 4034 INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work); 4035 } 4036 4037 /** 4038 * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe 4039 * @state: intel atomic state 4040 * @crtc: intel crtc 4041 * @enable: enable/disable 4042 * 4043 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply 4044 * remove the workaround when pipe is getting enabled/disabled 4045 */ 4046 void intel_psr_notify_pipe_change(struct intel_atomic_state *state, 4047 struct intel_crtc *crtc, bool enable) 4048 { 4049 struct intel_display *display = to_intel_display(state); 4050 struct intel_encoder *encoder; 4051 4052 if (DISPLAY_VER(display) != 20 && 4053 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) 4054 return; 4055 4056 for_each_intel_encoder_with_psr(display->drm, encoder) { 4057 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4058 u8 active_non_psr_pipes; 4059 4060 mutex_lock(&intel_dp->psr.lock); 4061 4062 if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled) 4063 goto unlock; 4064 4065 active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes; 4066 4067 if (enable) 4068 active_non_psr_pipes |= BIT(crtc->pipe); 4069 else 4070 active_non_psr_pipes &= ~BIT(crtc->pipe); 4071 4072 if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes) 4073 goto unlock; 4074 4075 if ((enable && intel_dp->psr.active_non_psr_pipes) || 4076 (!enable && !intel_dp->psr.active_non_psr_pipes) || 4077 !intel_dp->psr.pkg_c_latency_used) { 4078 intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes; 4079 goto unlock; 4080 } 4081 4082 intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes; 4083 4084 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); 4085 unlock: 4086 mutex_unlock(&intel_dp->psr.lock); 4087 } 4088 } 4089 4090 /** 4091 * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank 4092 * @display: intel display struct 4093 * @enable: enable/disable 4094 * 4095 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply 4096 * remove the workaround when vblank is getting enabled/disabled 4097 */ 4098 void intel_psr_notify_vblank_enable_disable(struct intel_display *display, 4099 bool enable) 4100 { 4101 struct intel_encoder *encoder; 4102 4103 for_each_intel_encoder_with_psr(display->drm, encoder) { 4104 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4105 4106 mutex_lock(&intel_dp->psr.lock); 4107 if (intel_dp->psr.panel_replay_enabled) { 4108 mutex_unlock(&intel_dp->psr.lock); 4109 break; 4110 } 4111 4112 if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used) 4113 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); 4114 4115 mutex_unlock(&intel_dp->psr.lock); 4116 return; 4117 } 4118 4119 /* 4120 * NOTE: intel_display_power_set_target_dc_state is used 4121 * only by PSR * code for DC3CO handling. DC3CO target 4122 * state is currently disabled in * PSR code. If DC3CO 4123 * is taken into use we need take that into account here 4124 * as well. 4125 */ 4126 intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE : 4127 DC_STATE_EN_UPTO_DC6); 4128 } 4129 4130 static void 4131 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) 4132 { 4133 struct intel_display *display = to_intel_display(intel_dp); 4134 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 4135 const char *status = "unknown"; 4136 u32 val, status_val; 4137 4138 if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) && 4139 (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) { 4140 static const char * const live_status[] = { 4141 "IDLE", 4142 "CAPTURE", 4143 "CAPTURE_FS", 4144 "SLEEP", 4145 "BUFON_FW", 4146 "ML_UP", 4147 "SU_STANDBY", 4148 "FAST_SLEEP", 4149 "DEEP_SLEEP", 4150 "BUF_ON", 4151 "TG_ON" 4152 }; 4153 val = intel_de_read(display, 4154 EDP_PSR2_STATUS(display, cpu_transcoder)); 4155 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); 4156 if (status_val < ARRAY_SIZE(live_status)) 4157 status = live_status[status_val]; 4158 } else { 4159 static const char * const live_status[] = { 4160 "IDLE", 4161 "SRDONACK", 4162 "SRDENT", 4163 "BUFOFF", 4164 "BUFON", 4165 "AUXACK", 4166 "SRDOFFACK", 4167 "SRDENT_ON", 4168 }; 4169 val = intel_de_read(display, 4170 psr_status_reg(display, cpu_transcoder)); 4171 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val); 4172 if (status_val < ARRAY_SIZE(live_status)) 4173 status = live_status[status_val]; 4174 } 4175 4176 seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val); 4177 } 4178 4179 static void intel_psr_sink_capability(struct intel_connector *connector, 4180 struct seq_file *m) 4181 { 4182 seq_printf(m, "Sink support: PSR = %s", 4183 str_yes_no(connector->dp.psr_caps.support)); 4184 4185 if (connector->dp.psr_caps.support) 4186 seq_printf(m, " [0x%02x]", connector->dp.psr_caps.dpcd[0]); 4187 if (connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED) 4188 seq_printf(m, " (Early Transport)"); 4189 seq_printf(m, ", Panel Replay = %s", str_yes_no(connector->dp.panel_replay_caps.support)); 4190 seq_printf(m, ", Panel Replay Selective Update = %s", 4191 str_yes_no(connector->dp.panel_replay_caps.su_support)); 4192 seq_printf(m, ", Panel Replay DSC support = %s", 4193 panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support)); 4194 if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & 4195 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT) 4196 seq_printf(m, " (Early Transport)"); 4197 seq_printf(m, "\n"); 4198 } 4199 4200 static void intel_psr_print_mode(struct intel_dp *intel_dp, 4201 struct seq_file *m) 4202 { 4203 struct intel_psr *psr = &intel_dp->psr; 4204 const char *status, *mode, *region_et; 4205 4206 if (psr->enabled) 4207 status = " enabled"; 4208 else 4209 status = "disabled"; 4210 4211 if (psr->panel_replay_enabled && psr->sel_update_enabled) 4212 mode = "Panel Replay Selective Update"; 4213 else if (psr->panel_replay_enabled) 4214 mode = "Panel Replay"; 4215 else if (psr->sel_update_enabled) 4216 mode = "PSR2"; 4217 else if (psr->enabled) 4218 mode = "PSR1"; 4219 else 4220 mode = ""; 4221 4222 if (psr->su_region_et_enabled) 4223 region_et = " (Early Transport)"; 4224 else 4225 region_et = ""; 4226 4227 seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et); 4228 if (psr->no_psr_reason) 4229 seq_printf(m, " %s\n", psr->no_psr_reason); 4230 } 4231 4232 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp, 4233 struct intel_connector *connector) 4234 { 4235 struct intel_display *display = to_intel_display(intel_dp); 4236 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 4237 struct intel_psr *psr = &intel_dp->psr; 4238 struct ref_tracker *wakeref; 4239 bool enabled; 4240 u32 val, psr2_ctl; 4241 4242 intel_psr_sink_capability(connector, m); 4243 4244 if (!(connector->dp.psr_caps.support || connector->dp.panel_replay_caps.support)) 4245 return 0; 4246 4247 wakeref = intel_display_rpm_get(display); 4248 mutex_lock(&psr->lock); 4249 4250 intel_psr_print_mode(intel_dp, m); 4251 4252 if (!psr->enabled) { 4253 seq_printf(m, "PSR sink not reliable: %s\n", 4254 str_yes_no(psr->sink_not_reliable)); 4255 4256 goto unlock; 4257 } 4258 4259 if (psr->panel_replay_enabled) { 4260 val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder)); 4261 4262 if (intel_dp_is_edp(intel_dp)) 4263 psr2_ctl = intel_de_read(display, 4264 EDP_PSR2_CTL(display, 4265 cpu_transcoder)); 4266 4267 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE; 4268 } else if (psr->sel_update_enabled) { 4269 val = intel_de_read(display, 4270 EDP_PSR2_CTL(display, cpu_transcoder)); 4271 enabled = val & EDP_PSR2_ENABLE; 4272 } else { 4273 val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)); 4274 enabled = val & EDP_PSR_ENABLE; 4275 } 4276 seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n", 4277 str_enabled_disabled(enabled), val); 4278 if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp)) 4279 seq_printf(m, "PSR2_CTL: 0x%08x\n", 4280 psr2_ctl); 4281 psr_source_status(intel_dp, m); 4282 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", 4283 psr->busy_frontbuffer_bits); 4284 4285 /* 4286 * SKL+ Perf counter is reset to 0 everytime DC state is entered 4287 */ 4288 val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder)); 4289 seq_printf(m, "Performance counter: %u\n", 4290 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val)); 4291 4292 if (psr->debug & I915_PSR_DEBUG_IRQ) { 4293 seq_printf(m, "Last attempted entry at: %lld\n", 4294 psr->last_entry_attempt); 4295 seq_printf(m, "Last exit at: %lld\n", psr->last_exit); 4296 } 4297 4298 if (psr->sel_update_enabled) { 4299 u32 su_frames_val[3]; 4300 int frame; 4301 4302 /* 4303 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P 4304 * (it returns zeros only) and it has been removed on Xe2_LPD. 4305 */ 4306 if (DISPLAY_VER(display) < 13) { 4307 /* 4308 * Reading all 3 registers before hand to minimize crossing a 4309 * frame boundary between register reads 4310 */ 4311 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { 4312 val = intel_de_read(display, 4313 PSR2_SU_STATUS(display, cpu_transcoder, frame)); 4314 su_frames_val[frame / 3] = val; 4315 } 4316 4317 seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); 4318 4319 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { 4320 u32 su_blocks; 4321 4322 su_blocks = su_frames_val[frame / 3] & 4323 PSR2_SU_STATUS_MASK(frame); 4324 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); 4325 seq_printf(m, "%d\t%d\n", frame, su_blocks); 4326 } 4327 } 4328 4329 seq_printf(m, "PSR2 selective fetch: %s\n", 4330 str_enabled_disabled(psr->psr2_sel_fetch_enabled)); 4331 } 4332 4333 unlock: 4334 mutex_unlock(&psr->lock); 4335 intel_display_rpm_put(display, wakeref); 4336 4337 return 0; 4338 } 4339 4340 static int i915_edp_psr_status_show(struct seq_file *m, void *data) 4341 { 4342 struct intel_display *display = m->private; 4343 struct intel_dp *intel_dp = NULL; 4344 struct intel_encoder *encoder; 4345 4346 if (!HAS_PSR(display)) 4347 return -ENODEV; 4348 4349 /* Find the first EDP which supports PSR */ 4350 for_each_intel_encoder_with_psr(display->drm, encoder) { 4351 intel_dp = enc_to_intel_dp(encoder); 4352 break; 4353 } 4354 4355 if (!intel_dp) 4356 return -ENODEV; 4357 4358 return intel_psr_status(m, intel_dp, intel_dp->attached_connector); 4359 } 4360 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status); 4361 4362 static int 4363 i915_edp_psr_debug_set(void *data, u64 val) 4364 { 4365 struct intel_display *display = data; 4366 struct intel_encoder *encoder; 4367 int ret = -ENODEV; 4368 4369 if (!HAS_PSR(display)) 4370 return ret; 4371 4372 for_each_intel_encoder_with_psr(display->drm, encoder) { 4373 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4374 4375 drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val); 4376 4377 // TODO: split to each transcoder's PSR debug state 4378 with_intel_display_rpm(display) 4379 ret = intel_psr_debug_set(intel_dp, val); 4380 } 4381 4382 return ret; 4383 } 4384 4385 static int 4386 i915_edp_psr_debug_get(void *data, u64 *val) 4387 { 4388 struct intel_display *display = data; 4389 struct intel_encoder *encoder; 4390 4391 if (!HAS_PSR(display)) 4392 return -ENODEV; 4393 4394 for_each_intel_encoder_with_psr(display->drm, encoder) { 4395 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4396 4397 // TODO: split to each transcoder's PSR debug state 4398 *val = READ_ONCE(intel_dp->psr.debug); 4399 return 0; 4400 } 4401 4402 return -ENODEV; 4403 } 4404 4405 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, 4406 i915_edp_psr_debug_get, i915_edp_psr_debug_set, 4407 "%llu\n"); 4408 4409 void intel_psr_debugfs_register(struct intel_display *display) 4410 { 4411 struct dentry *debugfs_root = display->drm->debugfs_root; 4412 4413 debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root, 4414 display, &i915_edp_psr_debug_fops); 4415 4416 debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root, 4417 display, &i915_edp_psr_status_fops); 4418 } 4419 4420 static const char *psr_mode_str(struct intel_dp *intel_dp) 4421 { 4422 if (intel_dp->psr.panel_replay_enabled) 4423 return "PANEL-REPLAY"; 4424 else if (intel_dp->psr.enabled) 4425 return "PSR"; 4426 4427 return "unknown"; 4428 } 4429 4430 static int i915_psr_sink_status_show(struct seq_file *m, void *data) 4431 { 4432 struct intel_connector *connector = m->private; 4433 struct intel_dp *intel_dp = intel_attached_dp(connector); 4434 static const char * const sink_status[] = { 4435 "inactive", 4436 "transition to active, capture and display", 4437 "active, display from RFB", 4438 "active, capture and display on sink device timings", 4439 "transition to inactive, capture and display, timing re-sync", 4440 "reserved", 4441 "reserved", 4442 "sink internal error", 4443 }; 4444 const char *str; 4445 int ret; 4446 u8 status, error_status; 4447 4448 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) { 4449 seq_puts(m, "PSR/Panel-Replay Unsupported\n"); 4450 return -ENODEV; 4451 } 4452 4453 if (connector->base.status != connector_status_connected) 4454 return -ENODEV; 4455 4456 ret = psr_get_status_and_error_status(intel_dp, &status, &error_status); 4457 if (ret) 4458 return ret; 4459 4460 status &= DP_PSR_SINK_STATE_MASK; 4461 if (status < ARRAY_SIZE(sink_status)) 4462 str = sink_status[status]; 4463 else 4464 str = "unknown"; 4465 4466 seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str); 4467 4468 seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status); 4469 4470 if (error_status & (DP_PSR_RFB_STORAGE_ERROR | 4471 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | 4472 DP_PSR_LINK_CRC_ERROR)) 4473 seq_puts(m, ":\n"); 4474 else 4475 seq_puts(m, "\n"); 4476 if (error_status & DP_PSR_RFB_STORAGE_ERROR) 4477 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp)); 4478 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) 4479 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp)); 4480 if (error_status & DP_PSR_LINK_CRC_ERROR) 4481 seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp)); 4482 4483 return ret; 4484 } 4485 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); 4486 4487 static int i915_psr_status_show(struct seq_file *m, void *data) 4488 { 4489 struct intel_connector *connector = m->private; 4490 struct intel_dp *intel_dp = intel_attached_dp(connector); 4491 4492 return intel_psr_status(m, intel_dp, connector); 4493 } 4494 DEFINE_SHOW_ATTRIBUTE(i915_psr_status); 4495 4496 void intel_psr_connector_debugfs_add(struct intel_connector *connector) 4497 { 4498 struct intel_display *display = to_intel_display(connector); 4499 struct dentry *root = connector->base.debugfs_entry; 4500 4501 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP && 4502 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) 4503 return; 4504 4505 debugfs_create_file("i915_psr_sink_status", 0444, root, 4506 connector, &i915_psr_sink_status_fops); 4507 4508 if (HAS_PSR(display) || HAS_DP20(display)) 4509 debugfs_create_file("i915_psr_status", 0444, root, 4510 connector, &i915_psr_status_fops); 4511 } 4512 4513 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) 4514 { 4515 /* 4516 * eDP Panel Replay uses always ALPM 4517 * PSR2 uses ALPM but PSR1 doesn't 4518 */ 4519 return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update || 4520 crtc_state->has_panel_replay); 4521 } 4522 4523 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp, 4524 const struct intel_crtc_state *crtc_state) 4525 { 4526 return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay; 4527 } 4528 4529 void intel_psr_compute_config_late(struct intel_dp *intel_dp, 4530 struct intel_crtc_state *crtc_state) 4531 { 4532 struct intel_display *display = to_intel_display(intel_dp); 4533 int vblank = intel_crtc_vblank_length(crtc_state); 4534 int wake_lines; 4535 4536 if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state)) 4537 wake_lines = crtc_state->alpm_state.aux_less_wake_lines; 4538 else if (intel_psr_needs_alpm(intel_dp, crtc_state)) 4539 wake_lines = DISPLAY_VER(display) < 20 ? 4540 psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines, 4541 crtc_state->alpm_state.fast_wake_lines) : 4542 crtc_state->alpm_state.io_wake_lines; 4543 else 4544 wake_lines = 0; 4545 4546 /* 4547 * Disable the PSR features if wake lines exceed the available vblank. 4548 * Though SCL is computed based on these PSR features, it is not reset 4549 * even if the PSR features are disabled to avoid changing vblank start 4550 * at this stage. 4551 */ 4552 if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) { 4553 drm_dbg_kms(display->drm, 4554 "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n", 4555 wake_lines); 4556 4557 if (crtc_state->has_panel_replay) { 4558 crtc_state->has_panel_replay = false; 4559 /* 4560 * #TODO : Add fall back to PSR/PSR2 4561 * Since panel replay cannot be supported, we can fall back to PSR/PSR2. 4562 * This will require calling compute_config for psr and psr2 with check for 4563 * actual guardband instead of vblank_length. 4564 */ 4565 crtc_state->has_psr = false; 4566 } 4567 4568 crtc_state->has_sel_update = false; 4569 crtc_state->enable_psr2_su_region_et = false; 4570 crtc_state->enable_psr2_sel_fetch = false; 4571 } 4572 4573 /* Wa_18037818876 */ 4574 if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) { 4575 crtc_state->has_psr = false; 4576 drm_dbg_kms(display->drm, 4577 "PSR disabled to workaround PSR FSM hang issue\n"); 4578 } 4579 4580 intel_psr_set_non_psr_pipes(intel_dp, crtc_state); 4581 } 4582 4583 int intel_psr_min_guardband(struct intel_crtc_state *crtc_state) 4584 { 4585 struct intel_display *display = to_intel_display(crtc_state); 4586 int psr_min_guardband; 4587 int wake_lines; 4588 4589 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) 4590 return 0; 4591 4592 if (crtc_state->has_panel_replay) 4593 wake_lines = crtc_state->alpm_state.aux_less_wake_lines; 4594 else if (crtc_state->has_sel_update) 4595 wake_lines = DISPLAY_VER(display) < 20 ? 4596 psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines, 4597 crtc_state->alpm_state.fast_wake_lines) : 4598 crtc_state->alpm_state.io_wake_lines; 4599 else 4600 return 0; 4601 4602 psr_min_guardband = wake_lines + crtc_state->set_context_latency; 4603 4604 if (crtc_state->req_psr2_sdp_prior_scanline) 4605 psr_min_guardband++; 4606 4607 return psr_min_guardband; 4608 } 4609