/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "i915_reg.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_cursor_regs.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "intel_step.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. The PSR feature allows the display
 * to go to lower standby states when the system is idle but the display is
 * on, as it completely eliminates display refresh requests to DDR memory
 * as long as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep
 * sleep entry/exit allows the HW to enter a low-power state even when page
 * flipping periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
 * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and that work runs, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep again, resetting the
 * cycle in case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as
 * it would bring a lot of complexity and most modern systems will only use
 * page flips.
 */

/*
 * Description of PSR mask bits:
 *
 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 *
 *  When unmasked (nearly) all display register writes (e.g. even
 *  SWF) trigger a PSR exit. Some registers are excluded from this
 *  and they have a more specific mask (described below). On icl+
 *  this bit no longer exists and is effectively always set.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 *
 *  When unmasked (nearly) all pipe/plane register writes
 *  trigger a PSR exit. Some plane registers are excluded from this
 *  and they have a more specific mask (described below).
 *
 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 *
 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 *  SPR_SURF/CURBASE are not included in this and instead are
 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 *
 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this bit no longer exists, but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 *
 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 *
 *  When unmasked PSR is blocked as long as vblank and/or vsync
 *  interrupt is unmasked in IMR *and* enabled in IER.
 *
 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
 *
 *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
 *  No idea as of now why there is a difference. HSW/BDW (which don't
 *  even have DMC) always generate it after link training. Go figure.
 *
 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during DC5
 *  exit+PSR exit sequence, and thus the bit does nothing until
 *  latched by the vblank that it was trying to prevent from being
 *  generated in the first place. So we should probably call this
 *  one a chicken/egg bit instead on skl+.
 *
 *  In standby mode (as opposed to link-off) this makes no difference
 *  as the timing generator keeps running the whole time generating
 *  normal periodic vblanks.
 *
 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 *  and doing so makes the behaviour match the skl+ reset value.
 *
 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 *
 *  On BDW without this bit no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 *
 * The rest of the bits are more self-explanatory and/or
 * irrelevant for normal operation.
 *
 * Description of the intel_crtc_state variables has_psr, has_panel_replay
 * and has_sel_update:
 *
 *  has_psr (alone):                             PSR1
 *  has_psr + has_sel_update:                    PSR2
 *  has_psr + has_panel_replay:                  Panel Replay
 *  has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
 *
 * Description of some intel_psr variables, enabled, panel_replay_enabled
 * and sel_update_enabled:
 *
 *  enabled (alone):                                     PSR1
 *  enabled + sel_update_enabled:                        PSR2
 *  enabled + panel_replay_enabled:                      Panel Replay
 *  enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
 */

#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
                           (intel_dp)->psr.source_support)

bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
        if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
                return CAN_PSR(enc_to_intel_dp(encoder)) ||
                       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
        else
                return false;
}

bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state)
{
        /*
         * For PSR/PR modes only eDP requires the AUX IO power to be enabled
         * whenever the output is enabled. For non-eDP outputs the main link
         * is always on, hence it doesn't require the HW initiated AUX wake-up
         * signaling used for eDP.
         *
         * TODO:
         * - Consider leaving AUX IO disabled for eDP / PR as well, in case
         *   the ALPM with main-link off mode is not enabled.
         * - Leave AUX IO enabled for DP / PR, once support for ALPM with
         *   main-link off mode is added for it and this mode gets enabled.
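         *
         * Example of the current policy (illustrative summary, not from
         * Bspec): an eDP panel using PSR/PR keeps the AUX IO well powered
         * so the HW can drive the AUX wake-up signaling while the main link
         * is off, whereas a DP / PR monitor keeps its main link on, needs
         * no AUX wake-up, and therefore gets no AUX IO power request here.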
233 */ 234 return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && 235 intel_encoder_can_psr(encoder); 236 } 237 238 static bool psr_global_enabled(struct intel_dp *intel_dp) 239 { 240 struct intel_connector *connector = intel_dp->attached_connector; 241 242 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 243 case I915_PSR_DEBUG_DEFAULT: 244 return intel_dp_is_edp(intel_dp) ? 245 connector->panel.vbt.psr.enable : true; 246 case I915_PSR_DEBUG_DISABLE: 247 return false; 248 default: 249 return true; 250 } 251 } 252 253 static bool sel_update_global_enabled(struct intel_dp *intel_dp) 254 { 255 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 256 case I915_PSR_DEBUG_DISABLE: 257 case I915_PSR_DEBUG_FORCE_PSR1: 258 return false; 259 default: 260 return true; 261 } 262 } 263 264 static bool panel_replay_global_enabled(struct intel_dp *intel_dp) 265 { 266 struct intel_display *display = to_intel_display(intel_dp); 267 268 return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) && 269 display->params.enable_panel_replay; 270 } 271 272 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp) 273 { 274 struct intel_display *display = to_intel_display(intel_dp); 275 276 return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR : 277 EDP_PSR_ERROR(intel_dp->psr.transcoder); 278 } 279 280 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp) 281 { 282 struct intel_display *display = to_intel_display(intel_dp); 283 284 return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT : 285 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder); 286 } 287 288 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp) 289 { 290 struct intel_display *display = to_intel_display(intel_dp); 291 292 return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY : 293 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder); 294 } 295 296 static u32 psr_irq_mask_get(struct intel_dp *intel_dp) 297 { 298 struct intel_display *display = to_intel_display(intel_dp); 299 300 return DISPLAY_VER(display) >= 12 ? 
TGL_PSR_MASK : 301 EDP_PSR_MASK(intel_dp->psr.transcoder); 302 } 303 304 static i915_reg_t psr_ctl_reg(struct intel_display *display, 305 enum transcoder cpu_transcoder) 306 { 307 if (DISPLAY_VER(display) >= 8) 308 return EDP_PSR_CTL(display, cpu_transcoder); 309 else 310 return HSW_SRD_CTL; 311 } 312 313 static i915_reg_t psr_debug_reg(struct intel_display *display, 314 enum transcoder cpu_transcoder) 315 { 316 if (DISPLAY_VER(display) >= 8) 317 return EDP_PSR_DEBUG(display, cpu_transcoder); 318 else 319 return HSW_SRD_DEBUG; 320 } 321 322 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display, 323 enum transcoder cpu_transcoder) 324 { 325 if (DISPLAY_VER(display) >= 8) 326 return EDP_PSR_PERF_CNT(display, cpu_transcoder); 327 else 328 return HSW_SRD_PERF_CNT; 329 } 330 331 static i915_reg_t psr_status_reg(struct intel_display *display, 332 enum transcoder cpu_transcoder) 333 { 334 if (DISPLAY_VER(display) >= 8) 335 return EDP_PSR_STATUS(display, cpu_transcoder); 336 else 337 return HSW_SRD_STATUS; 338 } 339 340 static i915_reg_t psr_imr_reg(struct intel_display *display, 341 enum transcoder cpu_transcoder) 342 { 343 if (DISPLAY_VER(display) >= 12) 344 return TRANS_PSR_IMR(display, cpu_transcoder); 345 else 346 return EDP_PSR_IMR; 347 } 348 349 static i915_reg_t psr_iir_reg(struct intel_display *display, 350 enum transcoder cpu_transcoder) 351 { 352 if (DISPLAY_VER(display) >= 12) 353 return TRANS_PSR_IIR(display, cpu_transcoder); 354 else 355 return EDP_PSR_IIR; 356 } 357 358 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display, 359 enum transcoder cpu_transcoder) 360 { 361 if (DISPLAY_VER(display) >= 8) 362 return EDP_PSR_AUX_CTL(display, cpu_transcoder); 363 else 364 return HSW_SRD_AUX_CTL; 365 } 366 367 static i915_reg_t psr_aux_data_reg(struct intel_display *display, 368 enum transcoder cpu_transcoder, int i) 369 { 370 if (DISPLAY_VER(display) >= 8) 371 return EDP_PSR_AUX_DATA(display, cpu_transcoder, i); 372 else 373 return HSW_SRD_AUX_DATA(i); 374 } 375 376 static void psr_irq_control(struct intel_dp *intel_dp) 377 { 378 struct intel_display *display = to_intel_display(intel_dp); 379 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 380 u32 mask; 381 382 if (intel_dp->psr.panel_replay_enabled) 383 return; 384 385 mask = psr_irq_psr_error_bit_get(intel_dp); 386 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ) 387 mask |= psr_irq_post_exit_bit_get(intel_dp) | 388 psr_irq_pre_entry_bit_get(intel_dp); 389 390 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder), 391 psr_irq_mask_get(intel_dp), ~mask); 392 } 393 394 static void psr_event_print(struct intel_display *display, 395 u32 val, bool sel_update_enabled) 396 { 397 drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val); 398 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) 399 drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n"); 400 if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled) 401 drm_dbg_kms(display->drm, "\tPSR2 disabled\n"); 402 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) 403 drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n"); 404 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) 405 drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n"); 406 if (val & PSR_EVENT_GRAPHICS_RESET) 407 drm_dbg_kms(display->drm, "\tGraphics reset\n"); 408 if (val & PSR_EVENT_PCH_INTERRUPT) 409 drm_dbg_kms(display->drm, "\tPCH interrupt\n"); 410 if (val & PSR_EVENT_MEMORY_UP) 411 drm_dbg_kms(display->drm, "\tMemory up\n"); 412 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) 413 drm_dbg_kms(display->drm, 
"\tFront buffer modification\n"); 414 if (val & PSR_EVENT_WD_TIMER_EXPIRE) 415 drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n"); 416 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) 417 drm_dbg_kms(display->drm, "\tPIPE registers updated\n"); 418 if (val & PSR_EVENT_REGISTER_UPDATE) 419 drm_dbg_kms(display->drm, "\tRegister updated\n"); 420 if (val & PSR_EVENT_HDCP_ENABLE) 421 drm_dbg_kms(display->drm, "\tHDCP enabled\n"); 422 if (val & PSR_EVENT_KVMR_SESSION_ENABLE) 423 drm_dbg_kms(display->drm, "\tKVMR session enabled\n"); 424 if (val & PSR_EVENT_VBI_ENABLE) 425 drm_dbg_kms(display->drm, "\tVBI enabled\n"); 426 if (val & PSR_EVENT_LPSP_MODE_EXIT) 427 drm_dbg_kms(display->drm, "\tLPSP mode exited\n"); 428 if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled) 429 drm_dbg_kms(display->drm, "\tPSR disabled\n"); 430 } 431 432 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) 433 { 434 struct intel_display *display = to_intel_display(intel_dp); 435 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 436 ktime_t time_ns = ktime_get(); 437 438 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) { 439 intel_dp->psr.last_entry_attempt = time_ns; 440 drm_dbg_kms(display->drm, 441 "[transcoder %s] PSR entry attempt in 2 vblanks\n", 442 transcoder_name(cpu_transcoder)); 443 } 444 445 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) { 446 intel_dp->psr.last_exit = time_ns; 447 drm_dbg_kms(display->drm, 448 "[transcoder %s] PSR exit completed\n", 449 transcoder_name(cpu_transcoder)); 450 451 if (DISPLAY_VER(display) >= 9) { 452 u32 val; 453 454 val = intel_de_rmw(display, 455 PSR_EVENT(display, cpu_transcoder), 456 0, 0); 457 458 psr_event_print(display, val, intel_dp->psr.sel_update_enabled); 459 } 460 } 461 462 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) { 463 drm_warn(display->drm, "[transcoder %s] PSR aux error\n", 464 transcoder_name(cpu_transcoder)); 465 466 intel_dp->psr.irq_aux_error = true; 467 468 /* 469 * If this interruption is not masked it will keep 470 * interrupting so fast that it prevents the scheduled 471 * work to run. 472 * Also after a PSR error, we don't want to arm PSR 473 * again so we don't care about unmask the interruption 474 * or unset irq_aux_error. 475 */ 476 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder), 477 0, psr_irq_psr_error_bit_get(intel_dp)); 478 479 queue_work(display->wq.unordered, &intel_dp->psr.work); 480 } 481 } 482 483 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) 484 { 485 struct intel_display *display = to_intel_display(intel_dp); 486 u8 val = 8; /* assume the worst if we can't read the value */ 487 488 if (drm_dp_dpcd_readb(&intel_dp->aux, 489 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) 490 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; 491 else 492 drm_dbg_kms(display->drm, 493 "Unable to get sink synchronization latency, assuming 8 frames\n"); 494 return val; 495 } 496 497 static void _psr_compute_su_granularity(struct intel_dp *intel_dp, 498 struct intel_connector *connector) 499 { 500 struct intel_display *display = to_intel_display(intel_dp); 501 ssize_t r; 502 __le16 w; 503 u8 y; 504 505 /* 506 * If sink don't have specific granularity requirements set legacy 507 * ones. 
         */
        if (!(connector->dp.psr_caps.dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
                /* As PSR2 HW sends full lines, we do not care about x granularity */
                w = cpu_to_le16(4);
                y = 4;
                goto exit;
        }

        r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, sizeof(w));
        if (r != sizeof(w))
                drm_dbg_kms(display->drm,
                            "Unable to read selective update x granularity\n");
        /*
         * Spec says that if the value read is 0 the default granularity should
         * be used instead.
         */
        if (r != sizeof(w) || w == 0)
                w = cpu_to_le16(4);

        r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
        if (r != 1) {
                drm_dbg_kms(display->drm,
                            "Unable to read selective update y granularity\n");
                y = 4;
        }
        if (y == 0)
                y = 1;

exit:
        connector->dp.psr_caps.su_w_granularity = le16_to_cpu(w);
        connector->dp.psr_caps.su_y_granularity = y;
}

static enum intel_panel_replay_dsc_support
compute_pr_dsc_support(struct intel_connector *connector)
{
        u8 pr_dsc_mode;
        u8 val;

        val = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
        pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);

        switch (pr_dsc_mode) {
        case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
                return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
        case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
                return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
        default:
                MISSING_CASE(pr_dsc_mode);
                fallthrough;
        case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
        case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
                return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
        }
}

static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
{
        switch (dsc_support) {
        case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
                return "not supported";
        case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
                return "full frame only";
        case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
                return "selective update";
        default:
                MISSING_CASE(dsc_support);
                return "n/a";
        }
}

static void _panel_replay_compute_su_granularity(struct intel_connector *connector)
{
        u16 w;
        u8 y;

        if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
              DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED)) {
                w = 4;
                y = 4;
                goto exit;
        }

        /*
         * Spec says that if the value read is 0 the default granularity should
         * be used instead.
         */
        w = le16_to_cpu(*(__le16 *)&connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_X_GRANULARITY)]) ? : 4;
        y = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_Y_GRANULARITY)] ? : 1;

exit:
        connector->dp.panel_replay_caps.su_w_granularity = w;
        connector->dp.panel_replay_caps.su_y_granularity = y;
}

static void _panel_replay_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(intel_dp);
        int ret;

        /* TODO: Enable Panel Replay on MST once it's properly implemented.
         */
        if (intel_dp->mst_detect == DRM_DP_MST)
                return;

        ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
                                    &connector->dp.panel_replay_caps.dpcd,
                                    sizeof(connector->dp.panel_replay_caps.dpcd));
        if (ret < 0)
                return;

        if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
              DP_PANEL_REPLAY_SUPPORT))
                return;

        if (intel_dp_is_edp(intel_dp)) {
                if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
                        drm_dbg_kms(display->drm,
                                    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
                        return;
                }

                if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
                      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
                        drm_dbg_kms(display->drm,
                                    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
                        return;
                }
        }

        connector->dp.panel_replay_caps.support = true;
        intel_dp->psr.sink_panel_replay_support = true;

        if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
            DP_PANEL_REPLAY_SU_SUPPORT) {
                connector->dp.panel_replay_caps.su_support = true;

                _panel_replay_compute_su_granularity(connector);
        }

        connector->dp.panel_replay_caps.dsc_support = compute_pr_dsc_support(connector);

        drm_dbg_kms(display->drm,
                    "Panel replay %sis supported by panel (in DSC mode: %s)\n",
                    connector->dp.panel_replay_caps.su_support ?
                    "selective_update " : "",
                    panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
}

static void _psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(intel_dp);
        int ret;

        ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, connector->dp.psr_caps.dpcd,
                                    sizeof(connector->dp.psr_caps.dpcd));
        if (ret < 0)
                return;

        if (!connector->dp.psr_caps.dpcd[0])
                return;

        drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
                    connector->dp.psr_caps.dpcd[0]);

        if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
                drm_dbg_kms(display->drm,
                            "PSR support not currently available for this panel\n");
                return;
        }

        if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
                drm_dbg_kms(display->drm,
                            "Panel lacks power state control, PSR cannot be enabled\n");
                return;
        }

        connector->dp.psr_caps.support = true;
        intel_dp->psr.sink_support = true;

        connector->dp.psr_caps.sync_latency = intel_dp_get_sink_sync_latency(intel_dp);

        if (DISPLAY_VER(display) >= 9 &&
            connector->dp.psr_caps.dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
                bool y_req = connector->dp.psr_caps.dpcd[1] &
                             DP_PSR2_SU_Y_COORDINATE_REQUIRED;

                /*
                 * All panels that support PSR version 03h (PSR2 +
                 * Y-coordinate) can handle Y-coordinates in VSC, but we are
                 * only sure that it is going to be used when required by the
                 * panel. This way the panel is capable of doing selective
                 * updates without an AUX frame sync.
                 *
                 * To support panels with PSR version 02h, or 03h without the
                 * Y-coordinate requirement, we would need to enable GTC
                 * first.
                 */
                connector->dp.psr_caps.su_support = y_req &&
                        intel_alpm_aux_wake_supported(intel_dp);
                drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
                            connector->dp.psr_caps.su_support ?
"" : "not "); 709 } 710 711 if (connector->dp.psr_caps.su_support) 712 _psr_compute_su_granularity(intel_dp, connector); 713 } 714 715 void intel_psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 716 { 717 _psr_init_dpcd(intel_dp, connector); 718 719 _panel_replay_init_dpcd(intel_dp, connector); 720 } 721 722 static void hsw_psr_setup_aux(struct intel_dp *intel_dp) 723 { 724 struct intel_display *display = to_intel_display(intel_dp); 725 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 726 u32 aux_clock_divider, aux_ctl; 727 /* write DP_SET_POWER=D0 */ 728 static const u8 aux_msg[] = { 729 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf), 730 [1] = (DP_SET_POWER >> 8) & 0xff, 731 [2] = DP_SET_POWER & 0xff, 732 [3] = 1 - 1, 733 [4] = DP_SET_POWER_D0, 734 }; 735 int i; 736 737 BUILD_BUG_ON(sizeof(aux_msg) > 20); 738 for (i = 0; i < sizeof(aux_msg); i += 4) 739 intel_de_write(display, 740 psr_aux_data_reg(display, cpu_transcoder, i >> 2), 741 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i)); 742 743 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 744 745 /* Start with bits set for DDI_AUX_CTL register */ 746 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), 747 aux_clock_divider); 748 749 /* Select only valid bits for SRD_AUX_CTL */ 750 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK | 751 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | 752 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | 753 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; 754 755 intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder), 756 aux_ctl); 757 } 758 759 static bool psr2_su_region_et_valid(struct intel_connector *connector, bool panel_replay) 760 { 761 struct intel_dp *intel_dp = intel_attached_dp(connector); 762 struct intel_display *display = to_intel_display(intel_dp); 763 764 if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) || 765 intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE) 766 return false; 767 768 return panel_replay ? 
769 connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & 770 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT : 771 connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED; 772 } 773 774 static void _panel_replay_enable_sink(struct intel_dp *intel_dp, 775 const struct intel_crtc_state *crtc_state) 776 { 777 u8 val = DP_PANEL_REPLAY_ENABLE | 778 DP_PANEL_REPLAY_VSC_SDP_CRC_EN | 779 DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN | 780 DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN | 781 DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN; 782 u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION; 783 784 if (crtc_state->has_sel_update) 785 val |= DP_PANEL_REPLAY_SU_ENABLE; 786 787 if (crtc_state->enable_psr2_su_region_et) 788 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET; 789 790 if (crtc_state->req_psr2_sdp_prior_scanline) 791 panel_replay_config2 |= 792 DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE; 793 794 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val); 795 796 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2, 797 panel_replay_config2); 798 } 799 800 static void _psr_enable_sink(struct intel_dp *intel_dp, 801 const struct intel_crtc_state *crtc_state) 802 { 803 struct intel_display *display = to_intel_display(intel_dp); 804 u8 val = 0; 805 806 if (crtc_state->has_sel_update) { 807 val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; 808 } else { 809 if (intel_dp->psr.link_standby) 810 val |= DP_PSR_MAIN_LINK_ACTIVE; 811 812 if (DISPLAY_VER(display) >= 8) 813 val |= DP_PSR_CRC_VERIFICATION; 814 } 815 816 if (crtc_state->req_psr2_sdp_prior_scanline) 817 val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE; 818 819 if (crtc_state->enable_psr2_su_region_et) 820 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET; 821 822 if (intel_dp->psr.entry_setup_frames > 0) 823 val |= DP_PSR_FRAME_CAPTURE; 824 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); 825 826 val |= DP_PSR_ENABLE; 827 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); 828 } 829 830 static void intel_psr_enable_sink(struct intel_dp *intel_dp, 831 const struct intel_crtc_state *crtc_state) 832 { 833 intel_alpm_enable_sink(intel_dp, crtc_state); 834 835 crtc_state->has_panel_replay ? 836 _panel_replay_enable_sink(intel_dp, crtc_state) : 837 _psr_enable_sink(intel_dp, crtc_state); 838 839 if (intel_dp_is_edp(intel_dp)) 840 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 841 } 842 843 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp) 844 { 845 /* 846 * NOTE: We might want to trigger mode set when 847 * disabling/enabling Panel Replay via debugfs interface to 848 * ensure this bit is cleared/set accordingly. 
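         *
         * Example failure mode this would avoid (assumed scenario, for
         * illustration only): Panel Replay gets disabled via debugfs
         * without a mode set, leaving DP_PANEL_REPLAY_ENABLE set in the
         * sink from the write below even though the source no longer
         * uses Panel Replay.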
849 */ 850 if (CAN_PANEL_REPLAY(intel_dp) && panel_replay_global_enabled(intel_dp)) 851 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, 852 DP_PANEL_REPLAY_ENABLE); 853 } 854 855 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) 856 { 857 struct intel_display *display = to_intel_display(intel_dp); 858 struct intel_connector *connector = intel_dp->attached_connector; 859 u32 val = 0; 860 861 if (DISPLAY_VER(display) >= 11) 862 val |= EDP_PSR_TP4_TIME_0us; 863 864 if (display->params.psr_safest_params) { 865 val |= EDP_PSR_TP1_TIME_2500us; 866 val |= EDP_PSR_TP2_TP3_TIME_2500us; 867 goto check_tp3_sel; 868 } 869 870 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0) 871 val |= EDP_PSR_TP1_TIME_0us; 872 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100) 873 val |= EDP_PSR_TP1_TIME_100us; 874 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500) 875 val |= EDP_PSR_TP1_TIME_500us; 876 else 877 val |= EDP_PSR_TP1_TIME_2500us; 878 879 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) 880 val |= EDP_PSR_TP2_TP3_TIME_0us; 881 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100) 882 val |= EDP_PSR_TP2_TP3_TIME_100us; 883 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500) 884 val |= EDP_PSR_TP2_TP3_TIME_500us; 885 else 886 val |= EDP_PSR_TP2_TP3_TIME_2500us; 887 888 /* 889 * WA 0479: hsw,bdw 890 * "Do not skip both TP1 and TP2/TP3" 891 */ 892 if (DISPLAY_VER(display) < 9 && 893 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 && 894 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) 895 val |= EDP_PSR_TP2_TP3_TIME_100us; 896 897 check_tp3_sel: 898 if (intel_dp_source_supports_tps3(display) && 899 drm_dp_tps3_supported(intel_dp->dpcd)) 900 val |= EDP_PSR_TP_TP1_TP3; 901 else 902 val |= EDP_PSR_TP_TP1_TP2; 903 904 return val; 905 } 906 907 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) 908 { 909 struct intel_display *display = to_intel_display(intel_dp); 910 struct intel_connector *connector = intel_dp->attached_connector; 911 int idle_frames; 912 913 /* Let's use 6 as the minimum to cover all known cases including the 914 * off-by-one issue that HW has in some cases. 
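         *
         * Worked example (illustrative numbers): with VBT idle_frames == 2
         * and a sink sync latency of 8 frames, the code below computes
         * max(6, 2) == 6 and then max(6, 8 + 1) == 9 idle frames.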
915 */ 916 idle_frames = max(6, connector->panel.vbt.psr.idle_frames); 917 idle_frames = max(idle_frames, connector->dp.psr_caps.sync_latency + 1); 918 919 if (drm_WARN_ON(display->drm, idle_frames > 0xf)) 920 idle_frames = 0xf; 921 922 return idle_frames; 923 } 924 925 static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp) 926 { 927 struct intel_display *display = to_intel_display(intel_dp); 928 u32 current_dc_state = intel_display_power_get_current_dc_state(display); 929 struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe); 930 struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base); 931 932 return (current_dc_state != DC_STATE_EN_UPTO_DC5 && 933 current_dc_state != DC_STATE_EN_UPTO_DC6) || 934 intel_dp->psr.active_non_psr_pipes || 935 READ_ONCE(vblank->enabled); 936 } 937 938 static void hsw_activate_psr1(struct intel_dp *intel_dp) 939 { 940 struct intel_display *display = to_intel_display(intel_dp); 941 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 942 u32 max_sleep_time = 0x1f; 943 u32 val = EDP_PSR_ENABLE; 944 945 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); 946 947 if (DISPLAY_VER(display) < 20) 948 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time); 949 950 if (display->platform.haswell) 951 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 952 953 if (intel_dp->psr.link_standby) 954 val |= EDP_PSR_LINK_STANDBY; 955 956 val |= intel_psr1_get_tp_time(intel_dp); 957 958 if (DISPLAY_VER(display) >= 8) 959 val |= EDP_PSR_CRC_ENABLE; 960 961 if (DISPLAY_VER(display) >= 20) 962 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); 963 964 intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder), 965 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); 966 967 /* Wa_16025596647 */ 968 if ((DISPLAY_VER(display) == 20 || 969 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 970 is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used) 971 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, 972 intel_dp->psr.pipe, 973 true); 974 } 975 976 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) 977 { 978 struct intel_display *display = to_intel_display(intel_dp); 979 struct intel_connector *connector = intel_dp->attached_connector; 980 u32 val = 0; 981 982 if (display->params.psr_safest_params) 983 return EDP_PSR2_TP2_TIME_2500us; 984 985 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && 986 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) 987 val |= EDP_PSR2_TP2_TIME_50us; 988 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) 989 val |= EDP_PSR2_TP2_TIME_100us; 990 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) 991 val |= EDP_PSR2_TP2_TIME_500us; 992 else 993 val |= EDP_PSR2_TP2_TIME_2500us; 994 995 return val; 996 } 997 998 static int 999 psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines) 1000 { 1001 return io_wake_lines < 9 && fast_wake_lines < 9 ? 
8 : 12; 1002 } 1003 1004 static int psr2_block_count(struct intel_dp *intel_dp) 1005 { 1006 return psr2_block_count_lines(intel_dp->psr.io_wake_lines, 1007 intel_dp->psr.fast_wake_lines) / 4; 1008 } 1009 1010 static u8 frames_before_su_entry(struct intel_dp *intel_dp) 1011 { 1012 struct intel_connector *connector = intel_dp->attached_connector; 1013 u8 frames_before_su_entry; 1014 1015 frames_before_su_entry = max_t(u8, 1016 connector->dp.psr_caps.sync_latency + 1, 1017 2); 1018 1019 /* Entry setup frames must be at least 1 less than frames before SU entry */ 1020 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry) 1021 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1; 1022 1023 return frames_before_su_entry; 1024 } 1025 1026 static void dg2_activate_panel_replay(struct intel_dp *intel_dp) 1027 { 1028 struct intel_display *display = to_intel_display(intel_dp); 1029 struct intel_psr *psr = &intel_dp->psr; 1030 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 1031 1032 if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) { 1033 u32 val = psr->su_region_et_enabled ? 1034 LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0; 1035 1036 if (intel_dp->psr.req_psr2_sdp_prior_scanline) 1037 val |= EDP_PSR2_SU_SDP_SCANLINE; 1038 1039 intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), 1040 val); 1041 } 1042 1043 intel_de_rmw(display, 1044 PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder), 1045 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME); 1046 1047 intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0, 1048 TRANS_DP2_PANEL_REPLAY_ENABLE); 1049 } 1050 1051 static void hsw_activate_psr2(struct intel_dp *intel_dp) 1052 { 1053 struct intel_display *display = to_intel_display(intel_dp); 1054 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 1055 u32 val = EDP_PSR2_ENABLE; 1056 u32 psr_val = 0; 1057 u8 idle_frames; 1058 1059 /* Wa_16025596647 */ 1060 if ((DISPLAY_VER(display) == 20 || 1061 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && 1062 is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used) 1063 idle_frames = 0; 1064 else 1065 idle_frames = psr_compute_idle_frames(intel_dp); 1066 val |= EDP_PSR2_IDLE_FRAMES(idle_frames); 1067 1068 if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p) 1069 val |= EDP_SU_TRACK_ENABLE; 1070 1071 if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13) 1072 val |= EDP_Y_COORDINATE_ENABLE; 1073 1074 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp)); 1075 1076 val |= intel_psr2_get_tp_time(intel_dp); 1077 1078 if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) { 1079 if (psr2_block_count(intel_dp) > 2) 1080 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; 1081 else 1082 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 1083 } 1084 1085 /* Wa_22012278275:adl-p */ 1086 if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) { 1087 static const u8 map[] = { 1088 2, /* 5 lines */ 1089 1, /* 6 lines */ 1090 0, /* 7 lines */ 1091 3, /* 8 lines */ 1092 6, /* 9 lines */ 1093 5, /* 10 lines */ 1094 4, /* 11 lines */ 1095 7, /* 12 lines */ 1096 }; 1097 /* 1098 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see 1099 * comments below for more information 1100 */ 1101 int tmp; 1102 1103 tmp = map[intel_dp->psr.io_wake_lines - 1104 TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; 1105 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES); 1106 1107 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; 1108 val 
|= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
        } else if (DISPLAY_VER(display) >= 20) {
                val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
        } else if (DISPLAY_VER(display) >= 12) {
                val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
                val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
        } else if (DISPLAY_VER(display) >= 9) {
                val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
                val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
        }

        if (intel_dp->psr.req_psr2_sdp_prior_scanline)
                val |= EDP_PSR2_SU_SDP_SCANLINE;

        if (DISPLAY_VER(display) >= 20)
                psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

        if (intel_dp->psr.psr2_sel_fetch_enabled) {
                u32 tmp;

                tmp = intel_de_read(display,
                                    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
                drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
        } else if (HAS_PSR2_SEL_FETCH(display)) {
                intel_de_write(display,
                               PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
        }

        if (intel_dp->psr.su_region_et_enabled)
                val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;

        /*
         * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and Bspec
         * recommends keeping this bit unset while PSR2 is enabled.
         */
        intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);

        intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
}

static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
                return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
        else if (DISPLAY_VER(display) >= 12)
                return cpu_transcoder == TRANSCODER_A;
        else if (DISPLAY_VER(display) >= 9)
                return cpu_transcoder == TRANSCODER_EDP;
        else
                return false;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
{
        if (!crtc_state->hw.active)
                return 0;

        return DIV_ROUND_UP(1000 * 1000,
                            drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct intel_dp *intel_dp,
                                     u32 idle_frames)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

        intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
                     EDP_PSR2_IDLE_FRAMES_MASK,
                     EDP_PSR2_IDLE_FRAMES(idle_frames));
}

static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        psr2_program_idle_frames(intel_dp, 0);
        intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
        psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc3co_disable_work(struct work_struct *work)
{
        struct intel_dp *intel_dp =
                container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

        mutex_lock(&intel_dp->psr.lock);
        /* If delayed work is pending, it is not idle */
        if (delayed_work_pending(&intel_dp->psr.dc3co_work))
                goto unlock;

        tgl_psr2_disable_dc3co(intel_dp);
unlock:
        mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
        if (!intel_dp->psr.dc3co_exitline)
                return;

        cancel_delayed_work(&intel_dp->psr.dc3co_work);
        /* Before PSR2 exit disallow dc3co */
        tgl_psr2_disable_dc3co(intel_dp);
}

static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
        enum port port = dig_port->base.port;

        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
                return pipe <= PIPE_B && port <= PORT_B;
        else
                return pipe == PIPE_A && port == PORT_A;
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
        struct i915_power_domains *power_domains = &display->power.domains;
        u32 exit_scanlines;

        /*
         * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
         * disable DC3CO until the changed dc3co activating/deactivating sequence
         * is applied. B.Specs:49196
         */
        return;

        /*
         * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
         * TODO: when the issue is addressed, this restriction should be removed.
         */
        if (crtc_state->enable_psr2_sel_fetch)
                return;

        if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
                return;

        if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
                return;

        /* Wa_16011303918:adl-p */
        if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
                return;

        /*
         * DC3CO Exit time 200us B.Spec 49196
         * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
         */
        exit_scanlines =
                intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

        if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
                return;

        crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
                                              struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);

        if (!display->params.enable_psr2_sel_fetch &&
            intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
                drm_dbg_kms(display->drm,
                            "PSR2 sel fetch not enabled, disabled by parameter\n");
                return false;
        }

        return crtc_state->enable_psr2_sel_fetch = true;
}

static bool psr2_granularity_check(struct intel_crtc_state *crtc_state,
                                   struct intel_connector *connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_display *display = to_intel_display(intel_dp);
        const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
        const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
        const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
        u16 y_granularity = 0;
        u16 sink_y_granularity = crtc_state->has_panel_replay ?
                                 connector->dp.panel_replay_caps.su_y_granularity :
                                 connector->dp.psr_caps.su_y_granularity;
        u16 sink_w_granularity;

        if (crtc_state->has_panel_replay)
                sink_w_granularity = connector->dp.panel_replay_caps.su_w_granularity ==
                                     DP_PANEL_REPLAY_FULL_LINE_GRANULARITY ?
                                     crtc_hdisplay : connector->dp.panel_replay_caps.su_w_granularity;
        else
                sink_w_granularity = connector->dp.psr_caps.su_w_granularity;

        /* PSR2 HW only sends full lines so we only need to validate the width */
        if (crtc_hdisplay % sink_w_granularity)
                return false;

        if (crtc_vdisplay % sink_y_granularity)
                return false;

        /* HW tracking is only aligned to 4 lines */
        if (!crtc_state->enable_psr2_sel_fetch)
                return sink_y_granularity == 4;

        /*
         * adl_p and mtl platforms have 1 line granularity.
         * For other platforms with SW tracking we can adjust the y coordinates
         * to match sink requirement if multiple of 4.
         */
        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
                y_granularity = sink_y_granularity;
        else if (sink_y_granularity <= 2)
                y_granularity = 4;
        else if ((sink_y_granularity % 4) == 0)
                y_granularity = sink_y_granularity;

        if (y_granularity == 0 || crtc_vdisplay % y_granularity)
                return false;

        if (crtc_state->dsc.compression_enable &&
            vdsc_cfg->slice_height % y_granularity)
                return false;

        crtc_state->su_y_granularity = y_granularity;
        return true;
}

static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
                                                        struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
        u32 hblank_total, hblank_ns, req_ns;

        hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
        hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

        /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
        req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

        if ((hblank_ns - req_ns) > 100)
                return true;

        /* Not supported <13 / Wa_22012279113:adl-p */
        if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
                return false;

        crtc_state->req_psr2_sdp_prior_scanline = true;
        return true;
}

static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
                                        struct drm_connector_state *conn_state,
                                        const struct drm_display_mode *adjusted_mode)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        int psr_setup_time = drm_dp_psr_setup_time(connector->dp.psr_caps.dpcd);
        int entry_setup_frames = 0;

        if (psr_setup_time < 0) {
                drm_dbg_kms(display->drm,
                            "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
                            connector->dp.psr_caps.dpcd[1]);
                return -ETIME;
        }

        if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
            adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
                if (DISPLAY_VER(display) >= 20) {
                        /* setup entry frames can be up to 3 frames */
                        entry_setup_frames = 1;
                        drm_dbg_kms(display->drm,
                                    "PSR setup entry frames %d\n",
                                    entry_setup_frames);
                } else {
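                        /*
                         * Before display version 20 there is no entry setup
                         * frame support: if the sink's PSR setup time does
                         * not fit in the vblank, PSR cannot be enabled for
                         * this mode at all.
                         */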
                        drm_dbg_kms(display->drm,
                                    "PSR condition failed: PSR setup time (%d us) too long\n",
                                    psr_setup_time);
                        return -ETIME;
                }
        }

        return entry_setup_frames;
}

static
int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
                                       bool needs_panel_replay,
                                       bool needs_sel_update)
{
        struct intel_display *display = to_intel_display(crtc_state);

        if (!crtc_state->has_psr)
                return 0;

        /* Wa_14015401596 */
        if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
                return 1;

        /* Rest is for SRD_STATUS needed on LunarLake and onwards */
        if (DISPLAY_VER(display) < 20)
                return 0;

        /*
         * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
         *
         * To deterministically capture the transition of the state machine
         * going from SRDOFFACK to IDLE, the delayed V. Blank should be at
         * least one line after the non-delayed V. Blank.
         *
         * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
         * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
         * - TRANS_VTOTAL[ Vertical Active ])
         *
         * SRD_STATUS is used only by PSR1 on PantherLake.
         * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
         */

        if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
                                           needs_sel_update))
                return 0;
        else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
                                               intel_crtc_has_type(crtc_state,
                                                                   INTEL_OUTPUT_EDP)))
                return 0;
        else
                return 1;
}

static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
                                        int vblank,
                                        int wake_lines)
{
        if (crtc_state->req_psr2_sdp_prior_scanline)
                vblank -= 1;

        /* Vblank >= PSR2_CTL Block Count Number maximum line count */
        if (vblank < wake_lines)
                return false;

        return true;
}

static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
                                       const struct intel_crtc_state *crtc_state,
                                       bool aux_less,
                                       bool needs_panel_replay,
                                       bool needs_sel_update)
{
        struct intel_display *display = to_intel_display(intel_dp);
        int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
                     crtc_state->hw.adjusted_mode.crtc_vblank_start;
        int wake_lines;
        int scl = _intel_psr_min_set_context_latency(crtc_state,
                                                     needs_panel_replay,
                                                     needs_sel_update);

        vblank -= scl;

        if (aux_less)
                wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
        else
                wake_lines = DISPLAY_VER(display) < 20 ?
                             psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
                                                    crtc_state->alpm_state.fast_wake_lines) :
                             crtc_state->alpm_state.io_wake_lines;

        /*
         * Guardband has not been computed yet, so we conservatively check if the
         * full vblank duration is sufficient to accommodate wake line requirements
         * for PSR features like Panel Replay and Selective Update.
         *
         * Once the actual guardband is available, a more accurate validation is
         * performed in intel_psr_compute_config_late(), and PSR features are
         * disabled if wake lines exceed the available guardband.
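         *
         * Worked example (illustrative numbers): a mode with
         * crtc_vblank_end - crtc_vblank_start == 36 lines and scl == 1
         * leaves 35 lines; on pre-Xe2 with io_wake_lines == 7 and
         * fast_wake_lines == 7 the block count is 8 lines, so the check
         * below passes (35 >= 8, or 34 >= 8 with the SDP-prior-scanline
         * line reserved).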
         */
        return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
}

static bool alpm_config_valid(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state,
                              bool aux_less,
                              bool needs_panel_replay,
                              bool needs_sel_update)
{
        struct intel_display *display = to_intel_display(intel_dp);

        if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
                drm_dbg_kms(display->drm,
                            "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
                return false;
        }

        if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
                                        needs_panel_replay, needs_sel_update)) {
                drm_dbg_kms(display->drm,
                            "PSR2/Panel Replay not enabled, too short vblank time\n");
                return false;
        }

        return true;
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                                    struct intel_crtc_state *crtc_state,
                                    struct drm_connector_state *conn_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
        int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
        int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

        if (!connector->dp.psr_caps.su_support || display->params.enable_psr == 1)
                return false;

        /* JSL and EHL only support eDP 1.3 */
        if (display->platform.jasperlake || display->platform.elkhartlake) {
                drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
                return false;
        }

        /* Wa_16011181250 */
        if (display->platform.rocketlake || display->platform.alderlake_s ||
            display->platform.dg2) {
                drm_dbg_kms(display->drm,
                            "PSR2 is defeatured for this platform\n");
                return false;
        }

        if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not completely functional in this stepping\n");
                return false;
        }

        if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not supported in transcoder %s\n",
                            transcoder_name(crtc_state->cpu_transcoder));
                return false;
        }

        /*
         * DSC and PSR2 cannot be enabled simultaneously. If a requested
         * resolution requires DSC to be enabled, priority is given to DSC
         * over PSR2.
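         *
         * For example (illustrative): a mode whose link bandwidth demands
         * DSC keeps DSC and loses PSR2 on the pre-14, non-ADL-P platforms
         * rejected below, while ADL-P and display 14+ allow DSC and
         * selective update to coexist.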
         */
        if (crtc_state->dsc.compression_enable &&
            (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
                drm_dbg_kms(display->drm,
                            "PSR2 cannot be enabled since DSC is enabled\n");
                return false;
        }

        if (DISPLAY_VER(display) >= 20) {
                psr_max_h = crtc_hdisplay;
                psr_max_v = crtc_vdisplay;
                max_bpp = crtc_state->pipe_bpp;
        } else if (IS_DISPLAY_VER(display, 12, 14)) {
                psr_max_h = 5120;
                psr_max_v = 3200;
                max_bpp = 30;
        } else if (IS_DISPLAY_VER(display, 10, 11)) {
                psr_max_h = 4096;
                psr_max_v = 2304;
                max_bpp = 24;
        } else if (DISPLAY_VER(display) == 9) {
                psr_max_h = 3640;
                psr_max_v = 2304;
                max_bpp = 24;
        }

        if (crtc_state->pipe_bpp > max_bpp) {
                drm_dbg_kms(display->drm,
                            "PSR2 not enabled, pipe bpp %d > max supported %d\n",
                            crtc_state->pipe_bpp, max_bpp);
                return false;
        }

        /* Wa_16011303918:adl-p */
        if (crtc_state->vrr.enable &&
            display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not enabled, not compatible with HW stepping + VRR\n");
                return false;
        }

        if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
                return false;

        if (!crtc_state->enable_psr2_sel_fetch &&
            (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
                            crtc_hdisplay, crtc_vdisplay,
                            psr_max_h, psr_max_v);
                return false;
        }

        tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);

        return true;
}

static bool intel_sel_update_config_valid(struct intel_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_display *display = to_intel_display(intel_dp);

        if (HAS_PSR2_SEL_FETCH(display) &&
            !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
            !HAS_PSR_HW_TRACKING(display)) {
                drm_dbg_kms(display->drm,
                            "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
                goto unsupported;
        }

        if (!sel_update_global_enabled(intel_dp)) {
                drm_dbg_kms(display->drm,
                            "Selective update disabled by flag\n");
                goto unsupported;
        }

        if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state,
                                                                      conn_state))
                goto unsupported;

        if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
                drm_dbg_kms(display->drm,
                            "Selective update not enabled, SDP indication does not fit in hblank\n");
                goto unsupported;
        }

        if (crtc_state->has_panel_replay) {
                if (DISPLAY_VER(display) < 14)
                        goto unsupported;

                if (!connector->dp.panel_replay_caps.su_support)
                        goto unsupported;

                if (intel_dsc_enabled_on_link(crtc_state) &&
                    connector->dp.panel_replay_caps.dsc_support !=
                    INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
                        drm_dbg_kms(display->drm,
                                    "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
                        goto unsupported;
                }
        }

        if (crtc_state->crc_enabled) {
                drm_dbg_kms(display->drm,
                            "Selective update not enabled because it would inhibit pipe CRC calculation\n");
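                /*
                 * (Assumed rationale, for documentation only: while the
                 * panel refreshes from its own RFB the pipe does not output
                 * a frame for every vblank, so per-frame pipe CRCs could
                 * not be generated reliably.)
                 */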
		goto unsupported;
	}

	if (!psr2_granularity_check(crtc_state, connector)) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	crtc_state->enable_psr2_su_region_et = psr2_su_region_et_valid(connector,
								       crtc_state->has_panel_replay);

	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}

static bool _psr_compute_config(struct intel_dp *intel_dp,
				struct intel_crtc_state *crtc_state,
				struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	int entry_setup_frames;

	if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
		return false;

	/*
	 * Currently PSR doesn't work reliably with VRR enabled.
	 */
	if (crtc_state->vrr.enable)
		return false;

	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode);

	if (entry_setup_frames >= 0) {
		intel_dp->psr.entry_setup_frames = entry_setup_frames;
	} else {
		crtc_state->no_psr_reason = "PSR setup timing not met";
		drm_dbg_kms(display->drm,
			    "PSR condition failed: PSR setup timing not met\n");
		return false;
	}

	return true;
}

static inline bool compute_link_off_after_as_sdp_when_pr_active(struct intel_connector *connector)
{
	return (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
		DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP);
}

static inline bool compute_disable_as_sdp_when_pr_active(struct intel_connector *connector)
{
	return !(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
		 DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR);
}

static bool _panel_replay_compute_config(struct intel_crtc_state *crtc_state,
					 const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!CAN_PANEL_REPLAY(intel_dp))
		return false;

	if (!connector->dp.panel_replay_caps.support)
		return false;

	if (!panel_replay_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(display->drm,
			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (intel_dsc_enabled_on_link(crtc_state) &&
	    connector->dp.panel_replay_caps.dsc_support ==
	    INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
		drm_dbg_kms(display->drm,
			    "Panel Replay not enabled because it's not supported with DSC\n");
		return false;
	}

	crtc_state->link_off_after_as_sdp_when_pr_active = compute_link_off_after_as_sdp_when_pr_active(connector);
	crtc_state->disable_as_sdp_when_pr_active = compute_disable_as_sdp_when_pr_active(connector);

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/* Remaining checks are for eDP only */

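	/* eDP Panel Replay is limited to pipes A and B */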
	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
		return false;

	/* 128b/132b Panel Replay is not supported on eDP */
	if (intel_dp_is_uhbr(crtc_state)) {
		drm_dbg_kms(display->drm,
			    "Panel Replay is not supported with 128b/132b\n");
		return false;
	}

	/* HW will not allow Panel Replay on eDP when HDCP is enabled */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
	    (conn_state->content_protection ==
	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
		drm_dbg_kms(display->drm,
			    "Panel Replay is not supported with HDCP\n");
		return false;
	}

	if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
		return false;

	return true;
}

static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
					   struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);

	return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
		!crtc_state->has_sel_update);
}

static
void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_crtc *crtc;
	u8 active_pipes = 0;

	/* Wa_16025596647 */
	if (DISPLAY_VER(display) != 20 &&
	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
		return;

	/* Not needed by Panel Replay */
	if (crtc_state->has_panel_replay)
		return;

	/* We ignore possible secondary PSR/Panel Replay capable eDP */
	for_each_intel_crtc(display->drm, crtc)
		active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;

	active_pipes = intel_calc_active_pipes(state, active_pipes);

	crtc_state->active_non_psr_pipes = active_pipes &
		~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(display->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(display->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	/*
	 * FIXME figure out what is wrong with PSR+joiner and
	 * fix it. Presumably something related to the fact that
	 * PSR is a transcoder level feature.
	 */
	if (crtc_state->joiner_pipes) {
		drm_dbg_kms(display->drm,
			    "PSR disabled due to joiner\n");
		return;
	}

	/*
	 * Only used for state verification.
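	 * verify_panel_replay_dsc_state() WARNs at post plane update time if
	 * DSC ended up enabled on the link even though the sink does not
	 * support it together with Panel Replay.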
	 */
	crtc_state->panel_replay_dsc_support = connector->dp.panel_replay_caps.dsc_support;
	crtc_state->has_panel_replay = _panel_replay_compute_config(crtc_state, conn_state);

	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
		_psr_compute_config(intel_dp, crtc_state, conn_state);

	if (!crtc_state->has_psr)
		return;

	crtc_state->has_sel_update = intel_sel_update_config_valid(crtc_state, conn_state);
}

void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	if (intel_dp->psr.panel_replay_enabled) {
		pipe_config->has_psr = pipe_config->has_panel_replay = true;
	} else {
		/*
		 * Not possible to read the EDP_PSR/PSR2_CTL registers
		 * reliably, as PSR gets enabled/disabled on the fly by
		 * frontbuffer tracking and others.
		 */
		pipe_config->has_psr = true;
	}

	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.sel_update_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(display)) {
		val = intel_de_read(display,
				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;

	if (DISPLAY_VER(display) >= 12) {
		val = intel_de_read(display,
				    TRANS_EXITLINE(display, cpu_transcoder));
		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	drm_WARN_ON(display->drm,
		    transcoder_has_psr2(display, cpu_transcoder) &&
		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(display->drm,
		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);

	drm_WARN_ON(display->drm, intel_dp->psr.active);

	drm_WARN_ON(display->drm, !intel_dp->psr.enabled);

	lockdep_assert_held(&intel_dp->psr.lock);

	/* PSR1, PSR2 and Panel Replay are mutually exclusive. */
	if (intel_dp->psr.panel_replay_enabled)
		dg2_activate_panel_replay(intel_dp);
	else if (intel_dp->psr.sel_update_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
	intel_dp->psr.no_psr_reason = NULL;
}

/*
 * Wa_16013835468
 * Wa_14015648006
 */
static void wm_optimization_wa(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum pipe pipe = intel_dp->psr.pipe;
	bool activate = false;

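	/*
	 * Both workarounds toggle the same LATENCY_REPORTING_REMOVED(pipe)
	 * bit; they only differ in the platform/state condition that
	 * triggers them.
	 */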
	/* Wa_14015648006 */
	if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
		activate = true;

	/* Wa_16013835468 */
	if (DISPLAY_VER(display) == 12 &&
	    crtc_state->hw.adjusted_mode.crtc_vblank_start !=
	    crtc_state->hw.adjusted_mode.crtc_vdisplay)
		activate = true;

	if (activate)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     0, LATENCY_REPORTING_REMOVED(pipe));
	else
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     LATENCY_REPORTING_REMOVED(pipe), 0);
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask = 0;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (DISPLAY_VER(display) < 9)
		hsw_psr_setup_aux(intel_dp);

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other HW tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 *
	 * From bspec prior to LunarLake:
	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used
	 * in panel replay mode.
	 *
	 * From bspec beyond LunarLake:
	 * Panel Replay on DP: No bits are applicable
	 * Panel Replay on eDP: All bits are applicable
	 */
	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
		mask = EDP_PSR_DEBUG_MASK_HPD;

	if (intel_dp_is_edp(intel_dp)) {
		mask |= EDP_PSR_DEBUG_MASK_MEMUP;

		/*
		 * For some unknown reason on HSW non-ULT (or at least on
		 * Dell Latitude E6540) external displays start to flicker
		 * when PSR is enabled on the eDP. SR/PC6 residency is much
		 * higher than should be possible with an external display.
		 * As a workaround leave LPSP unmasked to prevent PSR entry
		 * when external displays are active.
		 */
		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
			mask |= EDP_PSR_DEBUG_MASK_LPSP;

		if (DISPLAY_VER(display) < 20)
			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;

		/*
		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
		 * registers in order to keep the CURSURFLIVE tricks working :(
		 */
		if (IS_DISPLAY_VER(display, 9, 10))
			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

		/* allow PSR with sprite enabled */
		if (display->platform.haswell)
			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
	}

	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);

	psr_irq_control(intel_dp);

	/*
	 * TODO: if future platforms support DC3CO in more than one
	 * transcoder, EXITLINE will need to be unset when disabling PSR
	 */
	if (intel_dp->psr.dc3co_exitline)
		intel_de_rmw(display,
			     TRANS_EXITLINE(display, cpu_transcoder),
			     EXITLINE_MASK,
			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);

	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	wm_optimization_wa(intel_dp, crtc_state);

	if (intel_dp->psr.sel_update_enabled) {
		if (DISPLAY_VER(display) == 9)
			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
				     PSR2_VSC_ENABLE_PROG_HEADER |
				     PSR2_ADD_VERTICAL_LINE_COUNT);

		/*
		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity, this may
		 * cause issues if non-supported panels are used.
		 */
		if (!intel_dp->psr.panel_replay_enabled &&
		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
		     display->platform.alderlake_p))
			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
				     0, ADLP_1_BASED_X_GRANULARITY);

		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (!intel_dp->psr.panel_replay_enabled &&
		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
			intel_de_rmw(display,
				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
				     0,
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
		else if (display->platform.alderlake_p)
			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
	}

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    !intel_dp->psr.panel_replay_enabled)
		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);

	intel_alpm_configure(intel_dp, crtc_state);
}

static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	if (intel_dp->psr.panel_replay_enabled)
		goto no_err;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time the PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
	val &= psr_irq_psr_error_bit_get(intel_dp);
	if (val) {
		intel_dp->psr.sink_not_reliable = true;
		drm_dbg_kms(display->drm,
			    "PSR interruption error set, not enabling PSR\n");
		return false;
	}

no_err:
	return true;
}

static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 val;

	drm_WARN_ON(display->drm, intel_dp->psr.enabled);

	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;
	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
	intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
	intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
	intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;

	if (!psr_interrupt_error_check(intel_dp))
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
	else
		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
			    intel_dp->psr.sel_update_enabled ? "2" : "1");

	/*
	 * Enabling sink PSR/Panel Replay here only for PSR. Panel Replay enable
	 * bit is already written at this point. Sink ALPM is enabled here for
	 * PSR and Panel Replay. See
	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
	 * - Selective Update
	 * - Region Early Transport
	 * - Selective Update Region Scanline Capture
	 * - VSC_SDP_CRC
	 * - HPD on different Errors
	 * - CRC verification
	 * are written for PSR and Panel Replay here.
	 */
	intel_psr_enable_sink(intel_dp, crtc_state);

	if (intel_dp_is_edp(intel_dp))
		intel_snps_phy_update_psr_power_state(&dig_port->base, true);

	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;
	intel_dp->psr.pause_counter = 0;

	/*
	 * Link_ok is sticky and set here on PSR enable. We can assume link
	 * training is complete as we never continue to PSR enable with an
	 * untrained link. Link_ok is kept as set until the first short pulse
	 * interrupt. This works around panels reporting a bad link after PSR
	 * is enabled.
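	 * intel_psr_disable() clears link_ok again.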
	 */
	intel_dp->psr.link_ok = true;

	intel_psr_activate(intel_dp);
}

static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(display, cpu_transcoder)) {
			val = intel_de_read(display,
					    EDP_PSR2_CTL(display, cpu_transcoder));
			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(display,
				    psr_ctl_reg(display, cpu_transcoder));
		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.panel_replay_enabled) {
		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
	} else if (intel_dp->psr.sel_update_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);

		val = intel_de_rmw(display,
				   EDP_PSR2_CTL(display, cpu_transcoder),
				   EDP_PSR2_ENABLE, 0);

		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
	} else {
		if ((DISPLAY_VER(display) == 20 ||
		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
		    intel_dp->psr.pkg_c_latency_used)
			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
									       intel_dp->psr.pipe,
									       false);

		val = intel_de_rmw(display,
				   psr_ctl_reg(display, cpu_transcoder),
				   EDP_PSR_ENABLE, 0);

		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
	}
	intel_dp->psr.active = false;
}

static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t psr_status;
	u32 psr_status_mask;

	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
					  intel_dp->psr.panel_replay_enabled)) {
		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = psr_status_reg(display, cpu_transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear_ms(display, psr_status,
				       psr_status_mask, 2000))
		drm_err(display->drm, "Timed out waiting for PSR idle state\n");
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
	else
		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
			    intel_dp->psr.sel_update_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	if (DISPLAY_VER(display) >= 11)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);

	if (intel_dp->psr.sel_update_enabled) {
		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (!intel_dp->psr.panel_replay_enabled &&
		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
			intel_de_rmw(display,
				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
		else if (display->platform.alderlake_p)
			intel_de_rmw(display, CLKGATE_DIS_MISC,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
	}

	if (intel_dp_is_edp(intel_dp))
		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);

	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
		intel_alpm_disable(intel_dp);

	/* Disable PSR on Sink */
	if (!intel_dp->psr.panel_replay_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

		if (intel_dp->psr.sel_update_enabled)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_RECEIVER_ALPM_CONFIG, 0);
	}

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    !intel_dp->psr.panel_replay_enabled)
		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);

	intel_dp->psr.enabled = false;
	intel_dp->psr.panel_replay_enabled = false;
	intel_dp->psr.sel_update_enabled = false;
	intel_dp->psr.psr2_sel_fetch_enabled = false;
	intel_dp->psr.su_region_et_enabled = false;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.active_non_psr_pipes = 0;
	intel_dp->psr.pkg_c_latency_used = 0;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
			!CAN_PANEL_REPLAY(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	intel_dp->psr.link_ok = false;

	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

/**
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after enabling PSR. Pauses are reference
 * counted: only the first intel_psr_pause() actually deactivates PSR, and
 * only the matching final intel_psr_resume() activates it again.
 */
void intel_psr_pause(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled) {
		mutex_unlock(&psr->lock);
		return;
	}

	if (intel_dp->psr.pause_counter++ == 0) {
		intel_psr_exit(intel_dp);
		intel_psr_wait_exit_locked(intel_dp);
	}

	mutex_unlock(&psr->lock);

	cancel_work_sync(&psr->work);
	cancel_delayed_work_sync(&psr->dc3co_work);
}

/**
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after pausing PSR.
 */
void intel_psr_resume(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto out;

	if (!psr->pause_counter) {
		drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
		goto out;
	}

	if (--intel_dp->psr.pause_counter == 0)
		intel_psr_activate(intel_dp);

out:
	mutex_unlock(&psr->lock);
}

/**
 * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
 * notification.
 * @crtc_state: the CRTC state
 *
 * We need to block DC6 entry in case of Panel Replay, as enabling the VBI
 * doesn't prevent it there. Panel Replay switches the main link off on DC
 * entry, which means vblank interrupts are not fired, and that is a problem
 * if user-space is polling for vblank events. Also Wa_16025596647 needs to
 * know when vblank is enabled/disabled.
 */
bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
		struct intel_dp *intel_dp;

		if (!intel_encoder_is_dp(encoder))
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_is_edp(intel_dp))
			continue;

		if (CAN_PANEL_REPLAY(intel_dp))
			return true;

		if ((DISPLAY_VER(display) == 20 ||
		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
		    CAN_PSR(intel_dp))
			return true;
	}

	return false;
}

/**
 * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
 * @dsb: DSB context
 * @state: the atomic state
 * @crtc: the CRTC
 *
 * Generate PSR "Frame Change" event.
 */
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
					  struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);
	struct intel_display *display = to_intel_display(crtc);

	if (crtc_state->has_psr)
		intel_de_write_dsb(display, dsb,
				   CURSURFLIVE(display, crtc->pipe), 0);
}

/**
 * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
 * @crtc_state: the crtc state
 *
 * Return minimum SCL lines/delay needed by PSR.
 */
int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
{
	return _intel_psr_min_set_context_latency(crtc_state,
						  crtc_state->has_panel_replay,
						  crtc_state->has_sel_update);
}

static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
		PSR2_MAN_TRK_CTL_ENABLE;
}

static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
		ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
		PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}

static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
		ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
		PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}

static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
	return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
		ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
		PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}

static void intel_psr_force_update(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	/*
	 * Display WA #0884: skl+
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the current active
	 * pipe.
	 *
	 * This workaround does not exist for platforms with display 10 or
	 * newer, but testing proved that it works up to display 13; for
	 * anything newer, testing will be needed.
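	 *
	 * Writing 0 to CURSURFLIVE presumably makes the HW frontbuffer
	 * tracking see a dummy cursor surface address update, forcing a
	 * PSR exit/single update without changing any actual plane state.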
	 */
	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}

void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
					  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct intel_encoder *encoder;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (!dsb)
			lockdep_assert_held(&intel_dp->psr.lock);

		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;
		break;
	}

	intel_de_write_dsb(display, dsb,
			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
			   crtc_state->psr2_man_track_ctl);

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
			   crtc_state->pipe_srcsz_early_tpt);
}

static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  bool full_update)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 val = man_trk_ctl_enable_bit_get(display);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(display);

	if (full_update) {
		val |= man_trk_ctl_continuos_full_frame(display);
		goto exit;
	}

	if (crtc_state->psr2_su_area.y1 == -1)
		goto exit;

	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
	} else {
		drm_WARN_ON(crtc_state->uapi.crtc->dev,
			    crtc_state->psr2_su_area.y1 % 4 ||
			    crtc_state->psr2_su_area.y2 % 4);

		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
			crtc_state->psr2_su_area.y1 / 4 + 1);
		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
			crtc_state->psr2_su_area.y2 / 4 + 1);
	}
exit:
	crtc_state->psr2_man_track_ctl = val;
}

static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
					  bool full_update)
{
	int width, height;

	if (!crtc_state->enable_psr2_su_region_et || full_update)
		return 0;

	width = drm_rect_width(&crtc_state->psr2_su_area);
	height = drm_rect_height(&crtc_state->psr2_su_area);

	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
}

static void clip_area_update(struct drm_rect *overlap_damage_area,
			     struct drm_rect *damage_area,
			     struct drm_rect *pipe_src)
{
	if (!drm_rect_intersect(damage_area, pipe_src))
		return;

	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;

	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}

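/*
 * Align the SU region to the panel's SU granularity (or the DSC slice
 * height on ADL-P and display 14+ when compression is enabled): y1 is
 * rounded down and y2 up to the alignment computed below.
 */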
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u16 y_alignment;

	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
	if (crtc_state->dsc.compression_enable &&
	    (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
		y_alignment = vdsc_cfg->slice_height;
	else
		y_alignment = crtc_state->su_y_granularity;

	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
	if (crtc_state->psr2_su_area.y2 % y_alignment)
		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
						y_alignment) + 1) * y_alignment;
}

/*
 * When early transport is in use we need to extend the SU area to cover the
 * cursor fully when the cursor is in the SU area.
 */
static void
intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  bool *cursor_in_su_area)
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		struct drm_rect inter;

		if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
			continue;

		if (plane->id != PLANE_CURSOR)
			continue;

		if (!new_plane_state->uapi.visible)
			continue;

		inter = crtc_state->psr2_su_area;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
				 &crtc_state->pipe_src);
		*cursor_in_su_area = true;
	}
}

/*
 * TODO: Not clear how to handle planes with negative position. Also planes
 * are not updated if they have a negative X position, so for now do a full
 * update in these cases.
 *
 * Plane scaling and rotation are not supported by selective fetch and both
 * properties can change without a modeset, so they need to be checked at
 * every atomic commit.
 */
static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
{
	if (plane_state->uapi.dst.y1 < 0 ||
	    plane_state->uapi.dst.x1 < 0 ||
	    plane_state->scaler_id >= 0 ||
	    plane_state->hw.rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}

/*
 * Check for pipe properties that are not supported by selective fetch.
 *
 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
 * after intel_psr_compute_config(), so for now keep PSR2 selective fetch
 * enabled and go down the full update path.
 */
static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->scaler_state.scaler_id >= 0 ||
	    crtc_state->async_flip_planes)
		return false;

	return true;
}

/* Wa 14019834836 */
static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;
	int hactive_limit;

	if (crtc_state->psr2_su_area.y1 != 0 ||
	    crtc_state->psr2_su_area.y2 != 0)
		return;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
	else
		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;

	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_is_edp(intel_dp) &&
		    intel_dp->psr.panel_replay_enabled &&
		    intel_dp->psr.sel_update_enabled) {
			crtc_state->psr2_su_area.y2++;
			return;
		}
	}
}

static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* Wa_14014971492 */
	if (!crtc_state->has_panel_replay &&
	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
	      display->platform.alderlake_p || display->platform.tigerlake)) &&
	    crtc_state->splitter.enable)
		crtc_state->psr2_su_area.y1 = 0;

	/* Wa 14019834836 */
	if (DISPLAY_VER(display) == 30)
		intel_psr_apply_pr_link_on_su_wa(crtc_state);
}

int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	bool full_update = false, cursor_in_su_area = false;
	int i, ret;

	if (!crtc_state->enable_psr2_sel_fetch)
		return 0;

	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
		full_update = true;
		goto skip_sel_fetch_set_loop;
	}

	crtc_state->psr2_su_area.x1 = 0;
	crtc_state->psr2_su_area.y1 = -1;
	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
	crtc_state->psr2_su_area.y2 = -1;

	/*
	 * Calculate minimal selective fetch area of each plane and calculate
	 * the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be set
	 * using the whole pipe damaged area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
						      .x2 = INT_MAX };

		if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
			continue;

		if (!new_plane_state->uapi.visible &&
		    !old_plane_state->uapi.visible)
			continue;

		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
			full_update = true;
			break;
		}

		/*
		 * If the visibility changed or the plane moved, mark the whole
		 * plane area as damaged as it needs to be completely redrawn
		 * in both the new and the old position.
		 */
		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
		    !drm_rect_equals(&new_plane_state->uapi.dst,
				     &old_plane_state->uapi.dst)) {
			if (old_plane_state->uapi.visible) {
				damaged_area.y1 = old_plane_state->uapi.dst.y1;
				damaged_area.y2 = old_plane_state->uapi.dst.y2;
				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
						 &crtc_state->pipe_src);
			}

			if (new_plane_state->uapi.visible) {
				damaged_area.y1 = new_plane_state->uapi.dst.y1;
				damaged_area.y2 = new_plane_state->uapi.dst.y2;
				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
						 &crtc_state->pipe_src);
			}
			continue;
		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
			/* If alpha changed mark the whole plane area as damaged */
			damaged_area.y1 = new_plane_state->uapi.dst.y1;
			damaged_area.y2 = new_plane_state->uapi.dst.y2;
			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
					 &crtc_state->pipe_src);
			continue;
		}

		src = drm_plane_state_src(&new_plane_state->uapi);
		drm_rect_fp_to_int(&src, &src);

		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
						     &new_plane_state->uapi, &damaged_area))
			continue;

		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;

		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
	}

	/*
	 * TODO: For now we are just using full update in case
	 * selective fetch area calculation fails. To optimize this we
	 * should identify cases where this happens and fix the area
	 * calculation for those.
	 */
	if (crtc_state->psr2_su_area.y1 == -1) {
		drm_info_once(display->drm,
			      "Selective fetch area calculation failed in pipe %c\n",
			      pipe_name(crtc->pipe));
		full_update = true;
	}

	if (full_update)
		goto skip_sel_fetch_set_loop;

	intel_psr_apply_su_area_workarounds(crtc_state);

	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

	/*
	 * Adjust the SU area to cover the cursor fully as necessary (early
	 * transport). This needs to be done after
	 * drm_atomic_add_affected_planes to ensure a visible cursor is added
	 * to the affected planes even when the cursor is not updated by
	 * itself.
	 */
	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);

	intel_psr2_sel_fetch_pipe_alignment(crtc_state);

	/*
	 * Now that we have the pipe damaged area, check if it intersects
	 * with every plane; if it does, set the plane selective fetch area.
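	 * A plane whose new fetch area ends up empty still needs a plane
	 * update if selective fetch was previously enabled for it, so that
	 * the stale fetch area gets disabled.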
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect *sel_fetch_area, inter;
		struct intel_plane *linked = new_plane_state->planar_linked_plane;

		if (new_plane_state->hw.crtc != crtc_state->uapi.crtc ||
		    !new_plane_state->uapi.visible)
			continue;

		inter = crtc_state->psr2_su_area;
		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
			sel_fetch_area->y1 = -1;
			sel_fetch_area->y2 = -1;
			/*
			 * if plane sel fetch was previously enabled ->
			 * disable it
			 */
			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
				crtc_state->update_planes |= BIT(plane->id);

			continue;
		}

		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
			full_update = true;
			break;
		}

		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
		crtc_state->update_planes |= BIT(plane->id);

		/*
		 * Sel_fetch_area is calculated for UV plane. Use
		 * same area for Y plane as well.
		 */
		if (linked) {
			struct intel_plane_state *linked_new_plane_state;
			struct drm_rect *linked_sel_fetch_area;

			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_new_plane_state))
				return PTR_ERR(linked_new_plane_state);

			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
			crtc_state->update_planes |= BIT(linked->id);
		}
	}

skip_sel_fetch_set_loop:
	psr2_man_trk_ctl_calc(crtc_state, full_update);
	crtc_state->pipe_srcsz_early_tpt =
		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
	return 0;
}

void intel_psr2_panic_force_full_update(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = man_trk_ctl_enable_bit_get(display);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(display);
	val |= man_trk_ctl_continuos_full_frame(display);

	/* Directly write the register */
	intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
}

void intel_psr_pre_plane_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!HAS_PSR(display))
		return;

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     old_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;

		mutex_lock(&psr->lock);

		if (!new_crtc_state->has_psr)
			psr->no_psr_reason = new_crtc_state->no_psr_reason;

		if (psr->enabled) {
			/*
			 * Reasons to disable:
			 * - PSR disabled in new state
			 * - All planes will go inactive
			 * - Changing between PSR versions
			 * - Region Early Transport changing
			 * - Display WA #1136: skl, bxt
			 */
			if (intel_crtc_needs_modeset(new_crtc_state) ||
			    !new_crtc_state->has_psr ||
			    !new_crtc_state->active_planes ||
			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
				intel_psr_disable_locked(intel_dp);
			else if (new_crtc_state->wm_level_disabled)
				/* Wa_14015648006 */
				wm_optimization_wa(intel_dp, new_crtc_state);
		}

		mutex_unlock(&psr->lock);
	}
}

static void
verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!crtc_state->has_panel_replay)
		return;

	drm_WARN_ON(display->drm,
		    intel_dsc_enabled_on_link(crtc_state) &&
		    crtc_state->panel_replay_dsc_support ==
		    INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
}

void intel_psr_post_plane_update(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	verify_panel_replay_dsc_state(crtc_state);

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;
		bool keep_disabled = false;

		mutex_lock(&psr->lock);

		drm_WARN_ON(display->drm,
			    psr->enabled && !crtc_state->active_planes);

		if (psr->sink_not_reliable)
			keep_disabled = true;

		if (!crtc_state->active_planes) {
			psr->no_psr_reason = "All planes inactive";
			keep_disabled = true;
		}

		/* Display WA #1136: skl, bxt */
		if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
			psr->no_psr_reason = "Workaround #1136 for skl, bxt";
			keep_disabled = true;
		}

		if (!psr->enabled && !keep_disabled)
			intel_psr_enable_locked(intel_dp, crtc_state);
		else if (psr->enabled && !crtc_state->wm_level_disabled)
			/* Wa_14015648006 */
			wm_optimization_wa(intel_dp, crtc_state);

		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			intel_psr_force_update(intel_dp);

		/*
		 * Clear possible busy bits in case we have
		 * invalidate -> flip -> flush sequence.
		 */
		intel_dp->psr.busy_frontbuffer_bits = 0;

		mutex_unlock(&psr->lock);
	}
}

/*
 * From bspec: Panel Self Refresh (BDW+)
 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
 * defensive enough to cover everything.
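 *
 * For example at 60 Hz that is 1000 / 60 + 6 + 1.5 = ~24 ms, so 50 ms
 * leaves roughly a 2x margin even before considering slower refresh rates.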
 */
#define PSR_IDLE_TIMEOUT_MS	50

static int
_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
				   struct intel_dsb *dsb)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;

	/*
	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
	 * As all higher states have bit 4 of the PSR2 state set we can just
	 * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
	 */
	if (dsb) {
		intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
			       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
		return 0;
	}

	return intel_de_wait_for_clear_ms(display,
					  EDP_PSR2_STATUS(display, cpu_transcoder),
					  EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
					  PSR_IDLE_TIMEOUT_MS);
}

static int
_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
				   struct intel_dsb *dsb)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;

	if (dsb) {
		intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
			       EDP_PSR_STATUS_STATE_MASK, 0, 200,
			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
		return 0;
	}

	return intel_de_wait_for_clear_ms(display,
					  psr_status_reg(display, cpu_transcoder),
					  EDP_PSR_STATUS_STATE_MASK,
					  PSR_IDLE_TIMEOUT_MS);
}

/**
 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
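 * Panel Replay is skipped here; only PSR1/PSR2 have to reach idle before
 * a pipe update.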
 */
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		int ret;

		lockdep_assert_held(&intel_dp->psr.lock);

		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
			continue;

		if (intel_dp->psr.sel_update_enabled)
			ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
								 NULL);
		else
			ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
								 NULL);

		if (ret)
			drm_err(display->drm,
				"PSR wait timed out, atomic update may fail\n");
	}
}

void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
				 const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
		return;

	if (new_crtc_state->has_sel_update)
		_psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
	else
		_psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
}

static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
					  intel_dp->psr.panel_replay_enabled)) {
		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = psr_status_reg(display, cpu_transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear_ms(display, reg, mask, 50);
	if (err)
		drm_err(display->drm,
			"Timed out waiting for PSR idle for re-enable\n");

	/*
	 * After the unlocked wait, verify that PSR is still wanted!
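	 * Both the enabled state and the pause counter may have changed
	 * while the lock was dropped.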
	 */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
}

static int intel_psr_fastset_force(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(display->drm);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
	u32 old_mode, old_disable_bits;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
		    I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	old_disable_bits = intel_dp->psr.debug &
		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);

	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
3393 */ 3394 if (intel_dp->psr.enabled) 3395 psr_irq_control(intel_dp); 3396 3397 mutex_unlock(&intel_dp->psr.lock); 3398 3399 if (old_mode != mode || old_disable_bits != disable_bits) 3400 ret = intel_psr_fastset_force(display); 3401 3402 return ret; 3403 } 3404 3405 static void intel_psr_handle_irq(struct intel_dp *intel_dp) 3406 { 3407 struct intel_psr *psr = &intel_dp->psr; 3408 3409 intel_psr_disable_locked(intel_dp); 3410 psr->sink_not_reliable = true; 3411 /* let's make sure that sink is awaken */ 3412 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 3413 } 3414 3415 static void intel_psr_work(struct work_struct *work) 3416 { 3417 struct intel_dp *intel_dp = 3418 container_of(work, typeof(*intel_dp), psr.work); 3419 3420 mutex_lock(&intel_dp->psr.lock); 3421 3422 if (!intel_dp->psr.enabled) 3423 goto unlock; 3424 3425 if (READ_ONCE(intel_dp->psr.irq_aux_error)) { 3426 intel_psr_handle_irq(intel_dp); 3427 goto unlock; 3428 } 3429 3430 if (intel_dp->psr.pause_counter) 3431 goto unlock; 3432 3433 /* 3434 * We have to make sure PSR is ready for re-enable 3435 * otherwise it keeps disabled until next full enable/disable cycle. 3436 * PSR might take some time to get fully disabled 3437 * and be ready for re-enable. 3438 */ 3439 if (!__psr_wait_for_idle_locked(intel_dp)) 3440 goto unlock; 3441 3442 /* 3443 * The delayed work can race with an invalidate hence we need to 3444 * recheck. Since psr_flush first clears this and then reschedules we 3445 * won't ever miss a flush when bailing out here. 3446 */ 3447 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active) 3448 goto unlock; 3449 3450 intel_psr_activate(intel_dp); 3451 unlock: 3452 mutex_unlock(&intel_dp->psr.lock); 3453 } 3454 3455 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp) 3456 { 3457 struct intel_display *display = to_intel_display(intel_dp); 3458 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 3459 3460 if (!intel_dp->psr.psr2_sel_fetch_enabled) 3461 return; 3462 3463 if (DISPLAY_VER(display) >= 20) 3464 intel_de_write(display, LNL_SFF_CTL(cpu_transcoder), 3465 LNL_SFF_CTL_SF_SINGLE_FULL_FRAME); 3466 else 3467 intel_de_write(display, 3468 PSR2_MAN_TRK_CTL(display, cpu_transcoder), 3469 man_trk_ctl_enable_bit_get(display) | 3470 man_trk_ctl_partial_frame_bit_get(display) | 3471 man_trk_ctl_single_full_frame_bit_get(display) | 3472 man_trk_ctl_continuos_full_frame(display)); 3473 } 3474 3475 static void _psr_invalidate_handle(struct intel_dp *intel_dp) 3476 { 3477 struct intel_display *display = to_intel_display(intel_dp); 3478 3479 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { 3480 if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) { 3481 intel_dp->psr.psr2_sel_fetch_cff_enabled = true; 3482 intel_psr_configure_full_frame_update(intel_dp); 3483 } 3484 3485 intel_psr_force_update(intel_dp); 3486 } else { 3487 intel_psr_exit(intel_dp); 3488 } 3489 } 3490 3491 /** 3492 * intel_psr_invalidate - Invalidate PSR 3493 * @display: display device 3494 * @frontbuffer_bits: frontbuffer plane tracking bits 3495 * @origin: which operation caused the invalidate 3496 * 3497 * Since the hardware frontbuffer tracking has gaps we need to integrate 3498 * with the software frontbuffer tracking. This function gets called every 3499 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be 3500 * disabled if the frontbuffer mask contains a buffer relevant to PSR. 

static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
		intel_psr_handle_irq(intel_dp);
		goto unlock;
	}

	if (intel_dp->psr.pause_counter)
		goto unlock;

	/*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full
	 * enable/disable cycle. PSR might take some time to get
	 * fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (!intel_dp->psr.psr2_sel_fetch_enabled)
		return;

	if (DISPLAY_VER(display) >= 20)
		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
	else
		intel_de_write(display,
			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
			       man_trk_ctl_enable_bit_get(display) |
			       man_trk_ctl_partial_frame_bit_get(display) |
			       man_trk_ctl_single_full_frame_bit_get(display) |
			       man_trk_ctl_continuos_full_frame(display));
}

static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
			intel_psr_configure_full_frame_update(intel_dp);
		}

		intel_psr_force_update(intel_dp);
	} else {
		intel_psr_exit(intel_dp);
	}
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @display: display device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct intel_display *display,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			_psr_invalidate_handle(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}

/*
 * Once we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
 * events as well, so tgl_dc3co_flush_locked() will need to be changed
 * accordingly then.
 */
static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		       enum fb_op_origin origin)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
	    !intel_dp->psr.active)
		return;

	/*
	 * Every frontbuffer flush/flip event pushes back the delay of the
	 * delayed work; if the delayed work actually gets to run, the
	 * display has been idle for that long.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		return;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);
}

static void _psr_flush_handle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
		/* Selective fetch prior to LNL */
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* can we turn CFF off? */
			if (intel_dp->psr.busy_frontbuffer_bits == 0)
				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
		}

		/*
		 * Still keep the CFF bit enabled as we don't have a proper SU
		 * configuration in case an update is sent for any reason after
		 * the SFF bit gets cleared by the HW on the next vblank.
		 *
		 * NOTE: Setting the CFF bit is not needed from LunarLake
		 * onwards as we have a separate register for the SFF bit and
		 * are not overwriting the existing SU configuration.
		 */
		intel_psr_configure_full_frame_update(intel_dp);

		intel_psr_force_update(intel_dp);
	} else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
		/*
		 * PSR1 on all platforms
		 * PSR2 HW tracking
		 * Panel Replay Full frame update
		 */
		intel_psr_force_update(intel_dp);
	} else {
		/* Selective update LNL onwards */
		intel_psr_exit(intel_dp);
	}

	if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
		queue_work(display->wq.unordered, &intel_dp->psr.work);
}

/**
 * intel_psr_flush - Flush PSR
 * @display: display device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct intel_display *display,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If PSR is paused by an explicit intel_psr_paused() call,
		 * we have to ensure that PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.pause_counter)
			goto unlock;

		if (origin == ORIGIN_FLIP ||
		    (origin == ORIGIN_CURSOR_UPDATE &&
		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
			goto unlock;
		}

		if (pipe_frontbuffer_bits == 0)
			goto unlock;

		/* By definition flush = invalidate + flush */
		_psr_flush_handle(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}
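
/*
 * Illustrative sketch only, not part of the driver: how a frontbuffer
 * tracking caller is expected to pair the two calls above around CPU
 * rendering. The helper below is hypothetical; the real call sites live
 * in the frontbuffer tracking code.
 */
#if 0
static void example_frontbuffer_cpu_write(struct intel_display *display,
					  unsigned int frontbuffer_bits)
{
	/* Rendering is about to start: force a PSR exit and block re-entry. */
	intel_psr_invalidate(display, frontbuffer_bits, ORIGIN_CPU);

	/* ... CPU writes to the frontbuffer happen here ... */

	/* Rendering has completed and been flushed: PSR may be re-enabled. */
	intel_psr_flush(display, frontbuffer_bits, ORIGIN_CPU);
}
#endif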

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after connector initialization (which takes care
 * of the connector capabilities) and initializes the basic PSR state for
 * each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (!(HAS_PSR(display) || HAS_DP20(display)))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of the PSR registers per
	 * transcoder, but on BDW, GEN9 and GEN11 only the eDP transcoder
	 * has been validated by the HW team. So keep PSR hardcoded to
	 * PORT_A, i.e. a single instance, on BDW, GEN9 and GEN11.
	 * GEN12 onwards support an instance of the PSR registers per
	 * transcoder.
	 */
	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(display->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
	    DISPLAY_VER(display) >= 20)
		intel_dp->psr.source_panel_replay_support = true;

	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
		intel_dp->psr.source_support = true;

	/* Set link_standby vs. link_off defaults */
	if (DISPLAY_VER(display) < 12)
		/* For platforms up to TGL, respect the VBT again */
		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}

static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;
	unsigned int offset;

	offset = intel_dp->psr.panel_replay_enabled ?
		DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;

	ret = drm_dp_dpcd_readb(aux, offset, status);
	if (ret != 1)
		return ret;

	offset = intel_dp->psr.panel_replay_enabled ?
		DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;

	ret = drm_dp_dpcd_readb(aux, offset, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!psr->sel_update_enabled)
		return;

	if (intel_alpm_get_error(intel_dp)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(display->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

/*
 * The following error status bits are identical for PSR and Panel Replay:
 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
 * so this function relies on the PSR definitions.
 */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
		return;

	mutex_lock(&psr->lock);

	psr->link_ok = false;

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(display->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
	    (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
	    !error_status)
		drm_dbg_kms(display->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(display->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	if (!psr->panel_replay_enabled) {
		psr_alpm_check(intel_dp);
		psr_capability_changed_check(intel_dp);
	}

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}

/**
 * intel_psr_link_ok - return psr->link_ok
 * @intel_dp: struct intel_dp
 *
 * We are seeing unexpected link re-trainings with some panels. This is
 * caused by the panel reporting a bad link status after PSR is enabled.
 * Code checking the link status can call this to find out whether it can
 * ignore a bad link status reported by the panel, i.e. if the panel
 * reports a bad link but intel_psr_link_ok() returns true, the caller
 * should trust the latter.
 *
 * Return value of link_ok
 */
bool intel_psr_link_ok(struct intel_dp *intel_dp)
{
	bool ret;

	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
	    !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.link_ok;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}

/**
 * intel_psr_lock - grab PSR lock
 * @crtc_state: the crtc state
 *
 * This is initially meant to be used around CRTC updates, when
 * vblank-sensitive registers are updated and we need to grab the lock
 * beforehand to avoid vblank evasion issues.
 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}
3909 */ 3910 void intel_psr_unlock(const struct intel_crtc_state *crtc_state) 3911 { 3912 struct intel_display *display = to_intel_display(crtc_state); 3913 struct intel_encoder *encoder; 3914 3915 if (!crtc_state->has_psr) 3916 return; 3917 3918 for_each_intel_encoder_mask_with_psr(display->drm, encoder, 3919 crtc_state->uapi.encoder_mask) { 3920 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3921 3922 mutex_unlock(&intel_dp->psr.lock); 3923 break; 3924 } 3925 } 3926 3927 /* Wa_16025596647 */ 3928 static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp) 3929 { 3930 struct intel_display *display = to_intel_display(intel_dp); 3931 bool dc5_dc6_blocked; 3932 3933 if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used) 3934 return; 3935 3936 dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp); 3937 3938 if (intel_dp->psr.sel_update_enabled) 3939 psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 : 3940 psr_compute_idle_frames(intel_dp)); 3941 else 3942 intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, 3943 intel_dp->psr.pipe, 3944 dc5_dc6_blocked); 3945 } 3946 3947 static void psr_dc5_dc6_wa_work(struct work_struct *work) 3948 { 3949 struct intel_display *display = container_of(work, typeof(*display), 3950 psr_dc5_dc6_wa_work); 3951 struct intel_encoder *encoder; 3952 3953 for_each_intel_encoder_with_psr(display->drm, encoder) { 3954 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3955 3956 mutex_lock(&intel_dp->psr.lock); 3957 3958 if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled && 3959 !intel_dp->psr.pkg_c_latency_used) 3960 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); 3961 3962 mutex_unlock(&intel_dp->psr.lock); 3963 } 3964 } 3965 3966 /** 3967 * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6 3968 * @display: intel atomic state 3969 * 3970 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule 3971 * psr_dc5_dc6_wa_work used for applying/removing the workaround. 3972 */ 3973 void intel_psr_notify_dc5_dc6(struct intel_display *display) 3974 { 3975 if (DISPLAY_VER(display) != 20 && 3976 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) 3977 return; 3978 3979 schedule_work(&display->psr_dc5_dc6_wa_work); 3980 } 3981 3982 /** 3983 * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa 3984 * @display: intel atomic state 3985 * 3986 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init 3987 * psr_dc5_dc6_wa_work used for applying the workaround. 
3988 */ 3989 void intel_psr_dc5_dc6_wa_init(struct intel_display *display) 3990 { 3991 if (DISPLAY_VER(display) != 20 && 3992 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) 3993 return; 3994 3995 INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work); 3996 } 3997 3998 /** 3999 * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe 4000 * @state: intel atomic state 4001 * @crtc: intel crtc 4002 * @enable: enable/disable 4003 * 4004 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply 4005 * remove the workaround when pipe is getting enabled/disabled 4006 */ 4007 void intel_psr_notify_pipe_change(struct intel_atomic_state *state, 4008 struct intel_crtc *crtc, bool enable) 4009 { 4010 struct intel_display *display = to_intel_display(state); 4011 struct intel_encoder *encoder; 4012 4013 if (DISPLAY_VER(display) != 20 && 4014 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) 4015 return; 4016 4017 for_each_intel_encoder_with_psr(display->drm, encoder) { 4018 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4019 u8 active_non_psr_pipes; 4020 4021 mutex_lock(&intel_dp->psr.lock); 4022 4023 if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled) 4024 goto unlock; 4025 4026 active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes; 4027 4028 if (enable) 4029 active_non_psr_pipes |= BIT(crtc->pipe); 4030 else 4031 active_non_psr_pipes &= ~BIT(crtc->pipe); 4032 4033 if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes) 4034 goto unlock; 4035 4036 if ((enable && intel_dp->psr.active_non_psr_pipes) || 4037 (!enable && !intel_dp->psr.active_non_psr_pipes) || 4038 !intel_dp->psr.pkg_c_latency_used) { 4039 intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes; 4040 goto unlock; 4041 } 4042 4043 intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes; 4044 4045 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); 4046 unlock: 4047 mutex_unlock(&intel_dp->psr.lock); 4048 } 4049 } 4050 4051 /** 4052 * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank 4053 * @display: intel display struct 4054 * @enable: enable/disable 4055 * 4056 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply 4057 * remove the workaround when vblank is getting enabled/disabled 4058 */ 4059 void intel_psr_notify_vblank_enable_disable(struct intel_display *display, 4060 bool enable) 4061 { 4062 struct intel_encoder *encoder; 4063 4064 for_each_intel_encoder_with_psr(display->drm, encoder) { 4065 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4066 4067 mutex_lock(&intel_dp->psr.lock); 4068 if (intel_dp->psr.panel_replay_enabled) { 4069 mutex_unlock(&intel_dp->psr.lock); 4070 break; 4071 } 4072 4073 if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used) 4074 intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); 4075 4076 mutex_unlock(&intel_dp->psr.lock); 4077 return; 4078 } 4079 4080 /* 4081 * NOTE: intel_display_power_set_target_dc_state is used 4082 * only by PSR * code for DC3CO handling. DC3CO target 4083 * state is currently disabled in * PSR code. If DC3CO 4084 * is taken into use we need take that into account here 4085 * as well. 4086 */ 4087 intel_display_power_set_target_dc_state(display, enable ? 

static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	const char *status = "unknown";
	u32 val, status_val;

	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(display,
				    EDP_PSR2_STATUS(display, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(display,
				    psr_status_reg(display, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}

static void intel_psr_sink_capability(struct intel_connector *connector,
				      struct seq_file *m)
{
	seq_printf(m, "Sink support: PSR = %s",
		   str_yes_no(connector->dp.psr_caps.support));

	if (connector->dp.psr_caps.support)
		seq_printf(m, " [0x%02x]", connector->dp.psr_caps.dpcd[0]);
	if (connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
		seq_printf(m, " (Early Transport)");
	seq_printf(m, ", Panel Replay = %s", str_yes_no(connector->dp.panel_replay_caps.support));
	seq_printf(m, ", Panel Replay Selective Update = %s",
		   str_yes_no(connector->dp.panel_replay_caps.su_support));
	seq_printf(m, ", Panel Replay DSC support = %s",
		   panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
	if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
	    DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
		seq_printf(m, " (Early Transport)");
	seq_printf(m, "\n");
}

static void intel_psr_print_mode(struct intel_dp *intel_dp,
				 struct seq_file *m)
{
	struct intel_psr *psr = &intel_dp->psr;
	const char *status, *mode, *region_et;

	if (psr->enabled)
		status = " enabled";
	else
		status = "disabled";

	if (psr->panel_replay_enabled && psr->sel_update_enabled)
		mode = "Panel Replay Selective Update";
	else if (psr->panel_replay_enabled)
		mode = "Panel Replay";
	else if (psr->sel_update_enabled)
		mode = "PSR2";
	else if (psr->enabled)
		mode = "PSR1";
	else
		mode = "";

	if (psr->su_region_et_enabled)
		region_et = " (Early Transport)";
	else
		region_et = "";

	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
	if (psr->no_psr_reason)
		seq_printf(m, " %s\n", psr->no_psr_reason);
}

static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp,
			    struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct intel_psr *psr = &intel_dp->psr;
	struct ref_tracker *wakeref;
	bool enabled;
	u32 val, psr2_ctl;

	intel_psr_sink_capability(connector, m);

	if (!(connector->dp.psr_caps.support || connector->dp.panel_replay_caps.support))
		return 0;

	wakeref = intel_display_rpm_get(display);
	mutex_lock(&psr->lock);

	intel_psr_print_mode(intel_dp, m);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   str_yes_no(psr->sink_not_reliable));

		goto unlock;
	}

	if (psr->panel_replay_enabled) {
		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));

		if (intel_dp_is_edp(intel_dp))
			psr2_ctl = intel_de_read(display,
						 EDP_PSR2_CTL(display,
							      cpu_transcoder));

		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
	} else if (psr->sel_update_enabled) {
		val = intel_de_read(display,
				    EDP_PSR2_CTL(display, cpu_transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
		   str_enabled_disabled(enabled), val);
	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
		seq_printf(m, "PSR2_CTL: 0x%08x\n",
			   psr2_ctl);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
	seq_printf(m, "Performance counter: %u\n",
		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->sel_update_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
		 * (it returns zeros only) and it has been removed on Xe2_LPD.
		 */
		if (DISPLAY_VER(display) < 13) {
			/*
			 * Reading all 3 registers beforehand to minimize
			 * crossing a frame boundary between register reads
			 */
			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
				val = intel_de_read(display,
						    PSR2_SU_STATUS(display, cpu_transcoder, frame));
				su_frames_val[frame / 3] = val;
			}

			seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
				u32 su_blocks;

				su_blocks = su_frames_val[frame / 3] &
					    PSR2_SU_STATUS_MASK(frame);
				su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
				seq_printf(m, "%d\t%d\n", frame, su_blocks);
			}
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_display_rpm_put(display, wakeref);

	return 0;
}

static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
	struct intel_display *display = m->private;
	struct intel_dp *intel_dp = NULL;
	struct intel_encoder *encoder;

	if (!HAS_PSR(display))
		return -ENODEV;

	/* Find the first EDP which supports PSR */
	for_each_intel_encoder_with_psr(display->drm, encoder) {
		intel_dp = enc_to_intel_dp(encoder);
		break;
	}

	if (!intel_dp)
		return -ENODEV;

	return intel_psr_status(m, intel_dp, intel_dp->attached_connector);
}
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);

static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct intel_display *display = data;
	struct intel_encoder *encoder;
	int ret = -ENODEV;

	if (!HAS_PSR(display))
		return ret;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);

		// TODO: split to each transcoder's PSR debug state
		with_intel_display_rpm(display)
			ret = intel_psr_debug_set(intel_dp, val);
	}

	return ret;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct intel_display *display = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(display))
		return -ENODEV;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

void intel_psr_debugfs_register(struct intel_display *display)
{
	struct dentry *debugfs_root = display->drm->debugfs_root;

	debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
			    display, &i915_edp_psr_debug_fops);

	debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
			    display, &i915_edp_psr_status_fops);
}
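
/*
 * Illustrative usage of the files registered above (not driver code; the
 * debugfs mount point and DRM minor number vary by system):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *   # echo <I915_PSR_DEBUG_* mask> > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */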

static const char *psr_mode_str(struct intel_dp *intel_dp)
{
	if (intel_dp->psr.panel_replay_enabled)
		return "PANEL-REPLAY";
	else if (intel_dp->psr.enabled)
		return "PSR";

	return "unknown";
}

static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	const char *str;
	int ret;
	u8 status, error_status;

	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
		return -ENODEV;
	}

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
	if (ret)
		return ret;

	status &= DP_PSR_SINK_STATE_MASK;
	if (status < ARRAY_SIZE(sink_status))
		str = sink_status[status];
	else
		str = "unknown";

	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);

	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);

	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			    DP_PSR_LINK_CRC_ERROR))
		seq_puts(m, ":\n");
	else
		seq_puts(m, "\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static int i915_psr_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	return intel_psr_status(m, intel_dp, connector);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct dentry *root = connector->base.debugfs_entry;

	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	debugfs_create_file("i915_psr_sink_status", 0444, root,
			    connector, &i915_psr_sink_status_fops);

	if (HAS_PSR(display) || HAS_DP20(display))
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
}

bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
{
	/*
	 * eDP Panel Replay always uses ALPM;
	 * PSR2 uses ALPM but PSR1 doesn't
	 */
	return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
					     crtc_state->has_panel_replay);
}

bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
}

void intel_psr_compute_config_late(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int vblank = intel_crtc_vblank_length(crtc_state);
	int wake_lines;

	if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
	else if (intel_psr_needs_alpm(intel_dp, crtc_state))
		wake_lines = DISPLAY_VER(display) < 20 ?
			psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
					       crtc_state->alpm_state.fast_wake_lines) :
			crtc_state->alpm_state.io_wake_lines;
	else
		wake_lines = 0;

	/*
	 * Disable the PSR features if the wake lines exceed the available
	 * vblank. Though SCL is computed based on these PSR features, it is
	 * not reset even if the PSR features are disabled, to avoid changing
	 * the vblank start at this stage.
	 */
	if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
		drm_dbg_kms(display->drm,
			    "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
			    wake_lines);

		if (crtc_state->has_panel_replay) {
			crtc_state->has_panel_replay = false;
			/*
			 * TODO: Add fallback to PSR/PSR2. Since Panel Replay
			 * cannot be supported, we could fall back to PSR/PSR2.
			 * This will require calling compute_config for PSR and
			 * PSR2 with a check of the actual guardband instead of
			 * vblank_length.
			 */
			crtc_state->has_psr = false;
		}

		crtc_state->has_sel_update = false;
		crtc_state->enable_psr2_su_region_et = false;
		crtc_state->enable_psr2_sel_fetch = false;
	}

	/* Wa_18037818876 */
	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
		crtc_state->has_psr = false;
		drm_dbg_kms(display->drm,
			    "PSR disabled to workaround PSR FSM hang issue\n");
	}

	intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
}

int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int psr_min_guardband;
	int wake_lines;

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		return 0;

	if (crtc_state->has_panel_replay)
		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
	else if (crtc_state->has_sel_update)
		wake_lines = DISPLAY_VER(display) < 20 ?
			psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
					       crtc_state->alpm_state.fast_wake_lines) :
			crtc_state->alpm_state.io_wake_lines;
	else
		return 0;

	psr_min_guardband = wake_lines + crtc_state->set_context_latency;

	if (crtc_state->req_psr2_sdp_prior_scanline)
		psr_min_guardband++;

	return psr_min_guardband;
}
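
/*
 * Worked example for intel_psr_min_guardband(), with illustrative numbers
 * only: for a selective update config on DISPLAY_VER < 20, with
 * io_wake_lines = 5, fast_wake_lines = 7, set_context_latency = 1 and
 * req_psr2_sdp_prior_scanline not set:
 *
 *   wake_lines        = psr2_block_count_lines(5, 7)
 *   psr_min_guardband = wake_lines + 1
 *
 * i.e. the guardband must cover the (block-count rounded) wake time plus
 * the set context latency, plus one extra line when the PSR2 SDP has to
 * be sent prior to the scanline.
 */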