// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"

/*
 * Assert that vblank interrupts are currently disabled on @crtc.
 *
 * drm_crtc_vblank_get() returning 0 means a vblank reference was
 * successfully acquired, i.e. vblanks are (unexpectedly) enabled.
 * In that case warn, and drop the just-taken reference again so the
 * refcount stays balanced.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->dev);

	if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0,
			    "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
			    crtc->base.id, crtc->name))
		drm_crtc_vblank_put(crtc);
}

/* Return the CRTC with drm index 0 (the first registered CRTC). */
struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
{
	return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
}

/*
 * Look up the CRTC driving hardware pipe @pipe, or NULL if no
 * registered CRTC maps to it.
 */
struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
				       enum pipe pipe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		if (crtc->pipe == pipe)
			return crtc;
	}

	return NULL;
}

/* Block until the next vblank on @crtc has passed. */
void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
{
	drm_crtc_wait_one_vblank(&crtc->base);
}

/*
 * Wait for one vblank on @pipe, but only if its CRTC is currently
 * active (an inactive pipe generates no vblanks to wait for).
 */
void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
				     enum pipe pipe)
{
	struct intel_display *display = &i915->display;
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	if (crtc->active)
		intel_crtc_wait_for_next_vblank(crtc);
}

/*
 * Sample the current vblank counter for @crtc.
 *
 * Returns 0 for an inactive CRTC. When no hardware frame counter is
 * usable (max_vblank_count == 0, see intel_crtc_max_vblank_count())
 * fall back to the software-maintained accurate count.
 */
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);

	if (!crtc->active)
		return 0;

	if (!vblank->max_vblank_count)
		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);

	return crtc->base.funcs->get_vblank_counter(&crtc->base);
}

/*
 * Report the usable width of the hardware frame counter for this CRTC
 * state, or 0 if the software counter must be used instead.
 */
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
	 * have updated at the beginning of TE, if we want to use
	 * the hw counter, then we would find it updated in only
	 * the next TE, hence switching to sw counter.
	 */
	if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
				      I915_MODE_FLAG_DSI_USE_TE1))
		return 0;

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (DISPLAY_VER(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

/*
 * Enable vblank processing for the CRTC as part of pipe enable,
 * after configuring the appropriate hw frame counter width.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);

	/*
	 * Should really happen exactly when we enable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_enable(crtc);
}

/* Counterpart of intel_crtc_vblank_on() for pipe disable. */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/*
	 * Should really happen exactly when we disable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_disable(crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}

/*
 * Allocate a fresh CRTC state for @crtc, already reset to the
 * driver's default values. Returns NULL on allocation failure.
 * (Plain kmalloc is fine: intel_crtc_state_reset() memsets it.)
 */
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);

	if (crtc_state)
		intel_crtc_state_reset(crtc_state, crtc);

	return crtc_state;
}

/*
 * Reset @crtc_state to default values: zero everything, reinitialize
 * the drm core state, and set the "invalid/unset" sentinels for the
 * fields where 0 would be a valid value.
 */
void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
			    struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
	crtc_state->max_link_bpp_x16 = INT_MAX;
}

/*
 * Allocate a CRTC together with its initial state. Returns an
 * ERR_PTR(-ENOMEM) on failure; on success both crtc->base.state and
 * the legacy crtc->config alias point at the new state.
 */
static struct intel_crtc *intel_crtc_alloc(void)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return ERR_PTR(-ENOMEM);

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(crtc);
		return ERR_PTR(-ENOMEM);
	}

	crtc->base.state = &crtc_state->uapi;
	crtc->config = crtc_state;

	return crtc;
}

/*
 * Error-path teardown for a CRTC that was never registered with drm
 * (intel_crtc_init() failure); registered CRTCs go through
 * intel_crtc_destroy() instead.
 */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}

/* drm_crtc_funcs.destroy: teardown for a fully registered CRTC. */
static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);

	/* Matches the cpu_latency_qos_add_request() in intel_crtc_init(). */
	cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);

	drm_crtc_cleanup(&crtc->base);
	kfree(crtc);
}

/* drm_crtc_funcs.late_register: hook up per-CRTC debugfs entries. */
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
	intel_crtc_debugfs_add(to_intel_crtc(crtc));
	return 0;
}

/*
 * Common vtable entries shared by all the per-platform
 * drm_crtc_funcs variants below; only the vblank hooks differ.
 */
#define INTEL_CRTC_FUNCS \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources, \
	.late_register = intel_crtc_late_register

/* Non-GMCH platforms, display ver >= 8 (see intel_crtc_init()). */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Non-GMCH platforms below display ver 8. */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH: CHV/VLV/G4X. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH: other display ver 4 parts. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH: i945GM/i915GM. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH: remaining display ver 3 parts. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH: gen2 parts. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/*
 * Create and register the CRTC for hardware pipe @pipe, including its
 * primary, sprite/universal and cursor planes, the per-platform vblank
 * vtable, and its optional properties. Returns 0 or a negative errno;
 * on failure everything allocated so far for the CRTC is freed.
 */
int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = DISPLAY_RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	/* skl+ uses universal planes for everything, including primary */
	if (DISPLAY_VER(dev_priv) >= 9)
		primary = skl_universal_plane_create(dev_priv, pipe, PLANE_1);
	else
		primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	intel_init_fifo_underrun_reporting(dev_priv, crtc, false);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		if (DISPLAY_VER(dev_priv) >= 9)
			plane = skl_universal_plane_create(dev_priv, pipe, PLANE_2 + sprite);
		else
			plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Pick the vblank hook variant matching this platform. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (DISPLAY_VER(dev_priv) == 4)
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (DISPLAY_VER(dev_priv) == 3)
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (DISPLAY_VER(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (DISPLAY_VER(dev_priv) >= 11)
		drm_crtc_create_scaling_filter_property(&crtc->base,
							BIT(DRM_SCALING_FILTER_DEFAULT) |
							BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

	intel_color_crtc_init(crtc);
	intel_drrs_crtc_init(crtc);
	intel_crtc_crc_init(crtc);

	/*
	 * Registered at default (no constraint); tightened to 0 around
	 * vblank work in intel_crtc_vblank_work_init(). Removed in
	 * intel_crtc_destroy().
	 */
	cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);

	/* The rest of the driver assumes drm index == pipe. */
	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}

/* Legacy i915 ioctl: translate a CRTC id into its hardware pipe. */
int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
					   struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drm_crtc;
	struct intel_crtc *crtc;

	drm_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drm_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drm_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/*
 * Does this commit need the deferred vblank worker? Only for an
 * active, non-modeset commit that updates the color LUTs without
 * using the DSB (and isn't just preloading them).
 */
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->hw.active &&
		!intel_crtc_needs_modeset(crtc_state) &&
		!crtc_state->preload_luts &&
		intel_crtc_needs_color_update(crtc_state) &&
		!intel_color_uses_dsb(crtc_state);
}

/*
 * Vblank worker: load the LUTs during the vblank, then deliver any
 * pending completion event for the commit.
 */
static void intel_crtc_vblank_work(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);
	struct intel_crtc_state *crtc_state =
		container_of(work, typeof(*crtc_state), vblank_work);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_crtc_vblank_work_start(crtc);

	intel_color_load_luts(crtc_state);

	if (crtc_state->uapi.event) {
		spin_lock_irq(&crtc->base.dev->event_lock);
		drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
		spin_unlock_irq(&crtc->base.dev->event_lock);
		crtc_state->uapi.event = NULL;
	}

	trace_intel_crtc_vblank_work_end(crtc);
}

/*
 * Prepare the vblank worker for this commit and clamp CPU wakeup
 * latency to 0 until the worker has run (relaxed again in
 * intel_wait_for_vblank_workers()).
 */
static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
			     intel_crtc_vblank_work);
	/*
	 * Interrupt latency is critical for getting the vblank
	 * work executed as early as possible during the vblank.
	 */
	cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
}

/*
 * Flush all scheduled vblank workers for this atomic state and restore
 * the default CPU latency QoS on each affected CRTC.
 */
void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!intel_crtc_needs_vblank_work(crtc_state))
			continue;

		drm_vblank_work_flush(&crtc_state->vblank_work);
		cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
					       PM_QOS_DEFAULT_VALUE);
	}
}

/*
 * Convert a duration in microseconds into scanlines for the given
 * adjusted mode, rounding up. Returns at least the computed value;
 * a zero htotal (should not happen) yields 1 to avoid dividing by 0.
 */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
			     int usecs)
{
	/* paranoia */
	if (!adjusted_mode->crtc_htotal)
		return 1;

	return DIV_ROUND_UP_ULL(mul_u32_u32(usecs, adjusted_mode->crtc_clock),
				1000 * adjusted_mode->crtc_htotal);
}

/**
 * intel_pipe_update_start() - start update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the start of an update to pipe registers that should be updated
 * atomically regarding vblank. If the next vblank happens within
 * the next 100 us, this function waits until the vblank passes.
 *
 * After a successful call to this function, interrupts will be disabled
 * until a subsequent call to intel_pipe_update_end(). That is done to
 * avoid random delays.
 */
void intel_pipe_update_start(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_vblank_evade_ctx evade;
	int scanline;

	intel_psr_lock(new_crtc_state);

	/*
	 * Async flips don't evade vblank; hand the completion event to
	 * the flip done irq handler instead and return early (note: no
	 * local_irq_disable() on this path, so intel_pipe_update_end()
	 * must also bail before local_irq_enable()).
	 */
	if (new_crtc_state->do_async_flip) {
		spin_lock_irq(&crtc->base.dev->event_lock);
		/* arm the event for the flip done irq handler */
		crtc->flip_done_event = new_crtc_state->uapi.event;
		spin_unlock_irq(&crtc->base.dev->event_lock);

		new_crtc_state->uapi.event = NULL;
		return;
	}

	if (intel_crtc_needs_vblank_work(new_crtc_state))
		intel_crtc_vblank_work_init(new_crtc_state);

	/*
	 * Legacy cursor updates defer old-plane unpin to a vblank
	 * worker as well; set those up per plane on this CRTC.
	 */
	if (state->base.legacy_cursor_update) {
		struct intel_plane *plane;
		struct intel_plane_state *old_plane_state, *new_plane_state;
		int i;

		for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
						     new_plane_state, i) {
			if (old_plane_state->uapi.crtc == &crtc->base)
				intel_plane_init_cursor_vblank_work(old_plane_state,
								    new_plane_state);
		}
	}

	intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);

	if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
		goto irq_disable;

	/*
	 * Wait for psr to idle out after enabling the VBL interrupts
	 * VBL interrupts will start the PSR exit and prevent a PSR
	 * re-entry as well.
	 */
	intel_psr_wait_for_idle_locked(new_crtc_state);

	local_irq_disable();

	crtc->debug.min_vbl = evade.min;
	crtc->debug.max_vbl = evade.max;
	trace_intel_pipe_update_start(crtc);

	scanline = intel_vblank_evade(&evade);

	drm_crtc_vblank_put(&crtc->base);

	/* Record where/when the critical section started, for the
	 * failure check in intel_pipe_update_end(). */
	crtc->debug.scanline_start = scanline;
	crtc->debug.start_vbl_time = ktime_get();
	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);

	trace_intel_pipe_update_vblank_evaded(crtc);
	return;

irq_disable:
	/* Still enter the critical section so _end() stays balanced. */
	local_irq_disable();
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
/*
 * Record how long the vblank-evade critical section took in a
 * log2-bucketed histogram plus min/max/sum stats, and complain if it
 * overran the evasion budget.
 */
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
{
	u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
	unsigned int h;

	/* bucket index: log2 of the duration in ~0.5us (2^9 ns) units */
	h = ilog2(delta >> 9);
	if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
		h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
	crtc->debug.vbl.times[h]++;

	crtc->debug.vbl.sum += delta;
	if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
		crtc->debug.vbl.min = delta;
	if (delta > crtc->debug.vbl.max)
		crtc->debug.vbl.max = delta;

	if (delta > 1000 * VBLANK_EVASION_TIME_US) {
		drm_dbg_kms(crtc->base.dev,
			    "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
			    pipe_name(crtc->pipe),
			    div_u64(delta, 1000),
			    VBLANK_EVASION_TIME_US);
		crtc->debug.vbl.over++;
	}
}
#else
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif

/*
 * Arm the commit's completion event (if any) to be sent on the next
 * vblank. Consumes crtc_state->uapi.event; drm_crtc_arm_vblank_event()
 * takes over the vblank reference asserted here.
 */
void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned long irqflags;

	if (!crtc_state->uapi.event)
		return;

	drm_WARN_ON(crtc->base.dev, drm_crtc_vblank_get(&crtc->base) != 0);

	spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
	drm_crtc_arm_vblank_event(&crtc->base, crtc_state->uapi.event);
	spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);

	crtc_state->uapi.event = NULL;
}

/**
 * intel_pipe_update_end() - end update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the end of an update started with intel_pipe_update_start(). This
 * re-enables interrupts and verifies the update was actually completed
 * before a vblank.
 */
void intel_pipe_update_end(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;
	int scanline_end = intel_get_crtc_scanline(crtc);
	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
	ktime_t end_vbl_time = ktime_get();
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Async flips never entered the irq-off critical section. */
	if (new_crtc_state->do_async_flip)
		goto out;

	trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);

	/*
	 * Incase of mipi dsi command mode, we need to set frame update
	 * request for every commit.
	 */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
		icl_dsi_frame_update(new_crtc_state);

	/* We're still in the vblank-evade critical section, this can't race.
	 * Would be slightly nice to just grab the vblank count and arm the
	 * event outside of the critical section - the spinlock might spin for a
	 * while ...
	 */
	if (intel_crtc_needs_vblank_work(new_crtc_state)) {
		drm_vblank_work_schedule(&new_crtc_state->vblank_work,
					 drm_crtc_accurate_vblank_count(&crtc->base) + 1,
					 false);
	} else {
		intel_crtc_arm_vblank_event(new_crtc_state);
	}

	if (state->base.legacy_cursor_update) {
		struct intel_plane *plane;
		struct intel_plane_state *old_plane_state;
		int i;

		for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
			if (old_plane_state->uapi.crtc == &crtc->base &&
			    old_plane_state->unpin_work.vblank) {
				drm_vblank_work_schedule(&old_plane_state->unpin_work,
							 drm_crtc_accurate_vblank_count(&crtc->base) + 1,
							 false);

				/* Remove plane from atomic state, cleanup/free is done from vblank worker. */
				memset(&state->base.planes[i], 0, sizeof(state->base.planes[i]));
			}
		}
	}

	/*
	 * Send VRR Push to terminate Vblank. If we are already in vblank
	 * this has to be done _after_ sampling the frame counter, as
	 * otherwise the push would immediately terminate the vblank and
	 * the sampled frame counter would correspond to the next frame
	 * instead of the current frame.
	 *
	 * There is a tiny race here (iff vblank evasion failed us) where
	 * we might sample the frame counter just before vmax vblank start
	 * but the push would be sent just after it. That would cause the
	 * push to affect the next frame instead of the current frame,
	 * which would cause the next frame to terminate already at vmin
	 * vblank start instead of vmax vblank start.
	 */
	intel_vrr_send_push(new_crtc_state);

	/* Leave the critical section entered in intel_pipe_update_start(). */
	local_irq_enable();

	/* Timing stats are unreliable under a vGPU; skip the checks. */
	if (intel_vgpu_active(dev_priv))
		goto out;

	/*
	 * A frame counter change across the critical section means the
	 * update raced a vblank, i.e. evasion failed — report it.
	 */
	if (crtc->debug.start_vbl_count &&
	    crtc->debug.start_vbl_count != end_vbl_count) {
		drm_err(&dev_priv->drm,
			"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
			pipe_name(pipe), crtc->debug.start_vbl_count,
			end_vbl_count,
			ktime_us_delta(end_vbl_time,
				       crtc->debug.start_vbl_time),
			crtc->debug.min_vbl, crtc->debug.max_vbl,
			crtc->debug.scanline_start, scanline_end);
	}

	dbg_vblank_evade(crtc, end_vbl_time);

out:
	intel_psr_unlock(new_crtc_state);
}