// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_parent.h"
#include "intel_pipe_crc.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"

/*
 * Warn if vblank delivery is still enabled on @crtc.
 *
 * drm_crtc_vblank_get() returning 0 means the vblank reference was
 * taken successfully, i.e. vblanks are currently enabled — which is
 * the unexpected state here. The put balances the successful get so
 * the reference count stays consistent even on the warning path.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        struct intel_display *display = to_intel_display(crtc->dev);

        if (INTEL_DISPLAY_STATE_WARN(display, drm_crtc_vblank_get(crtc) == 0,
                                     "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
                                     crtc->base.id, crtc->name))
                drm_crtc_vblank_put(crtc);
}

/* Return the CRTC with DRM index 0. */
struct intel_crtc *intel_first_crtc(struct intel_display *display)
{
        return to_intel_crtc(drm_crtc_from_index(display->drm, 0));
}

/*
 * Look up the CRTC driving @pipe, or NULL if no CRTC with that
 * pipe enum value exists on this device.
 */
struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
                                       enum pipe pipe)
{
        struct intel_crtc *crtc;

        for_each_intel_crtc(display->drm, crtc) {
                if (crtc->pipe == pipe)
                        return crtc;
        }

        return NULL;
}

/* Block until the next vblank on @crtc has passed. */
void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
{
        drm_crtc_wait_one_vblank(&crtc->base);
}

/*
 * Wait for one vblank on @pipe, but only if its CRTC is active.
 *
 * NOTE(review): intel_crtc_for_pipe() can return NULL, and the
 * result is dereferenced unconditionally — callers are presumably
 * expected to only pass pipes that exist on this device.
 */
void intel_wait_for_vblank_if_active(struct intel_display *display,
                                     enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

        if (crtc->active)
                intel_crtc_wait_for_next_vblank(crtc);
}

/*
 * Return the current vblank counter value for @crtc.
 *
 * Returns 0 while the CRTC is inactive. When the platform/output has
 * no usable hardware frame counter (max_vblank_count == 0, see
 * intel_crtc_max_vblank_count()) the software counter is used instead;
 * otherwise the hardware counter is read via the crtc funcs.
 */
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
        struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);

        if (!crtc->active)
                return 0;

        if (!vblank->max_vblank_count) {
                /*
                 * On preempt-rt we cannot take the vblank spinlock
                 * since this function is called from tracepoints,
                 * so settle for the possibly stale software count
                 * instead of the accurate one.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT_RT))
                        return (u32)drm_crtc_vblank_count(&crtc->base);
                else
                        return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
        }

        return crtc->base.funcs->get_vblank_counter(&crtc->base);
}

/*
 * Determine the usable width of the hardware frame counter for the
 * given crtc state. A return of 0 tells the vblank core to fall back
 * to the software counter.
 */
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);

        /*
         * From Gen 11, in case of dsi cmd mode, frame counter wouldn't
         * have updated at the beginning of TE, if we want to use
         * the hw counter, then we would find it updated in only
         * the next TE, hence switching to sw counter.
         */
        if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
                                      I915_MODE_FLAG_DSI_USE_TE1))
                return 0;

        /*
         * On i965gm the hardware frame counter reads
         * zero when the TV encoder is enabled :(
         */
        if (display->platform.i965gm &&
            (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
                return 0;

        if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
                return 0xffffffff; /* full 32 bit counter */
        else if (DISPLAY_VER(display) >= 3)
                return 0xffffff; /* only 24 bits of frame count */
        else
                return 0; /* Gen2 doesn't have a hardware frame counter */
}

/*
 * Enable vblank interrupt delivery for a CRTC that is being enabled.
 * Also latches whether PSR needs a vblank notification for this state
 * and programs the hw frame counter width into the vblank core.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

        crtc->vblank_psr_notify = intel_psr_needs_vblank_notification(crtc_state);

        assert_vblank_disabled(&crtc->base);
        drm_crtc_set_max_vblank_count(&crtc->base,
                                      intel_crtc_max_vblank_count(crtc_state));
        drm_crtc_vblank_on(&crtc->base);

        /*
         * Should really happen exactly when we enable the pipe
         * but we want the frame counters in the trace, and that
         * requires vblank support on some platforms/outputs.
         */
        trace_intel_pipe_enable(crtc);
}

/*
 * Disable vblank interrupt delivery for a CRTC that is being disabled,
 * and flush any pending PSR vblank notify work that may still reference
 * this CRTC.
 */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

        /*
         * Should really happen exactly when we disable the pipe
         * but we want the frame counters in the trace, and that
         * requires vblank support on some platforms/outputs.
         */
        trace_intel_pipe_disable(crtc);

        drm_crtc_vblank_off(&crtc->base);
        assert_vblank_disabled(&crtc->base);

        crtc->vblank_psr_notify = false;

        flush_work(&display->irq.vblank_notify_work);
}

/*
 * Allocate and reset a new crtc state for @crtc.
 *
 * Returns NULL on allocation failure. kmalloc (not kzalloc) is fine
 * here because intel_crtc_state_reset() memsets the whole state.
 */
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
{
        struct intel_crtc_state *crtc_state;

        crtc_state = kmalloc_obj(*crtc_state);

        if (crtc_state)
                intel_crtc_state_reset(crtc_state, crtc);

        return crtc_state;
}

/*
 * Reset @crtc_state to a clean "nothing configured" state: zero the
 * whole struct, re-init the uapi state, and set the sentinel values
 * for fields where 0 would be a valid id.
 */
void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
                            struct intel_crtc *crtc)
{
        memset(crtc_state, 0, sizeof(*crtc_state));

        __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

        crtc_state->cpu_transcoder = INVALID_TRANSCODER;
        crtc_state->master_transcoder = INVALID_TRANSCODER;
        crtc_state->hsw_workaround_pipe = INVALID_PIPE;
        crtc_state->scaler_state.scaler_id = -1;
        crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
        crtc_state->max_link_bpp_x16 = INT_MAX;
}

/*
 * Allocate a CRTC together with its initial (empty) state.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct intel_crtc *intel_crtc_alloc(void)
{
        struct intel_crtc_state *crtc_state;
        struct intel_crtc *crtc;

        crtc = kzalloc_obj(*crtc);
        if (!crtc)
                return ERR_PTR(-ENOMEM);

        crtc_state = intel_crtc_state_alloc(crtc);
        if (!crtc_state) {
                kfree(crtc);
                return ERR_PTR(-ENOMEM);
        }

        crtc->base.state = &crtc_state->uapi;
        crtc->config = crtc_state;

        return crtc;
}

/*
 * Undo intel_crtc_alloc(); only used on the __intel_crtc_init() error
 * path, i.e. before drm_crtc_init_with_planes() has succeeded.
 */
static void intel_crtc_free(struct intel_crtc *crtc)
{
        intel_crtc_destroy_state(&crtc->base, crtc->base.state);
        kfree(crtc);
}

/*
 * .destroy hook for a fully registered CRTC: drop the cpu latency QoS
 * request added in __intel_crtc_init() and free the CRTC.
 */
static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
        struct intel_crtc *crtc = to_intel_crtc(_crtc);

        cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);

        drm_crtc_cleanup(&crtc->base);
        kfree(crtc);
}

/* .late_register hook: add the per-CRTC debugfs entries. */
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
        intel_crtc_debugfs_add(to_intel_crtc(crtc));
        return 0;
}

/* Hooks shared by all the per-generation crtc funcs tables below. */
#define INTEL_CRTC_FUNCS \
        .set_config = drm_atomic_helper_set_config, \
        .destroy = intel_crtc_destroy, \
        .page_flip = drm_atomic_helper_page_flip, \
        .atomic_duplicate_state = intel_crtc_duplicate_state, \
        .atomic_destroy_state = intel_crtc_destroy_state, \
        .set_crc_source = intel_crtc_set_crc_source, \
        .verify_crc_source = intel_crtc_verify_crc_source, \
        .get_crc_sources = intel_crtc_get_crc_sources, \
        .late_register = intel_crtc_late_register

/* BDW+ (non-GMCH, display ver >= 8) */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = bdw_enable_vblank,
        .disable_vblank = bdw_disable_vblank,
        .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* ILK..HSW (non-GMCH, display ver < 8) */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = ilk_enable_vblank,
        .disable_vblank = ilk_disable_vblank,
        .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* G4X/VLV/CHV */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = i965_enable_vblank,
        .disable_vblank = i965_disable_vblank,
        .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Gen4 (except G4X) */
static const struct drm_crtc_funcs i965_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i965_enable_vblank,
        .disable_vblank = i965_disable_vblank,
        .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* i915gm/i945gm */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i915gm_enable_vblank,
        .disable_vblank = i915gm_disable_vblank,
        .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* other Gen3 */
static const struct drm_crtc_funcs i915_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i8xx_enable_vblank,
        .disable_vblank = i8xx_disable_vblank,
        .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Gen2 */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        /* no hw vblank counter */
        .enable_vblank = i8xx_enable_vblank,
        .disable_vblank = i8xx_disable_vblank,
        .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/*
 * Create and register one CRTC for @pipe: allocate the CRTC, create
 * its primary/sprite/cursor planes, pick the generation-appropriate
 * vblank funcs table, register with DRM, and attach the optional
 * scaling filter / sharpness properties.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far for this CRTC is freed (planes registered with DRM
 * are cleaned up later by the DRM core).
 */
static int __intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
        struct intel_plane *primary, *cursor;
        const struct drm_crtc_funcs *funcs;
        struct intel_crtc *crtc;
        int sprite, ret;

        crtc = intel_crtc_alloc();
        if (IS_ERR(crtc))
                return PTR_ERR(crtc);

        crtc->pipe = pipe;
        crtc->num_scalers = DISPLAY_RUNTIME_INFO(display)->num_scalers[pipe];

        /* SKL+ uses universal planes for the primary plane as well */
        if (DISPLAY_VER(display) >= 9)
                primary = skl_universal_plane_create(display, pipe, PLANE_1);
        else
                primary = intel_primary_plane_create(display, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        crtc->plane_ids_mask |= BIT(primary->id);

        intel_init_fifo_underrun_reporting(display, crtc, false);

        for_each_sprite(display, pipe, sprite) {
                struct intel_plane *plane;

                if (DISPLAY_VER(display) >= 9)
                        plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
                else
                        plane = intel_sprite_plane_create(display, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(display, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        crtc->plane_ids_mask |= BIT(cursor->id);

        /* Pick the funcs table matching this platform's vblank hardware */
        if (HAS_GMCH(display)) {
                if (display->platform.cherryview ||
                    display->platform.valleyview ||
                    display->platform.g4x)
                        funcs = &g4x_crtc_funcs;
                else if (DISPLAY_VER(display) == 4)
                        funcs = &i965_crtc_funcs;
                else if (display->platform.i945gm ||
                         display->platform.i915gm)
                        funcs = &i915gm_crtc_funcs;
                else if (DISPLAY_VER(display) == 3)
                        funcs = &i915_crtc_funcs;
                else
                        funcs = &i8xx_crtc_funcs;
        } else {
                if (DISPLAY_VER(display) >= 8)
                        funcs = &bdw_crtc_funcs;
                else
                        funcs = &ilk_crtc_funcs;
        }

        ret = drm_crtc_init_with_planes(display->drm, &crtc->base,
                                        &primary->base, &cursor->base,
                                        funcs, "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        if (DISPLAY_VER(display) >= 11)
                drm_crtc_create_scaling_filter_property(&crtc->base,
                                                        BIT(DRM_SCALING_FILTER_DEFAULT) |
                                                        BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

        intel_color_crtc_init(crtc);
        intel_drrs_crtc_init(crtc);
        intel_crtc_crc_init(crtc);

        /*
         * QoS request used to minimize interrupt latency during vblank
         * evasion; updated in intel_crtc_vblank_work_init() and
         * removed in intel_crtc_destroy().
         */
        cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);

        /* the rest of the driver assumes drm index == pipe */
        drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

        if (HAS_CASF(display) && crtc->num_scalers >= 2)
                drm_crtc_create_sharpness_strength_property(&crtc->base);

        return 0;

fail:
        intel_crtc_free(crtc);

        return ret;
}

/* Create one CRTC per available pipe. */
int intel_crtc_init(struct intel_display *display)
{
        enum pipe pipe;
        int ret;

        drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
                    INTEL_NUM_PIPES(display), str_plural(INTEL_NUM_PIPES(display)));

        for_each_pipe(display, pipe) {
                ret = __intel_crtc_init(display, pipe);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: translate a userspace CRTC
 * object id into the hardware pipe enum. Returns -ENOENT for an
 * unknown crtc id.
 */
int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
                                           struct drm_file *file)
{
        struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
        struct drm_crtc *drm_crtc;
        struct intel_crtc *crtc;

        drm_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
        if (!drm_crtc)
                return -ENOENT;

        crtc = to_intel_crtc(drm_crtc);
        pipe_from_crtc_id->pipe = crtc->pipe;

        return 0;
}

/*
 * Decide whether this commit needs the deferred vblank worker:
 * an active, non-modeset commit that must load the LUTs with the CPU
 * (color update without double buffered LUTs and without using the
 * DSB for either colors or the commit itself).
 */
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);

        return crtc_state->hw.active &&
                !crtc_state->preload_luts &&
                !intel_crtc_needs_modeset(crtc_state) &&
                (intel_crtc_needs_color_update(crtc_state) &&
                 !HAS_DOUBLE_BUFFERED_LUT(display)) &&
                !intel_color_uses_dsb(crtc_state) &&
                !crtc_state->use_dsb;
}

/*
 * Vblank worker body: load the LUTs as early as possible in the
 * vblank, then deliver the completion event (if any) to userspace.
 */
static void intel_crtc_vblank_work(struct kthread_work *base)
{
        struct drm_vblank_work *work = to_drm_vblank_work(base);
        struct intel_crtc_state *crtc_state =
                container_of(work, typeof(*crtc_state), vblank_work);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

        trace_intel_crtc_vblank_work_start(crtc);

        intel_color_load_luts(crtc_state);

        if (crtc_state->uapi.event) {
                spin_lock_irq(&crtc->base.dev->event_lock);
                drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
                spin_unlock_irq(&crtc->base.dev->event_lock);
                crtc_state->uapi.event = NULL;
        }

        trace_intel_crtc_vblank_work_end(crtc);
}

/*
 * Prepare the per-commit vblank work and crank the cpu latency QoS
 * down to 0; intel_wait_for_vblank_workers() restores the default
 * once the work has run.
 */
static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

        drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
                             intel_crtc_vblank_work);
        /*
         * Interrupt latency is critical for getting the vblank
         * work executed as early as possible during the vblank.
         */
        cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
}

/*
 * Flush all scheduled vblank workers for this commit and relax the
 * cpu latency QoS back to the default value.
 */
void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
{
        struct intel_crtc_state *crtc_state;
        struct intel_crtc *crtc;
        int i;

        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
                if (!intel_crtc_needs_vblank_work(crtc_state))
                        continue;

                drm_vblank_work_flush(&crtc_state->vblank_work);
                cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
                                               PM_QOS_DEFAULT_VALUE);
        }
}

/*
 * Convert a duration in microseconds to scanlines for the given
 * adjusted mode, rounding up. Returns 1 if the mode has no htotal
 * (paranoia against division by zero).
 */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
                             int usecs)
{
        /* paranoia */
        if (!adjusted_mode->crtc_htotal)
                return 1;

        return DIV_ROUND_UP_ULL(mul_u32_u32(usecs, adjusted_mode->crtc_clock),
                                1000 * adjusted_mode->crtc_htotal);
}

/*
 * Convert a number of scanlines to microseconds for the given
 * adjusted mode, rounding up. Returns 1 if the mode has no clock
 * (paranoia against division by zero).
 */
int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
                             int scanlines)
{
        /* paranoia */
        if (!adjusted_mode->crtc_clock)
                return 1;

        return DIV_ROUND_UP_ULL(mul_u32_u32(scanlines, adjusted_mode->crtc_htotal * 1000),
                                adjusted_mode->crtc_clock);
}

/**
 * intel_pipe_update_start() - start update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the start of an update to pipe registers that should be updated
 * atomically regarding vblank. If the next vblank will happens within
 * the next 100 us, this function waits until the vblank passes.
 *
 * After a successful call to this function, interrupts will be disabled
 * until a subsequent call to intel_pipe_update_end(). That is done to
 * avoid random delays.
 */
void intel_pipe_update_start(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(state);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_vblank_evade_ctx evade;
        int scanline;

        drm_WARN_ON(display->drm, new_crtc_state->use_dsb);

        intel_psr_lock(new_crtc_state);

        /* async flips don't need vblank evasion at all */
        if (new_crtc_state->do_async_flip) {
                intel_crtc_prepare_vblank_event(new_crtc_state,
                                                &crtc->flip_done_event);
                return;
        }

        if (intel_crtc_needs_vblank_work(new_crtc_state))
                intel_crtc_vblank_work_init(new_crtc_state);

        if (state->base.legacy_cursor_update) {
                struct intel_plane *plane;
                struct intel_plane_state *old_plane_state, *new_plane_state;
                int i;

                for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
                                                     new_plane_state, i) {
                        if (old_plane_state->hw.crtc == &crtc->base)
                                intel_plane_init_cursor_vblank_work(old_plane_state,
                                                                    new_plane_state);
                }
        }

        intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);

        if (drm_WARN_ON(display->drm, drm_crtc_vblank_get(&crtc->base)))
                goto irq_disable;

        /*
         * Wait for psr to idle out after enabling the VBL interrupts
         * VBL interrupts will start the PSR exit and prevent a PSR
         * re-entry as well.
         */
        intel_psr_wait_for_idle_locked(new_crtc_state);

        local_irq_disable();

        crtc->debug.min_vbl = evade.min;
        crtc->debug.max_vbl = evade.max;
        trace_intel_pipe_update_start(crtc);

        scanline = intel_vblank_evade(&evade);

        drm_crtc_vblank_put(&crtc->base);

        crtc->debug.scanline_start = scanline;
        crtc->debug.start_vbl_time = ktime_get();
        crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);

        trace_intel_pipe_update_vblank_evaded(crtc);
        return;

irq_disable:
        /* still enter the critical section even if evasion was skipped */
        local_irq_disable();
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
/*
 * Account the duration of the just-finished vblank-evaded update in
 * the per-CRTC debug histogram, and complain if the update exceeded
 * the evasion time budget.
 */
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
{
        u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
        unsigned int h;

        /* log2 bucket of the duration, in 512 ns units */
        h = ilog2(delta >> 9);
        if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
                h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
        crtc->debug.vbl.times[h]++;

        crtc->debug.vbl.sum += delta;
        if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
                crtc->debug.vbl.min = delta;
        if (delta > crtc->debug.vbl.max)
                crtc->debug.vbl.max = delta;

        if (delta > 1000 * VBLANK_EVASION_TIME_US) {
                drm_dbg_kms(crtc->base.dev,
                            "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
                            pipe_name(crtc->pipe),
                            div_u64(delta, 1000),
                            VBLANK_EVASION_TIME_US);
                crtc->debug.vbl.over++;
        }
}
#else
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif

/*
 * Arm the pending uapi event (if any) so it is sent on the next
 * vblank. The WARN checks that a vblank reference is already held,
 * since drm_crtc_arm_vblank_event() consumes one.
 */
void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        unsigned long irqflags;

        if (!crtc_state->uapi.event)
                return;

        drm_WARN_ON(crtc->base.dev, drm_crtc_vblank_get(&crtc->base) != 0);

        spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
        drm_crtc_arm_vblank_event(&crtc->base, crtc_state->uapi.event);
        spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);

        crtc_state->uapi.event = NULL;
}

/*
 * Transfer ownership of the pending uapi event from @crtc_state to
 * *@event, under the event lock.
 */
void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
                                     struct drm_pending_vblank_event **event)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        unsigned long irqflags;

        spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
        *event = crtc_state->uapi.event;
        spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);

        crtc_state->uapi.event = NULL;
}

/**
 * intel_pipe_update_end() - end update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the end of an update started with intel_pipe_update_start(). This
 * re-enables interrupts and verifies the update was actually completed
 * before a vblank.
 */
void intel_pipe_update_end(struct intel_atomic_state *state,
                           struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(state);
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        enum pipe pipe = crtc->pipe;
        int scanline_end = intel_get_crtc_scanline(crtc);
        u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
        ktime_t end_vbl_time = ktime_get();

        drm_WARN_ON(display->drm, new_crtc_state->use_dsb);

        if (new_crtc_state->do_async_flip)
                goto out;

        trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);

        /*
         * Incase of mipi dsi command mode, we need to set frame update
         * request for every commit.
         */
        if (DISPLAY_VER(display) >= 11 &&
            intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
                icl_dsi_frame_update(new_crtc_state);

        /* We're still in the vblank-evade critical section, this can't race.
         * Would be slightly nice to just grab the vblank count and arm the
         * event outside of the critical section - the spinlock might spin for a
         * while ... */
        if (intel_crtc_needs_vblank_work(new_crtc_state)) {
                drm_vblank_work_schedule(&new_crtc_state->vblank_work,
                                         drm_crtc_accurate_vblank_count(&crtc->base) + 1,
                                         false);
        } else {
                intel_crtc_arm_vblank_event(new_crtc_state);
        }

        if (state->base.legacy_cursor_update) {
                struct intel_plane *plane;
                struct intel_plane_state *old_plane_state;
                int i;

                for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
                        if (old_plane_state->hw.crtc == &crtc->base &&
                            old_plane_state->unpin_work.vblank) {
                                drm_vblank_work_schedule(&old_plane_state->unpin_work,
                                                         drm_crtc_accurate_vblank_count(&crtc->base) + 1,
                                                         false);

                                /* Remove plane from atomic state, cleanup/free is done from vblank worker. */
                                memset(&state->base.planes[i], 0, sizeof(state->base.planes[i]));
                        }
                }
        }

        /*
         * Send VRR Push to terminate Vblank. If we are already in vblank
         * this has to be done _after_ sampling the frame counter, as
         * otherwise the push would immediately terminate the vblank and
         * the sampled frame counter would correspond to the next frame
         * instead of the current frame.
         *
         * There is a tiny race here (iff vblank evasion failed us) where
         * we might sample the frame counter just before vmax vblank start
         * but the push would be sent just after it. That would cause the
         * push to affect the next frame instead of the current frame,
         * which would cause the next frame to terminate already at vmin
         * vblank start instead of vmax vblank start.
         */
        if (!state->base.legacy_cursor_update)
                intel_vrr_send_push(NULL, new_crtc_state);

        local_irq_enable();

        if (intel_parent_vgpu_active(display))
                goto out;

        /* warn if the update missed the frame it was aimed at */
        if (crtc->debug.start_vbl_count &&
            crtc->debug.start_vbl_count != end_vbl_count) {
                drm_err(display->drm,
                        "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
                        pipe_name(pipe), crtc->debug.start_vbl_count,
                        end_vbl_count,
                        ktime_us_delta(end_vbl_time,
                                       crtc->debug.start_vbl_time),
                        crtc->debug.min_vbl, crtc->debug.max_vbl,
                        crtc->debug.scanline_start, scanline_end);
        }

        dbg_vblank_evade(crtc, end_vbl_time);

out:
        intel_psr_unlock(new_crtc_state);
}

/* Did hw.enable change between the old and new crtc state? */
bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
                               const struct intel_crtc_state *new_crtc_state)
{
        return old_crtc_state->hw.enable != new_crtc_state->hw.enable;
}

/* Did hw.enable change on any crtc in this atomic state? */
bool intel_any_crtc_enable_changed(struct intel_atomic_state *state)
{
        const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc *crtc;
        int i;

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (intel_crtc_enable_changed(old_crtc_state, new_crtc_state))
                        return true;
        }

        return false;
}

/* Did hw.active change between the old and new crtc state? */
bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
                               const struct intel_crtc_state *new_crtc_state)
{
        return old_crtc_state->hw.active != new_crtc_state->hw.active;
}

/* Did hw.active change on any crtc in this atomic state? */
bool intel_any_crtc_active_changed(struct intel_atomic_state *state)
{
        const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc *crtc;
        int i;

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (intel_crtc_active_changed(old_crtc_state, new_crtc_state))
                        return true;
        }

        return false;
}

/* Number of active planes relevant for bandwidth, excluding the cursor. */
unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state)
{
        /*
         * We assume cursors are small enough
         * to not cause bandwidth problems.
         */
        return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}

/*
 * Sum the per-plane data rates for this crtc state; on pre-ICL the
 * separate Y-plane rate is accounted for as well.
 */
unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        unsigned int data_rate = 0;
        enum plane_id plane_id;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                /*
                 * We assume cursors are small enough
                 * to not cause bandwidth problems.
                 */
                if (plane_id == PLANE_CURSOR)
                        continue;

                data_rate += crtc_state->data_rate[plane_id];

                if (DISPLAY_VER(display) < 11)
                        data_rate += crtc_state->data_rate_y[plane_id];
        }

        return data_rate;
}

/* "Maximum Pipe Read Bandwidth" */
int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);

        if (DISPLAY_VER(display) < 12)
                return 0;

        /* data rate * 10 / 512, rounded up */
        return DIV_ROUND_UP_ULL(mul_u32_u32(intel_crtc_bw_data_rate(crtc_state), 10), 512);
}