// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022-2023 Intel Corporation
 *
 * High level display driver entry points. This is a layer between top level
 * driver code and low level display functionality; no low level display code or
 * details here.
 */

#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bios.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_dbuf_bw.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"

bool intel_display_driver_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
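	 *
	 * Returning true here asks the caller to defer the probe (typically
	 * by returning -EPROBE_DEFER) until the missing dependency shows up.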
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

void intel_display_driver_init_hw(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_cdclk_read_hw(display);

	intel_display_wa_apply(display);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

static void intel_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	drm_mode_config_init(display->drm);
	INIT_LIST_HEAD(&display->global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
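	 *
	 * Framebuffers exceeding these limits are rejected by the DRM core
	 * at framebuffer creation time.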
	 */
	if (DISPLAY_VER(display) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(display) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(display) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	intel_cursor_mode_config_init(display);
}

static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}

static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
	struct intel_plane *plane;

	for_each_intel_plane(display->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}

/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	if (!HAS_DISPLAY(display))
		return 0;

	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					   WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	intel_dmc_init(display);

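	/*
	 * Set up mode config and the cdclk/color/dbuf/bw/pmdemand state
	 * below; any failure unwinds the workqueues allocated above via
	 * the cleanup_wq_* labels.
	 */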
	intel_mode_config_init(display);

	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}

static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	drm_WARN_ON(display->drm, err);
}

/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples of such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	intel_hpd_enable_detection_work(display);
}

/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples of such accesses
 * are modeset commits and connector probing. Access remains enabled for the
 * current thread, which should only perform HW init/deinit programming (such
 * as the initial modeset during driver loading or the disabling modeset
 * during driver unloading and system suspend/shutdown). This function should
 * be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
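 *
 * A typical load-time sequence (illustrative only) is::
 *
 *	intel_display_driver_disable_user_access(display);
 *	... HW init programming, e.g. the initial modeset ...
 *	intel_display_driver_enable_user_access(display);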
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}

/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples of such accesses
 * are modeset commits and connector probing. This call should either be
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shut down.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	set_display_access(display, false, NULL);
}

/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples of such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (such as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access()
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	set_display_access(display, false, current);
}

/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access and print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}

/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	enum pipe pipe;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(display),
		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");

	for_each_pipe(display, pipe) {
		ret = intel_crtc_init(display, pipe);
		if (ret)
			goto err_mode_config;
	}

	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}

/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	intel_flipq_init(display);

	/*
	 * Force all active planes to recompute their states, so that on
	 * mode_setcrtc after probe all the intel_plane_state variables
	 * are already calculated and there are no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}

void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}

/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);
	flush_workqueue(display->wq.unordered);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup().
	 */
	intel_dp_mst_suspend(display);
}

/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(display->wq.unordered);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);
	destroy_workqueue(display->wq.unordered);

	intel_fbc_cleanup(display);
}

/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}

void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (including a late async config which
	 * will have delayed queuing of a hotplug event), flush the
	 * hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
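 *
 * The suspended atomic state is stashed in display->restore.modeset_state
 * and recommitted via __intel_display_driver_resume() on resume.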
 */
int intel_display_driver_suspend(struct intel_display *display)
{
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	state = drm_atomic_helper_suspend(display->drm);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(display->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		display->restore.modeset_state = state;

	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
	flush_workqueue(display->wq.cleanup);

	intel_dp_mst_suspend(display);

	return ret;
}

int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}

void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}