// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022-2023 Intel Corporation
 *
 * High level display driver entry points. This is a layer between top level
 * driver code and low level display functionality; no low level display code or
 * details here.
 */

#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bios.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"
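
/*
 * Rough call order of these entry points, pieced together from the
 * "part #N" comments further down in this file; the actual call sites
 * live in the top level driver code and may differ in detail:
 *
 *   probe:  intel_display_driver_probe_defer()
 *           intel_display_driver_early_probe()
 *           intel_display_driver_probe_noirq()   - before irq install
 *           intel_display_driver_probe_nogem()   - after irqs, before gem init
 *           intel_display_driver_probe()         - after gem init
 *           intel_display_driver_register()
 *
 *   remove: intel_display_driver_unregister()
 *           intel_display_driver_remove()        - before irq uninstall
 *           intel_display_driver_remove_noirq()  - after irq uninstall
 *           intel_display_driver_remove_nogem()
 */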

bool intel_display_driver_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

void intel_display_driver_init_hw(struct intel_display *display)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(display))
		return;

	cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);

	intel_update_cdclk(display);
	intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;

	intel_display_wa_apply(display);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

static void intel_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	drm_mode_config_init(display->drm);
	INIT_LIST_HEAD(&display->global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(display) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(display) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(display) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (display->platform.i845g || display->platform.i865g) {
		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (display->platform.i830 || display->platform.i85x ||
		   display->platform.i915g || display->platform.i915gm) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}

static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
	struct intel_plane *plane;

	for_each_intel_plane(display->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}

/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	if (!HAS_DISPLAY(display))
		return 0;

	intel_dmc_init(display);

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					   WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	intel_mode_config_init(display);

	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}

static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	drm_WARN_ON(display->drm, err);
}

/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	intel_hpd_enable_detection_work(display);
}

/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples for such accesses
 * are modeset commits and connector probing. For the current thread the
 * access is still enabled, which should only perform HW init/deinit
 * programming (as the initial modeset during driver loading or the disabling
 * modeset during driver unloading and system suspend/shutdown). This function
 * should be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}

/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing. This call should be either
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shutdown.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	set_display_access(display, false, NULL);
}

/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples for such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	set_display_access(display, false, current);
}

/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access, print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}
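
/*
 * How the access helpers above are meant to bracket the HW init/deinit
 * steps, as described by their kerneldoc (a sketch of the expected
 * sequence, with the intel_display_driver_ prefix omitted, not code
 * lifted from the top level driver):
 *
 *   driver load:    disable_user_access() -> initial modeset ->
 *                   enable_user_access()
 *   unload/suspend: disable_user_access() -> disabling modeset ->
 *                   suspend_access()
 *   system resume:  resume_access() -> restoring modeset ->
 *                   enable_user_access()
 *
 * Paths doing modeset commits or connector probing are expected to back
 * off when intel_display_driver_check_access() returns false.
 */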

/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	enum pipe pipe;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(display),
		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");

	for_each_pipe(display, pipe) {
		ret = intel_crtc_init(display, pipe);
		if (ret)
			goto err_mode_config;
	}

	intel_plane_possible_crtcs_init(display);
	intel_shared_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_update_czclk(display);
	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}

/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there are no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}

void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}

/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}

/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (!HAS_DISPLAY(display))
		return;

	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(i915->unordered_wq);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);

	intel_fbc_cleanup(display);
}

/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}

void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}
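
/*
 * Suspend/resume pairing for the functions below, summarized as a sketch:
 * intel_display_driver_suspend() turns the CRTCs off via
 * drm_atomic_helper_suspend() and stashes the duplicated state in
 * display->restore.modeset_state; intel_display_driver_resume() later
 * re-reads the HW state and commits that stashed state through
 * __intel_display_driver_resume(). The access helpers above are expected
 * to bracket these steps (suspend_access() after suspending,
 * resume_access() before resuming).
 */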

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_driver_suspend(struct intel_display *display)
{
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	state = drm_atomic_helper_suspend(display->drm);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(display->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		display->restore.modeset_state = state;

	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
	flush_workqueue(display->wq.cleanup);

	intel_dp_mst_suspend(display);

	return ret;
}

int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}

void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}