// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022-2023 Intel Corporation
 *
 * High level display driver entry points. This is a layer between top level
 * driver code and low level display functionality; no low level display code or
 * details here.
 */

#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bios.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_dbuf_bw.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_initial_plane.h"
#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"

/*
 * Check whether the display probe must be deferred until some other
 * driver/resource this device depends on has shown up.
 */
bool intel_display_driver_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

/* Early display HW init: read out CDCLK state and apply display workarounds. */
void intel_display_driver_init_hw(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_cdclk_read_hw(display);

	intel_display_wa_apply(display);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

/* Initialize the DRM mode config limits and hooks for this platform. */
static void intel_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	drm_mode_config_init(display->drm);
	INIT_LIST_HEAD(&display->global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(display) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(display) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(display) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	intel_cursor_mode_config_init(display);
}

static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}

/* Restrict each plane to the single CRTC of the pipe it is tied to. */
static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
	struct intel_plane *plane;

	for_each_intel_plane(display->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

/* Early SW-only probe: locks and per-platform function pointer hooks. */
void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}

/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	int ret;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	if (!HAS_DISPLAY(display))
		return 0;

	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					   WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	intel_dmc_init(display);

	intel_mode_config_init(display);

	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}
ALLOW_ERROR_INJECTION(intel_display_driver_probe_noirq, ERRNO);

/*
 * Update the display HW access policy under all modeset locks, retrying on
 * deadlock via the lock ctx helper.
 */
static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	drm_WARN_ON(display->drm, err);
}

/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	intel_hpd_enable_detection_work(display);
}

/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples for such accesses
 * are modeset commits and connector probing. For the current thread the
 * access is still enabled, which should only perform HW init/deinit
 * programming (as the initial modeset during driver loading or the disabling
 * modeset during driver unloading and system suspend/shutdown). This function
 * should be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}

/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing. This call should be either
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shutdown.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	set_display_access(display, false, NULL);
}

/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples for such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	set_display_access(display, false, current);
}

/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access, print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	/* comm + "[pid]" suffix; 16 extra bytes leave room for the pid digits */
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}

/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	ret = intel_crtc_init(display);
	if (ret)
		goto err_mode_config;

	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	/* Just disable it once at startup */
	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}

/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	intel_flipq_init(display);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}

/* Register with userspace-facing interfaces: sysfs, debugfs, ACPI, fbdev. */
void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}

/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* Drain all pending display work before tearing anything down. */
	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);
	flush_workqueue(display->wq.unordered);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}

/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(display->wq.unordered);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);
	destroy_workqueue(display->wq.unordered);

	intel_fbc_cleanup(display);
}

/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}

/* Undo intel_display_driver_register(): detach userspace-facing interfaces. */
void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_driver_suspend(struct intel_display *display)
{
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	state = drm_atomic_helper_suspend(display->drm);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(display->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		/* Keep the state around so resume can restore it. */
		display->restore.modeset_state = state;

	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
	flush_workqueue(display->wq.cleanup);

	intel_dp_mst_suspend(display);

	return ret;
}

/*
 * Read out the current HW state and, if a saved atomic state was provided,
 * re-commit it. Returns 0 on success or a negative errno; never -EDEADLK
 * (deadlocks are handled by the caller's acquire ctx).
 */
int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}

/* Restore the display state saved by intel_display_driver_suspend(). */
void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Take all modeset locks, backing off on deadlock until we succeed. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}