xref: /linux/drivers/gpu/drm/i915/display/intel_display_driver.c (revision a4871e6201c46c8e1d04308265b4b4c5753c8209)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022-2023 Intel Corporation
4  *
5  * High level display driver entry points. This is a layer between top level
6  * driver code and low level display functionality; no low level display code or
7  * details here.
8  */
9 
10 #include <linux/vga_switcheroo.h>
11 #include <acpi/video.h>
12 #include <drm/display/drm_dp_mst_helper.h>
13 #include <drm/drm_atomic_helper.h>
14 #include <drm/drm_client_event.h>
15 #include <drm/drm_mode_config.h>
16 #include <drm/drm_privacy_screen_consumer.h>
17 #include <drm/drm_probe_helper.h>
18 #include <drm/drm_vblank.h>
19 
20 #include "i915_drv.h"
21 #include "i9xx_wm.h"
22 #include "intel_acpi.h"
23 #include "intel_atomic.h"
24 #include "intel_audio.h"
25 #include "intel_bios.h"
26 #include "intel_bw.h"
27 #include "intel_cdclk.h"
28 #include "intel_color.h"
29 #include "intel_crtc.h"
30 #include "intel_display_debugfs.h"
31 #include "intel_display_driver.h"
32 #include "intel_display_irq.h"
33 #include "intel_display_power.h"
34 #include "intel_display_types.h"
35 #include "intel_display_wa.h"
36 #include "intel_dkl_phy.h"
37 #include "intel_dmc.h"
38 #include "intel_dp.h"
39 #include "intel_dp_tunnel.h"
40 #include "intel_dpll.h"
41 #include "intel_dpll_mgr.h"
42 #include "intel_fb.h"
43 #include "intel_fbc.h"
44 #include "intel_fbdev.h"
45 #include "intel_fdi.h"
46 #include "intel_gmbus.h"
47 #include "intel_hdcp.h"
48 #include "intel_hotplug.h"
49 #include "intel_hti.h"
50 #include "intel_modeset_lock.h"
51 #include "intel_modeset_setup.h"
52 #include "intel_opregion.h"
53 #include "intel_overlay.h"
54 #include "intel_plane_initial.h"
55 #include "intel_pmdemand.h"
56 #include "intel_pps.h"
57 #include "intel_quirks.h"
58 #include "intel_vga.h"
59 #include "intel_wm.h"
60 #include "skl_watermark.h"
61 
62 bool intel_display_driver_probe_defer(struct pci_dev *pdev)
63 {
64 	struct drm_privacy_screen *privacy_screen;
65 
66 	/*
67 	 * apple-gmux is needed on dual GPU MacBook Pro
68 	 * to probe the panel if we're the inactive GPU.
69 	 */
70 	if (vga_switcheroo_client_probe_defer(pdev))
71 		return true;
72 
73 	/* If the LCD panel has a privacy-screen, wait for it */
74 	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
75 	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
76 		return true;
77 
78 	drm_privacy_screen_put(privacy_screen);
79 
80 	return false;
81 }
82 
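/*
 * A minimal usage sketch, not taken from this file: the PCI probe path is
 * expected to check the helper above and defer until its providers
 * (apple-gmux, privacy-screen) have probed. Both function names below are
 * hypothetical; the real caller lives in the top level driver code.
 *
 *	static int example_pci_probe(struct pci_dev *pdev,
 *				     const struct pci_device_id *ent)
 *	{
 *		if (intel_display_driver_probe_defer(pdev))
 *			return -EPROBE_DEFER;
 *
 *		return example_driver_probe(pdev, ent);
 *	}
 */
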
83 void intel_display_driver_init_hw(struct intel_display *display)
84 {
85 	struct intel_cdclk_state *cdclk_state;
86 
87 	if (!HAS_DISPLAY(display))
88 		return;
89 
90 	cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
91 
92 	intel_update_cdclk(display);
93 	intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
94 	cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
95 
96 	intel_display_wa_apply(display);
97 }
98 
99 static const struct drm_mode_config_funcs intel_mode_funcs = {
100 	.fb_create = intel_user_framebuffer_create,
101 	.get_format_info = intel_fb_get_format_info,
102 	.mode_valid = intel_mode_valid,
103 	.atomic_check = intel_atomic_check,
104 	.atomic_commit = intel_atomic_commit,
105 	.atomic_state_alloc = intel_atomic_state_alloc,
106 	.atomic_state_clear = intel_atomic_state_clear,
107 	.atomic_state_free = intel_atomic_state_free,
108 };
109 
110 static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
111 	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
112 };
113 
114 static void intel_mode_config_init(struct intel_display *display)
115 {
116 	struct drm_mode_config *mode_config = &display->drm->mode_config;
117 
118 	drm_mode_config_init(display->drm);
119 	INIT_LIST_HEAD(&display->global.obj_list);
120 
121 	mode_config->min_width = 0;
122 	mode_config->min_height = 0;
123 
124 	mode_config->preferred_depth = 24;
125 	mode_config->prefer_shadow = 1;
126 
127 	mode_config->funcs = &intel_mode_funcs;
128 	mode_config->helper_private = &intel_mode_config_funcs;
129 
130 	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);
131 
132 	/*
133 	 * Maximum framebuffer dimensions, chosen to match
134 	 * the maximum render engine surface size on gen4+.
135 	 */
136 	if (DISPLAY_VER(display) >= 7) {
137 		mode_config->max_width = 16384;
138 		mode_config->max_height = 16384;
139 	} else if (DISPLAY_VER(display) >= 4) {
140 		mode_config->max_width = 8192;
141 		mode_config->max_height = 8192;
142 	} else if (DISPLAY_VER(display) == 3) {
143 		mode_config->max_width = 4096;
144 		mode_config->max_height = 4096;
145 	} else {
146 		mode_config->max_width = 2048;
147 		mode_config->max_height = 2048;
148 	}
149 
150 	if (display->platform.i845g || display->platform.i865g) {
151 		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
152 		mode_config->cursor_height = 1023;
153 	} else if (display->platform.i830 || display->platform.i85x ||
154 		   display->platform.i915g || display->platform.i915gm) {
155 		mode_config->cursor_width = 64;
156 		mode_config->cursor_height = 64;
157 	} else {
158 		mode_config->cursor_width = 256;
159 		mode_config->cursor_height = 256;
160 	}
161 }
162 
163 static void intel_mode_config_cleanup(struct intel_display *display)
164 {
165 	intel_atomic_global_obj_cleanup(display);
166 	drm_mode_config_cleanup(display->drm);
167 }
168 
169 static void intel_plane_possible_crtcs_init(struct intel_display *display)
170 {
171 	struct intel_plane *plane;
172 
173 	for_each_intel_plane(display->drm, plane) {
174 		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
175 							      plane->pipe);
176 
177 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
178 	}
179 }
180 
181 void intel_display_driver_early_probe(struct intel_display *display)
182 {
183 	if (!HAS_DISPLAY(display))
184 		return;
185 
186 	spin_lock_init(&display->fb_tracking.lock);
187 	mutex_init(&display->backlight.lock);
188 	mutex_init(&display->audio.mutex);
189 	mutex_init(&display->wm.wm_mutex);
190 	mutex_init(&display->pps.mutex);
191 	mutex_init(&display->hdcp.hdcp_mutex);
192 
193 	intel_display_irq_init(display);
194 	intel_dkl_phy_init(display);
195 	intel_color_init_hooks(display);
196 	intel_init_cdclk_hooks(display);
197 	intel_audio_hooks_init(display);
198 	intel_dpll_init_clock_hook(display);
199 	intel_init_display_hooks(display);
200 	intel_fdi_init_hook(display);
201 	intel_dmc_wl_init(display);
202 }
203 
204 /* part #1: call before irq install */
205 int intel_display_driver_probe_noirq(struct intel_display *display)
206 {
207 	struct drm_i915_private *i915 = to_i915(display->drm);
208 	int ret;
209 
210 	if (i915_inject_probe_failure(i915))
211 		return -ENODEV;
212 
213 	if (HAS_DISPLAY(display)) {
214 		ret = drm_vblank_init(display->drm,
215 				      INTEL_NUM_PIPES(display));
216 		if (ret)
217 			return ret;
218 	}
219 
220 	intel_bios_init(display);
221 
222 	ret = intel_vga_register(display);
223 	if (ret)
224 		goto cleanup_bios;
225 
226 	/* FIXME: completely on the wrong abstraction layer */
227 	ret = intel_power_domains_init(display);
228 	if (ret < 0)
229 		goto cleanup_vga;
230 
231 	intel_pmdemand_init_early(display);
232 
233 	intel_power_domains_init_hw(display, false);
234 
235 	if (!HAS_DISPLAY(display))
236 		return 0;
237 
238 	intel_dmc_init(display);
239 
240 	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
241 	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
242 						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
243 	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
244 
245 	intel_mode_config_init(display);
246 
247 	ret = intel_cdclk_init(display);
248 	if (ret)
249 		goto cleanup_vga_client_pw_domain_dmc;
250 
251 	ret = intel_color_init(display);
252 	if (ret)
253 		goto cleanup_vga_client_pw_domain_dmc;
254 
255 	ret = intel_dbuf_init(display);
256 	if (ret)
257 		goto cleanup_vga_client_pw_domain_dmc;
258 
259 	ret = intel_bw_init(display);
260 	if (ret)
261 		goto cleanup_vga_client_pw_domain_dmc;
262 
263 	ret = intel_pmdemand_init(display);
264 	if (ret)
265 		goto cleanup_vga_client_pw_domain_dmc;
266 
267 	intel_init_quirks(display);
268 
269 	intel_fbc_init(display);
270 
271 	return 0;
272 
273 cleanup_vga_client_pw_domain_dmc:
274 	intel_dmc_fini(display);
275 	intel_power_domains_driver_remove(display);
276 cleanup_vga:
277 	intel_vga_unregister(display);
278 cleanup_bios:
279 	intel_bios_driver_remove(display);
280 
281 	return ret;
282 }
283 
284 static void set_display_access(struct intel_display *display,
285 			       bool any_task_allowed,
286 			       struct task_struct *allowed_task)
287 {
288 	struct drm_modeset_acquire_ctx ctx;
289 	int err;
290 
291 	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
292 		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
293 		if (err)
294 			continue;
295 
296 		display->access.any_task_allowed = any_task_allowed;
297 		display->access.allowed_task = allowed_task;
298 	}
299 
300 	drm_WARN_ON(display->drm, err);
301 }
302 
303 /**
304  * intel_display_driver_enable_user_access - Enable display HW access for all threads
305  * @display: display device instance
306  *
307  * Enable the display HW access for all threads. Examples of such accesses
308  * are modeset commits and connector probing.
309  *
310  * This function should be called during driver loading and system resume once
311  * all the HW initialization steps are done.
312  */
313 void intel_display_driver_enable_user_access(struct intel_display *display)
314 {
315 	set_display_access(display, true, NULL);
316 
317 	intel_hpd_enable_detection_work(display);
318 }
319 
320 /**
321  * intel_display_driver_disable_user_access - Disable display HW access for user threads
322  * @display: display device instance
323  *
324  * Disable the display HW access for user threads. Examples of such accesses
325  * are modeset commits and connector probing. The current thread keeps its
326  * access, but should only perform HW init/deinit programming (such as the
327  * initial modeset during driver loading or the disabling modeset during
328  * driver unloading and system suspend/shutdown). This function
329  * should be followed by calling either intel_display_driver_enable_user_access()
330  * after completing the HW init programming or
331  * intel_display_driver_suspend_access() after completing the HW deinit
332  * programming.
333  *
334  * This function should be called during driver loading/unloading and system
335  * suspend/shutdown before starting the HW init/deinit programming.
336  */
337 void intel_display_driver_disable_user_access(struct intel_display *display)
338 {
339 	intel_hpd_disable_detection_work(display);
340 
341 	set_display_access(display, false, current);
342 }
343 
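/*
 * Illustrative sketch only, mirroring how this file itself sequences things
 * (intel_display_driver_probe_nogem() disables user access before the HW
 * state readout, intel_display_driver_register() re-enables it once init is
 * done): HW init/deinit programming is bracketed by these two calls.
 *
 *	intel_display_driver_disable_user_access(display);
 *	(HW init programming, e.g. the initial modeset)
 *	intel_display_driver_enable_user_access(display);
 */
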
344 /**
345  * intel_display_driver_suspend_access - Suspend display HW access for all threads
346  * @display: display device instance
347  *
348  * Disable the display HW access for all threads. Examples of such accesses
349  * are modeset commits and connector probing. This call should either be
350  * followed by calling intel_display_driver_resume_access(), or the driver
351  * should be unloaded/shutdown.
352  *
353  * This function should be called during driver unloading and system
354  * suspend/shutdown after completing the HW deinit programming.
355  */
356 void intel_display_driver_suspend_access(struct intel_display *display)
357 {
358 	set_display_access(display, false, NULL);
359 }
360 
361 /**
362  * intel_display_driver_resume_access - Resume display HW access for the resume thread
363  * @display: display device instance
364  *
365  * Enable the display HW access for the current resume thread, keeping the
366  * access disabled for all other (user) threads. Examples of such accesses
367  * are modeset commits and connector probing. The resume thread should only
368  * perform HW init programming (such as the restoring modeset). This function
369  * should be followed by calling intel_display_driver_enable_user_access(),
370  * after completing the HW init programming steps.
371  *
372  * This function should be called during system resume before starting the HW
373  * init steps.
374  */
375 void intel_display_driver_resume_access(struct intel_display *display)
376 {
377 	set_display_access(display, false, current);
378 }
379 
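/*
 * Rough sketch of the suspend/resume bracketing described above; illustrative
 * only, the real calls are made from the driver's suspend and resume paths.
 *
 *	system suspend:
 *		intel_display_driver_disable_user_access(display);
 *		(HW deinit programming, e.g. the disabling modeset)
 *		intel_display_driver_suspend_access(display);
 *
 *	system resume:
 *		intel_display_driver_resume_access(display);
 *		(HW init programming, e.g. intel_display_driver_resume())
 *		intel_display_driver_enable_user_access(display);
 */
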
380 /**
381  * intel_display_driver_check_access - Check if the current thread has display HW access
382  * @display: display device instance
383  *
384  * Check whether the current thread has display HW access and print a debug
385  * message if it doesn't. Such accesses are modeset commits and connector
386  * probing. If the function returns %false any HW access should be prevented.
387  *
388  * Returns %true if the current thread has display HW access, %false
389  * otherwise.
390  */
391 bool intel_display_driver_check_access(struct intel_display *display)
392 {
393 	char current_task[TASK_COMM_LEN + 16];
394 	char allowed_task[TASK_COMM_LEN + 16] = "none";
395 
396 	if (display->access.any_task_allowed ||
397 	    display->access.allowed_task == current)
398 		return true;
399 
400 	snprintf(current_task, sizeof(current_task), "%s[%d]",
401 		 current->comm, task_pid_vnr(current));
402 
403 	if (display->access.allowed_task)
404 		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
405 			 display->access.allowed_task->comm,
406 			 task_pid_vnr(display->access.allowed_task));
407 
408 	drm_dbg_kms(display->drm,
409 		    "Reject display access from task %s (allowed to %s)\n",
410 		    current_task, allowed_task);
411 
412 	return false;
413 }
414 
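/*
 * Hedged usage sketch (hypothetical detect hook, not copied from a real
 * connector driver): callers that may run from user threads are expected to
 * bail out without touching the hardware when access is rejected, e.g. by
 * reporting the last known connector status.
 *
 *	if (!intel_display_driver_check_access(display))
 *		return connector->status;
 */
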
415 /* part #2: call after irq install, but before gem init */
416 int intel_display_driver_probe_nogem(struct intel_display *display)
417 {
418 	struct drm_i915_private *i915 = to_i915(display->drm);
419 	enum pipe pipe;
420 	int ret;
421 
422 	if (!HAS_DISPLAY(display))
423 		return 0;
424 
425 	intel_wm_init(display);
426 
427 	intel_panel_sanitize_ssc(display);
428 
429 	intel_pps_setup(display);
430 
431 	intel_gmbus_setup(display);
432 
433 	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
434 		    INTEL_NUM_PIPES(display),
435 		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");
436 
437 	for_each_pipe(display, pipe) {
438 		ret = intel_crtc_init(display, pipe);
439 		if (ret)
440 			goto err_mode_config;
441 	}
442 
443 	intel_plane_possible_crtcs_init(display);
444 	intel_shared_dpll_init(display);
445 	intel_fdi_pll_freq_update(display);
446 
447 	intel_update_czclk(display);
448 	intel_display_driver_init_hw(display);
449 	intel_dpll_update_ref_clks(display);
450 
451 	if (display->cdclk.max_cdclk_freq == 0)
452 		intel_update_max_cdclk(display);
453 
454 	intel_hti_init(display);
455 
456 	/* Just disable it once at startup */
457 	intel_vga_disable(display);
458 	intel_setup_outputs(display);
459 
460 	ret = intel_dp_tunnel_mgr_init(display);
461 	if (ret)
462 		goto err_hdcp;
463 
464 	intel_display_driver_disable_user_access(display);
465 
466 	drm_modeset_lock_all(display->drm);
467 	intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
468 	intel_acpi_assign_connector_fwnodes(display);
469 	drm_modeset_unlock_all(display->drm);
470 
471 	intel_initial_plane_config(display);
472 
473 	/*
474 	 * Make sure hardware watermarks really match the state we read out.
475 	 * Note that we need to do this after reconstructing the BIOS fb's
476 	 * since the watermark calculation done here will use pstate->fb.
477 	 */
478 	if (!HAS_GMCH(display))
479 		ilk_wm_sanitize(display);
480 
481 	return 0;
482 
483 err_hdcp:
484 	intel_hdcp_component_fini(display);
485 err_mode_config:
486 	intel_mode_config_cleanup(display);
487 
488 	return ret;
489 }
490 
491 /* part #3: call after gem init */
492 int intel_display_driver_probe(struct intel_display *display)
493 {
494 	int ret;
495 
496 	if (!HAS_DISPLAY(display))
497 		return 0;
498 
499 	/*
500 	 * This will bind stuff into ggtt, so it needs to be done after
501 	 * the BIOS fb takeover and whatever other magic ggtt reservations
502 	 * happen during gem/ggtt init.
503 	 */
504 	intel_hdcp_component_init(display);
505 
506 	/*
507 	 * Force all active planes to recompute their states, so that on
508 	 * mode_setcrtc after probe, all the intel_plane_state variables
509 	 * are already calculated and there are no assert_plane warnings
510 	 * during bootup.
511 	 */
512 	ret = intel_initial_commit(display);
513 	if (ret)
514 		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);
515 
516 	intel_overlay_setup(display);
517 
518 	/* Only enable hotplug handling once the fbdev is fully set up. */
519 	intel_hpd_init(display);
520 
521 	skl_watermark_ipc_init(display);
522 
523 	return 0;
524 }
525 
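/*
 * Rough, simplified sketch of the probe ordering implied by the part #1/#2/#3
 * comments above; illustrative only, the actual top level driver code
 * interleaves these with IRQ install, GEM init and error unwinding.
 *
 *	intel_display_driver_probe_noirq(display);	(part #1, before irq install)
 *	intel_display_driver_probe_nogem(display);	(part #2, after irq install)
 *	intel_display_driver_probe(display);		(part #3, after gem init)
 *	intel_display_driver_register(display);
 */
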
526 void intel_display_driver_register(struct intel_display *display)
527 {
528 	struct drm_i915_private *i915 = to_i915(display->drm);
529 	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
530 					       "i915 display info:");
531 
532 	if (!HAS_DISPLAY(display))
533 		return;
534 
535 	/* Must be done after probing outputs */
536 	intel_opregion_register(display);
537 	intel_acpi_video_register(display);
538 
539 	intel_audio_init(display);
540 
541 	intel_display_driver_enable_user_access(display);
542 
543 	intel_audio_register(display);
544 
545 	intel_display_debugfs_register(display);
546 
547 	/*
548 	 * We need to coordinate the hotplugs with the asynchronous
549 	 * fbdev configuration, for which we use the
550 	 * fbdev->async_cookie.
551 	 */
552 	drm_kms_helper_poll_init(display->drm);
553 	intel_hpd_poll_disable(display);
554 
555 	intel_fbdev_setup(i915);
556 
557 	intel_display_device_info_print(DISPLAY_INFO(display),
558 					DISPLAY_RUNTIME_INFO(display), &p);
559 
560 	intel_register_dsm_handler();
561 }
562 
563 /* part #1: call before irq uninstall */
564 void intel_display_driver_remove(struct intel_display *display)
565 {
566 	if (!HAS_DISPLAY(display))
567 		return;
568 
569 	flush_workqueue(display->wq.flip);
570 	flush_workqueue(display->wq.modeset);
571 	flush_workqueue(display->wq.cleanup);
572 
573 	/*
574 	 * MST topology needs to be suspended so we don't have any calls to
575 	 * fbdev after it's finalized. MST will be destroyed later as part of
576 	 * drm_mode_config_cleanup()
577 	 */
578 	intel_dp_mst_suspend(display);
579 }
580 
581 /* part #2: call after irq uninstall */
582 void intel_display_driver_remove_noirq(struct intel_display *display)
583 {
584 	struct drm_i915_private *i915 = to_i915(display->drm);
585 
586 	if (!HAS_DISPLAY(display))
587 		return;
588 
589 	intel_display_driver_suspend_access(display);
590 
591 	/*
592 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
593 	 * poll handlers. Hence disable polling after hpd handling is shut down.
594 	 */
595 	intel_hpd_poll_fini(display);
596 
597 	intel_unregister_dsm_handler();
598 
599 	/* flush any delayed tasks or pending work */
600 	flush_workqueue(i915->unordered_wq);
601 
602 	intel_hdcp_component_fini(display);
603 
604 	intel_mode_config_cleanup(display);
605 
606 	intel_dp_tunnel_mgr_cleanup(display);
607 
608 	intel_overlay_cleanup(display);
609 
610 	intel_gmbus_teardown(display);
611 
612 	destroy_workqueue(display->wq.flip);
613 	destroy_workqueue(display->wq.modeset);
614 	destroy_workqueue(display->wq.cleanup);
615 
616 	intel_fbc_cleanup(display);
617 }
618 
619 /* part #3: call after gem init */
620 void intel_display_driver_remove_nogem(struct intel_display *display)
621 {
622 	intel_dmc_fini(display);
623 
624 	intel_power_domains_driver_remove(display);
625 
626 	intel_vga_unregister(display);
627 
628 	intel_bios_driver_remove(display);
629 }
630 
631 void intel_display_driver_unregister(struct intel_display *display)
632 {
633 	if (!HAS_DISPLAY(display))
634 		return;
635 
636 	intel_unregister_dsm_handler();
637 
638 	drm_client_dev_unregister(display->drm);
639 
640 	/*
641 	 * After flushing the fbdev (incl. a late async config which
642 	 * will have delayed queuing of a hotplug event), flush
643 	 * the hotplug events.
644 	 */
645 	drm_kms_helper_poll_fini(display->drm);
646 
647 	intel_display_driver_disable_user_access(display);
648 
649 	intel_audio_deinit(display);
650 
651 	drm_atomic_helper_shutdown(display->drm);
652 
653 	acpi_video_unregister();
654 	intel_opregion_unregister(display);
655 }
656 
657 /*
658  * Turn all CRTCs off, but do not adjust state.
659  * This has to be paired with a call to intel_modeset_setup_hw_state().
660  */
661 int intel_display_driver_suspend(struct intel_display *display)
662 {
663 	struct drm_atomic_state *state;
664 	int ret;
665 
666 	if (!HAS_DISPLAY(display))
667 		return 0;
668 
669 	state = drm_atomic_helper_suspend(display->drm);
670 	ret = PTR_ERR_OR_ZERO(state);
671 	if (ret)
672 		drm_err(display->drm, "Suspending crtc's failed with %i\n",
673 			ret);
674 	else
675 		display->restore.modeset_state = state;
676 
677 	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
678 	flush_workqueue(display->wq.cleanup);
679 
680 	intel_dp_mst_suspend(display);
681 
682 	return ret;
683 }
684 
685 int
686 __intel_display_driver_resume(struct intel_display *display,
687 			      struct drm_atomic_state *state,
688 			      struct drm_modeset_acquire_ctx *ctx)
689 {
690 	struct drm_i915_private *i915 = to_i915(display->drm);
691 	struct drm_crtc_state *crtc_state;
692 	struct drm_crtc *crtc;
693 	int ret, i;
694 
695 	intel_modeset_setup_hw_state(i915, ctx);
696 	intel_vga_redisable(display);
697 
698 	if (!state)
699 		return 0;
700 
701 	/*
702 	 * We've duplicated the state, pointers to the old state are invalid.
703 	 * We've duplicated the state, so pointers to the old state are invalid.
704 	 * Don't attempt to use the old state until we commit the duplicated state.
705 	 */
706 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
707 		/*
708 		 * Force recalculation even if we restore
709 		 * current state. With fast modeset this may not result
710 		 * in a modeset when the state is compatible.
711 		 */
712 		crtc_state->mode_changed = true;
713 	}
714 
715 	/* ignore any reset values/BIOS leftovers in the WM registers */
716 	if (!HAS_GMCH(display))
717 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
718 
719 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
720 
721 	drm_WARN_ON(display->drm, ret == -EDEADLK);
722 
723 	return ret;
724 }
725 
726 void intel_display_driver_resume(struct intel_display *display)
727 {
728 	struct drm_atomic_state *state = display->restore.modeset_state;
729 	struct drm_modeset_acquire_ctx ctx;
730 	int ret;
731 
732 	if (!HAS_DISPLAY(display))
733 		return;
734 
735 	/* MST sideband requires HPD interrupts enabled */
736 	intel_dp_mst_resume(display);
737 
738 	display->restore.modeset_state = NULL;
739 	if (state)
740 		state->acquire_ctx = &ctx;
741 
742 	drm_modeset_acquire_init(&ctx, 0);
743 
744 	while (1) {
745 		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
746 		if (ret != -EDEADLK)
747 			break;
748 
749 		drm_modeset_backoff(&ctx);
750 	}
751 
752 	if (!ret)
753 		ret = __intel_display_driver_resume(display, state, &ctx);
754 
755 	skl_watermark_ipc_update(display);
756 	drm_modeset_drop_locks(&ctx);
757 	drm_modeset_acquire_fini(&ctx);
758 
759 	if (ret)
760 		drm_err(display->drm,
761 			"Restoring old state failed with %i\n", ret);
762 	if (state)
763 		drm_atomic_state_put(state);
764 }
765