xref: /linux/drivers/gpu/drm/i915/display/intel_display_driver.c (revision f86ad0ed620cb3c91ec7d5468e93ac68d727539d)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022-2023 Intel Corporation
4  *
5  * High level display driver entry points. This is a layer between top level
6  * driver code and low level display functionality; no low level display code or
7  * details here.
8  */
9 
10 #include <linux/vga_switcheroo.h>
11 #include <acpi/video.h>
12 #include <drm/display/drm_dp_mst_helper.h>
13 #include <drm/drm_atomic_helper.h>
14 #include <drm/drm_client_event.h>
15 #include <drm/drm_mode_config.h>
16 #include <drm/drm_privacy_screen_consumer.h>
17 #include <drm/drm_probe_helper.h>
18 #include <drm/drm_vblank.h>
19 
20 #include "i915_drv.h"
21 #include "i9xx_wm.h"
22 #include "intel_acpi.h"
23 #include "intel_atomic.h"
24 #include "intel_audio.h"
25 #include "intel_bios.h"
26 #include "intel_bw.h"
27 #include "intel_cdclk.h"
28 #include "intel_color.h"
29 #include "intel_crtc.h"
30 #include "intel_display_core.h"
31 #include "intel_display_debugfs.h"
32 #include "intel_display_driver.h"
33 #include "intel_display_irq.h"
34 #include "intel_display_power.h"
35 #include "intel_display_types.h"
36 #include "intel_display_wa.h"
37 #include "intel_dkl_phy.h"
38 #include "intel_dmc.h"
39 #include "intel_dp.h"
40 #include "intel_dp_tunnel.h"
41 #include "intel_dpll.h"
42 #include "intel_dpll_mgr.h"
43 #include "intel_fb.h"
44 #include "intel_fbc.h"
45 #include "intel_fbdev.h"
46 #include "intel_fdi.h"
47 #include "intel_gmbus.h"
48 #include "intel_hdcp.h"
49 #include "intel_hotplug.h"
50 #include "intel_hti.h"
51 #include "intel_modeset_lock.h"
52 #include "intel_modeset_setup.h"
53 #include "intel_opregion.h"
54 #include "intel_overlay.h"
55 #include "intel_plane_initial.h"
56 #include "intel_pmdemand.h"
57 #include "intel_pps.h"
58 #include "intel_psr.h"
59 #include "intel_quirks.h"
60 #include "intel_vga.h"
61 #include "intel_wm.h"
62 #include "skl_watermark.h"
63 
64 bool intel_display_driver_probe_defer(struct pci_dev *pdev)
65 {
66 	struct drm_privacy_screen *privacy_screen;
67 
68 	/*
69 	 * apple-gmux is needed on dual GPU MacBook Pro
70 	 * to probe the panel if we're the inactive GPU.
71 	 */
72 	if (vga_switcheroo_client_probe_defer(pdev))
73 		return true;
74 
75 	/* If the LCD panel has a privacy-screen, wait for it */
76 	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
77 	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
78 		return true;
79 
80 	drm_privacy_screen_put(privacy_screen);
81 
82 	return false;
83 }
84 
85 void intel_display_driver_init_hw(struct intel_display *display)
86 {
87 	struct intel_cdclk_state *cdclk_state;
88 
89 	if (!HAS_DISPLAY(display))
90 		return;
91 
92 	cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
93 
94 	intel_update_cdclk(display);
95 	intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
96 	cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
97 
98 	intel_display_wa_apply(display);
99 }
100 
/* Top-level mode config hooks: fb creation, mode validation, atomic ops */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
111 
/* Helper hooks; commit setup is delegated to the DP MST helper */
static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
115 
116 static void intel_mode_config_init(struct intel_display *display)
117 {
118 	struct drm_mode_config *mode_config = &display->drm->mode_config;
119 
120 	drm_mode_config_init(display->drm);
121 	INIT_LIST_HEAD(&display->global.obj_list);
122 
123 	mode_config->min_width = 0;
124 	mode_config->min_height = 0;
125 
126 	mode_config->preferred_depth = 24;
127 	mode_config->prefer_shadow = 1;
128 
129 	mode_config->funcs = &intel_mode_funcs;
130 	mode_config->helper_private = &intel_mode_config_funcs;
131 
132 	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);
133 
134 	/*
135 	 * Maximum framebuffer dimensions, chosen to match
136 	 * the maximum render engine surface size on gen4+.
137 	 */
138 	if (DISPLAY_VER(display) >= 7) {
139 		mode_config->max_width = 16384;
140 		mode_config->max_height = 16384;
141 	} else if (DISPLAY_VER(display) >= 4) {
142 		mode_config->max_width = 8192;
143 		mode_config->max_height = 8192;
144 	} else if (DISPLAY_VER(display) == 3) {
145 		mode_config->max_width = 4096;
146 		mode_config->max_height = 4096;
147 	} else {
148 		mode_config->max_width = 2048;
149 		mode_config->max_height = 2048;
150 	}
151 
152 	if (display->platform.i845g || display->platform.i865g) {
153 		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
154 		mode_config->cursor_height = 1023;
155 	} else if (display->platform.i830 || display->platform.i85x ||
156 		   display->platform.i915g || display->platform.i915gm) {
157 		mode_config->cursor_width = 64;
158 		mode_config->cursor_height = 64;
159 	} else {
160 		mode_config->cursor_width = 256;
161 		mode_config->cursor_height = 256;
162 	}
163 }
164 
/* Counterpart of intel_mode_config_init(): global objs first, then drm core */
static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}
170 
171 static void intel_plane_possible_crtcs_init(struct intel_display *display)
172 {
173 	struct intel_plane *plane;
174 
175 	for_each_intel_plane(display->drm, plane) {
176 		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
177 							      plane->pipe);
178 
179 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
180 	}
181 }
182 
/*
 * Earliest display init: PCH detection, lock init and per-platform hook
 * selection. No hardware access beyond PCH detection happens here.
 */
void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	/* Select the per-platform function pointer tables */
	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}
208 
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	/* Everything below is display-output specific; skip it without display */
	if (!HAS_DISPLAY(display))
		return 0;

	intel_dmc_init(display);

	/* Ordered wq: DP hotplug work must not run concurrently */
	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	intel_mode_config_init(display);

	/* Register the global atomic state objects (cdclk, dbuf, bw, ...) */
	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_cleanup;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

	/* Unwind in strict reverse order of the steps above */
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}
318 
/*
 * Update the display HW access policy under all modeset locks, so the
 * change is atomic wrt. any modeset/probe thread checking the policy.
 */
static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			/* presumably retries via the ctx-retry macro on -EDEADLK */
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	/* Locking can only fail here if the retry loop gave up entirely */
	drm_WARN_ON(display->drm, err);
}
337 
/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	/* Re-arm HPD detection work, disabled while access was restricted */
	intel_hpd_enable_detection_work(display);
}
354 
/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples for such accesses
 * are modeset commits and connector probing. For the current thread the
 * access is still enabled, which should only perform HW init/deinit
 * programming (as the initial modeset during driver loading or the disabling
 * modeset during driver unloading and system suspend/shutdown). This function
 * should be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	/* Stop HPD detection work before restricting access to this thread */
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}
378 
/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing. This call should be either
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shutdown.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	/* No allowed task at all: even the current thread loses access */
	set_display_access(display, false, NULL);
}
395 
/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples for such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	/* Only the current (resume) thread is granted access */
	set_display_access(display, false, current);
}
414 
415 /**
416  * intel_display_driver_check_access - Check if the current thread has disaplay HW access
417  * @display: display device instance
418  *
419  * Check whether the current thread has display HW access, print a debug
420  * message if it doesn't. Such accesses are modeset commits and connector
421  * probing. If the function returns %false any HW access should be prevented.
422  *
423  * Returns %true if the current thread has display HW access, %false
424  * otherwise.
425  */
426 bool intel_display_driver_check_access(struct intel_display *display)
427 {
428 	char current_task[TASK_COMM_LEN + 16];
429 	char allowed_task[TASK_COMM_LEN + 16] = "none";
430 
431 	if (display->access.any_task_allowed ||
432 	    display->access.allowed_task == current)
433 		return true;
434 
435 	snprintf(current_task, sizeof(current_task), "%s[%d]",
436 		 current->comm, task_pid_vnr(current));
437 
438 	if (display->access.allowed_task)
439 		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
440 			 display->access.allowed_task->comm,
441 			 task_pid_vnr(display->access.allowed_task));
442 
443 	drm_dbg_kms(display->drm,
444 		    "Reject display access from task %s (allowed to %s)\n",
445 		    current_task, allowed_task);
446 
447 	return false;
448 }
449 
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	enum pipe pipe;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(display),
		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");

	/* One CRTC per pipe */
	for_each_pipe(display, pipe) {
		ret = intel_crtc_init(display, pipe);
		if (ret)
			goto err_mode_config;
	}

	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_update_czclk(display);
	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	/* Restrict HW access to this thread for the initial state takeover */
	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}
522 
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		/* Deliberately non-fatal: log and carry on with probe */
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}
557 
/* Register the display with userspace facing interfaces (debugfs, fbdev, ...). */
void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	/* HW init is complete, open up access for user threads */
	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}
593 
/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* Drain any still-pending commit/cleanup work first */
	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}
611 
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (!HAS_DISPLAY(display))
		return;

	/* Revoke HW access from all threads for the rest of teardown */
	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(i915->unordered_wq);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	/* All users are gone by now, the workqueues can be destroyed */
	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);

	intel_fbc_cleanup(display);
}
650 
/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	/* Mirrors the error unwind tail of intel_display_driver_probe_noirq() */
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}
662 
/* Unregister the userspace facing interfaces set up by _register(). */
void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	/* Tears down the fbdev client among others */
	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}
688 
689 /*
690  * turn all crtc's off, but do not adjust state
691  * This has to be paired with a call to intel_modeset_setup_hw_state.
692  */
693 int intel_display_driver_suspend(struct intel_display *display)
694 {
695 	struct drm_atomic_state *state;
696 	int ret;
697 
698 	if (!HAS_DISPLAY(display))
699 		return 0;
700 
701 	state = drm_atomic_helper_suspend(display->drm);
702 	ret = PTR_ERR_OR_ZERO(state);
703 	if (ret)
704 		drm_err(display->drm, "Suspending crtc's failed with %i\n",
705 			ret);
706 	else
707 		display->restore.modeset_state = state;
708 
709 	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
710 	flush_workqueue(display->wq.cleanup);
711 
712 	intel_dp_mst_suspend(display);
713 
714 	return ret;
715 }
716 
/*
 * Restore the modeset state saved at suspend time: read out the HW state
 * and, if a duplicated state was saved, commit it back.
 *
 * Returns 0 on success or when there is no state to restore, otherwise a
 * negative error code from the commit.
 */
int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* -EDEADLK should have been handled by the caller's ctx retry loop */
	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}
755 
756 void intel_display_driver_resume(struct intel_display *display)
757 {
758 	struct drm_atomic_state *state = display->restore.modeset_state;
759 	struct drm_modeset_acquire_ctx ctx;
760 	int ret;
761 
762 	if (!HAS_DISPLAY(display))
763 		return;
764 
765 	/* MST sideband requires HPD interrupts enabled */
766 	intel_dp_mst_resume(display);
767 
768 	display->restore.modeset_state = NULL;
769 	if (state)
770 		state->acquire_ctx = &ctx;
771 
772 	drm_modeset_acquire_init(&ctx, 0);
773 
774 	while (1) {
775 		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
776 		if (ret != -EDEADLK)
777 			break;
778 
779 		drm_modeset_backoff(&ctx);
780 	}
781 
782 	if (!ret)
783 		ret = __intel_display_driver_resume(display, state, &ctx);
784 
785 	skl_watermark_ipc_update(display);
786 	drm_modeset_drop_locks(&ctx);
787 	drm_modeset_acquire_fini(&ctx);
788 
789 	if (ret)
790 		drm_err(display->drm,
791 			"Restoring old state failed with %i\n", ret);
792 	if (state)
793 		drm_atomic_state_put(state);
794 }
795