xref: /linux/drivers/gpu/drm/i915/display/intel_display_driver.c (revision 9dacae143e6ff18e77fbad6f1413fb8f2f975407)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022-2023 Intel Corporation
4  *
5  * High level display driver entry points. This is a layer between top level
6  * driver code and low level display functionality; no low level display code or
7  * details here.
8  */
9 
10 #include <linux/vga_switcheroo.h>
11 #include <acpi/video.h>
12 #include <drm/display/drm_dp_mst_helper.h>
13 #include <drm/drm_atomic_helper.h>
14 #include <drm/drm_client_event.h>
15 #include <drm/drm_mode_config.h>
16 #include <drm/drm_privacy_screen_consumer.h>
17 #include <drm/drm_print.h>
18 #include <drm/drm_probe_helper.h>
19 #include <drm/drm_vblank.h>
20 
21 #include "i915_drv.h"
22 #include "i915_utils.h" /* for i915_inject_probe_failure() */
23 #include "i9xx_wm.h"
24 #include "intel_acpi.h"
25 #include "intel_atomic.h"
26 #include "intel_audio.h"
27 #include "intel_bios.h"
28 #include "intel_bw.h"
29 #include "intel_cdclk.h"
30 #include "intel_color.h"
31 #include "intel_crtc.h"
32 #include "intel_cursor.h"
33 #include "intel_dbuf_bw.h"
34 #include "intel_display_core.h"
35 #include "intel_display_debugfs.h"
36 #include "intel_display_driver.h"
37 #include "intel_display_irq.h"
38 #include "intel_display_power.h"
39 #include "intel_display_types.h"
40 #include "intel_display_utils.h"
41 #include "intel_display_wa.h"
42 #include "intel_dkl_phy.h"
43 #include "intel_dmc.h"
44 #include "intel_dp.h"
45 #include "intel_dp_tunnel.h"
46 #include "intel_dpll.h"
47 #include "intel_dpll_mgr.h"
48 #include "intel_fb.h"
49 #include "intel_fbc.h"
50 #include "intel_fbdev.h"
51 #include "intel_fdi.h"
52 #include "intel_flipq.h"
53 #include "intel_gmbus.h"
54 #include "intel_hdcp.h"
55 #include "intel_hotplug.h"
56 #include "intel_hti.h"
57 #include "intel_initial_plane.h"
58 #include "intel_modeset_lock.h"
59 #include "intel_modeset_setup.h"
60 #include "intel_opregion.h"
61 #include "intel_overlay.h"
62 #include "intel_pmdemand.h"
63 #include "intel_pps.h"
64 #include "intel_psr.h"
65 #include "intel_quirks.h"
66 #include "intel_vga.h"
67 #include "intel_wm.h"
68 #include "skl_watermark.h"
69 
70 bool intel_display_driver_probe_defer(struct pci_dev *pdev)
71 {
72 	struct drm_privacy_screen *privacy_screen;
73 
74 	/*
75 	 * apple-gmux is needed on dual GPU MacBook Pro
76 	 * to probe the panel if we're the inactive GPU.
77 	 */
78 	if (vga_switcheroo_client_probe_defer(pdev))
79 		return true;
80 
81 	/* If the LCD panel has a privacy-screen, wait for it */
82 	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
83 	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
84 		return true;
85 
86 	drm_privacy_screen_put(privacy_screen);
87 
88 	return false;
89 }
90 
/*
 * intel_display_driver_init_hw - early display HW init
 * @display: display device instance
 *
 * Read out the current cdclk hardware state and apply display workarounds.
 * No-op when the device has no display.
 */
void intel_display_driver_init_hw(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* Take over whatever cdclk state the BIOS/GOP left behind. */
	intel_cdclk_read_hw(display);

	intel_display_wa_apply(display);
}
100 
/* drm core mode-config hooks: fb creation, mode validation and atomic ops */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	/* custom state alloc/clear/free to carry the intel_atomic_state wrapper */
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
111 
/* mode-config helper hooks; MST needs a hand in atomic commit setup */
static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
115 
/*
 * intel_mode_config_init - set up the drm mode_config for this device
 * @display: display device instance
 *
 * Initialize the drm mode_config structure and fill in the platform
 * dependent limits and function tables. Paired with
 * intel_mode_config_cleanup().
 */
static void intel_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	drm_mode_config_init(display->drm);
	INIT_LIST_HEAD(&display->global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(display) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(display) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(display) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	/* cursor size limits are platform dependent as well */
	intel_cursor_mode_config_init(display);
}
154 
/*
 * intel_mode_config_cleanup - undo intel_mode_config_init()
 * @display: display device instance
 *
 * Global atomic state objects must go before the drm mode_config itself.
 */
static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}
160 
161 static void intel_plane_possible_crtcs_init(struct intel_display *display)
162 {
163 	struct intel_plane *plane;
164 
165 	for_each_intel_plane(display->drm, plane) {
166 		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
167 							      plane->pipe);
168 
169 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
170 	}
171 }
172 
/*
 * intel_display_driver_early_probe - earliest display probe step
 * @display: display device instance
 *
 * Detect the PCH and initialize locks and the platform dependent function
 * pointer tables ("hooks"). Must run before anything that consults the
 * hooks or the PCH type.
 */
void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	/* per-platform vfunc/hook selection; safe before irq install */
	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}
198 
/* part #1: call before irq install */
/*
 * Probe everything that doesn't need interrupts: VBT, power domains,
 * workqueues, DMC firmware, mode config and the various global atomic
 * state objects. Errors unwind in strict reverse order via the goto
 * chain at the bottom. Returns 0 or a negative error code.
 */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	int ret;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	/* parse the VBT; needed even on display-less parts for opregion */
	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	/* everything below only matters when there are pipes/outputs */
	if (!HAS_DISPLAY(display))
		return 0;

	/* ordered wq for DP link retraining etc. hotplug work */
	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	/* flips are latency sensitive, hence WQ_HIGHPRI | WQ_UNBOUND */
	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	/* kicks off async DMC firmware loading */
	intel_dmc_init(display);

	intel_mode_config_init(display);

	/* global atomic state objects; cleaned up via mode_config_cleanup */
	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

	/* unwind in exact reverse order of the setup above */
cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
	/*
	 * NOTE(review): intel_dmc_fini() can be reached here before
	 * intel_dmc_init() has run (dp_wq allocation failure) —
	 * presumably it tolerates that; confirm.
	 */
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}
ALLOW_ERROR_INJECTION(intel_display_driver_probe_noirq, ERRNO);
317 
/*
 * set_display_access - update who may touch display HW
 * @display: display device instance
 * @any_task_allowed: if true, every task gets access
 * @allowed_task: the single task allowed access when
 *	@any_task_allowed is false (NULL for no task)
 *
 * Update the access state under all modeset locks, so in-flight
 * modesets/probes observe a consistent value. The lock-ctx-retry loop
 * re-runs the body on -EDEADLK backoff.
 */
static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	/* only interruptible lock attempts can fail here; should not happen */
	drm_WARN_ON(display->drm, err);
}
336 
/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	/* detection work was parked while user access was disabled */
	intel_hpd_enable_detection_work(display);
}
353 
/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples for such accesses
 * are modeset commits and connector probing. For the current thread the
 * access is still enabled, which should only perform HW init/deinit
 * programming (as the initial modeset during driver loading or the disabling
 * modeset during driver unloading and system suspend/shutdown). This function
 * should be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	/* stop hotplug detection first so it can't race the access change */
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}
377 
/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing. This call should be either
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shutdown.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	/* false + NULL task: nobody, not even the current thread, has access */
	set_display_access(display, false, NULL);
}
394 
/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples for such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	/* only the current (resume) thread gets access */
	set_display_access(display, false, current);
}
413 
/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access, print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	/* comm + "[pid]" decoration, hence the extra 16 bytes */
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}
448 
/* part #2: call after irq install, but before gem init */
/*
 * Probe CRTCs, PLLs and outputs, then read out the hardware state left
 * by the BIOS/GOP. Runs with user display access disabled so no other
 * thread can race the state takeover. Returns 0 or a negative error code.
 */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	ret = intel_crtc_init(display);
	if (ret)
		goto err_mode_config;

	/* needs CRTCs from intel_crtc_init() above */
	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	/* Just disable it once at startup */
	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	/* HW state takeover must not race user modesets/probes */
	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	/* take over the BIOS framebuffer(s) for flicker free boot */
	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	/*
	 * NOTE(review): intel_hdcp_component_init() runs later in probe
	 * part #3, so this fini presumably handles the not-yet-initialized
	 * case — confirm.
	 */
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}
513 
/* part #3: call after gem init */
/*
 * Final display probe step, run once GEM/GGTT is up: HDCP/flip-queue
 * init, the initial commit and hotplug enabling. Returns 0; an initial
 * commit failure is only logged, not fatal.
 */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	intel_flipq_init(display);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}
550 
/*
 * intel_display_driver_register - register display with userspace/firmware
 * @display: display device instance
 *
 * Register opregion/ACPI/audio/debugfs interfaces, enable user HW access
 * and set up polling and fbdev. Called once probing is complete.
 */
void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	/* probe is done; user modesets/probes may proceed from here on */
	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	/* dump the static/runtime display caps to the KMS debug log */
	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}
586 
/* part #1: call before irq uninstall */
/*
 * Flush all display workqueues and suspend MST while interrupts are
 * still installed.
 */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* drain any work still queued before tearing things down */
	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);
	flush_workqueue(display->wq.unordered);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}
605 
/* part #2: call after irq uninstall */
/*
 * Tear down everything probe_noirq()/probe_nogem() set up, now that
 * interrupts are gone: polling, HDCP, mode config, DP tunnels, overlay,
 * GMBUS and finally the workqueues themselves.
 */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* all threads, including this one, lose display HW access */
	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(display->wq.unordered);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	/* nothing may queue work past this point */
	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);
	destroy_workqueue(display->wq.unordered);

	intel_fbc_cleanup(display);
}
643 
/* part #3: call after gem init */
/*
 * Final remove step, mirroring probe_noirq()'s early setup in reverse
 * order: DMC, power domains, VGA client and VBT.
 */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}
655 
/*
 * intel_display_driver_unregister - undo intel_display_driver_register()
 * @display: display device instance
 *
 * Unregister userspace/firmware interfaces and shut down all CRTCs via
 * drm_atomic_helper_shutdown().
 */
void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	/* tears down fbdev and other in-kernel clients */
	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	/* only this thread keeps HW access for the disabling modeset */
	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}
681 
/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
/*
 * Returns 0 on success or the negative error from
 * drm_atomic_helper_suspend(). On success the duplicated state is stashed
 * in display->restore.modeset_state for intel_display_driver_resume().
 */
int intel_display_driver_suspend(struct intel_display *display)
{
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	state = drm_atomic_helper_suspend(display->drm);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(display->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		display->restore.modeset_state = state;

	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
	flush_workqueue(display->wq.cleanup);

	intel_dp_mst_suspend(display);

	return ret;
}
709 
/*
 * __intel_display_driver_resume - restore display HW state on resume
 * @display: display device instance
 * @state: atomic state saved at suspend time (may be NULL)
 * @ctx: modeset acquire context held by the caller
 *
 * Read out the HW state and, if @state is given, recommit the
 * pre-suspend configuration. Returns 0 or a negative error code;
 * never -EDEADLK (the commit helper is expected to resolve that).
 */
int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}
748 
/*
 * intel_display_driver_resume - system resume entry point for display
 * @display: display device instance
 *
 * Resume MST, then restore the modeset state saved by
 * intel_display_driver_suspend() under all modeset locks, with the usual
 * -EDEADLK backoff-and-retry dance. Consumes (puts) the saved state.
 */
void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	/* take ownership of the saved state; it's committed or put below */
	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
788