xref: /linux/drivers/gpu/drm/i915/display/intel_display_driver.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022-2023 Intel Corporation
4  *
5  * High level display driver entry points. This is a layer between top level
6  * driver code and low level display functionality; no low level display code or
7  * details here.
8  */
9 
10 #include <linux/vga_switcheroo.h>
11 #include <acpi/video.h>
12 #include <drm/display/drm_dp_mst_helper.h>
13 #include <drm/drm_atomic_helper.h>
14 #include <drm/drm_client_event.h>
15 #include <drm/drm_mode_config.h>
16 #include <drm/drm_privacy_screen_consumer.h>
17 #include <drm/drm_print.h>
18 #include <drm/drm_probe_helper.h>
19 #include <drm/drm_vblank.h>
20 
21 #include "i9xx_wm.h"
22 #include "intel_acpi.h"
23 #include "intel_atomic.h"
24 #include "intel_audio.h"
25 #include "intel_bios.h"
26 #include "intel_bw.h"
27 #include "intel_cdclk.h"
28 #include "intel_color.h"
29 #include "intel_crtc.h"
30 #include "intel_cursor.h"
31 #include "intel_dbuf_bw.h"
32 #include "intel_display_core.h"
33 #include "intel_display_debugfs.h"
34 #include "intel_display_driver.h"
35 #include "intel_display_irq.h"
36 #include "intel_display_power.h"
37 #include "intel_display_types.h"
38 #include "intel_display_utils.h"
39 #include "intel_display_wa.h"
40 #include "intel_dkl_phy.h"
41 #include "intel_dmc.h"
42 #include "intel_dp.h"
43 #include "intel_dp_tunnel.h"
44 #include "intel_dpll.h"
45 #include "intel_dpll_mgr.h"
46 #include "intel_fb.h"
47 #include "intel_fbc.h"
48 #include "intel_fbdev.h"
49 #include "intel_fdi.h"
50 #include "intel_flipq.h"
51 #include "intel_gmbus.h"
52 #include "intel_hdcp.h"
53 #include "intel_hotplug.h"
54 #include "intel_hti.h"
55 #include "intel_initial_plane.h"
56 #include "intel_modeset_lock.h"
57 #include "intel_modeset_setup.h"
58 #include "intel_opregion.h"
59 #include "intel_overlay.h"
60 #include "intel_pmdemand.h"
61 #include "intel_pps.h"
62 #include "intel_psr.h"
63 #include "intel_quirks.h"
64 #include "intel_vga.h"
65 #include "intel_wm.h"
66 #include "skl_watermark.h"
67 
68 bool intel_display_driver_probe_defer(struct pci_dev *pdev)
69 {
70 	struct drm_privacy_screen *privacy_screen;
71 
72 	/*
73 	 * apple-gmux is needed on dual GPU MacBook Pro
74 	 * to probe the panel if we're the inactive GPU.
75 	 */
76 	if (vga_switcheroo_client_probe_defer(pdev))
77 		return true;
78 
79 	/* If the LCD panel has a privacy-screen, wait for it */
80 	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
81 	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
82 		return true;
83 
84 	drm_privacy_screen_put(privacy_screen);
85 
86 	return false;
87 }
88 
/**
 * intel_display_driver_init_hw - basic display HW init
 * @display: display device instance
 *
 * Read out the current cdclk hardware state and apply display
 * workarounds. No-op on devices without display.
 */
void intel_display_driver_init_hw(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_cdclk_read_hw(display);

	intel_display_wa_apply(display);
}
98 
/* drm_mode_config vfuncs: fb creation, mode validation and atomic state handling */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
109 
/* mode config helper vfuncs: let the DP MST helper hook into atomic commits */
static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
113 
114 static void intel_mode_config_init(struct intel_display *display)
115 {
116 	struct drm_mode_config *mode_config = &display->drm->mode_config;
117 
118 	drm_mode_config_init(display->drm);
119 	INIT_LIST_HEAD(&display->global.obj_list);
120 
121 	mode_config->min_width = 0;
122 	mode_config->min_height = 0;
123 
124 	mode_config->preferred_depth = 24;
125 	mode_config->prefer_shadow = 1;
126 
127 	mode_config->funcs = &intel_mode_funcs;
128 	mode_config->helper_private = &intel_mode_config_funcs;
129 
130 	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);
131 
132 	/*
133 	 * Maximum framebuffer dimensions, chosen to match
134 	 * the maximum render engine surface size on gen4+.
135 	 */
136 	if (DISPLAY_VER(display) >= 7) {
137 		mode_config->max_width = 16384;
138 		mode_config->max_height = 16384;
139 	} else if (DISPLAY_VER(display) >= 4) {
140 		mode_config->max_width = 8192;
141 		mode_config->max_height = 8192;
142 	} else if (DISPLAY_VER(display) == 3) {
143 		mode_config->max_width = 4096;
144 		mode_config->max_height = 4096;
145 	} else {
146 		mode_config->max_width = 2048;
147 		mode_config->max_height = 2048;
148 	}
149 
150 	intel_cursor_mode_config_init(display);
151 }
152 
/* Counterpart of intel_mode_config_init(): tear down global obj state and mode config. */
static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}
158 
159 static void intel_plane_possible_crtcs_init(struct intel_display *display)
160 {
161 	struct intel_plane *plane;
162 
163 	for_each_intel_plane(display->drm, plane) {
164 		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
165 							      plane->pipe);
166 
167 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
168 	}
169 }
170 
/**
 * intel_display_driver_early_probe - earliest display probe step
 * @display: display device instance
 *
 * Detect the PCH, and for devices with display initialize the locks
 * and the platform-specific function hook tables. Must run before any
 * other display probe step.
 */
void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	/* locks protecting the various display sub-struct states */
	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	/* install the platform-specific hooks/vfuncs */
	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}
196 
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	int ret;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	/* VBT and power domains are set up even without display outputs */
	intel_bios_init(display);

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_bios;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	/* everything below is display-output specific */
	if (!HAS_DISPLAY(display))
		return 0;

	/* ordered wq for DP hotplug work */
	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_pw_domain_dmc;
	}

	/* ordered wq for modeset commits */
	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	/* high priority wq for page flip work */
	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI | WQ_PERCPU, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	display->wq.unordered = alloc_workqueue("display_unordered", WQ_PERCPU, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	intel_dmc_init(display);

	intel_mode_config_init(display);

	/* register the cdclk/color/dbuf/bw/pmdemand atomic global objects */
	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

	/* unwind in reverse order of the setup above */
cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}
ALLOW_ERROR_INJECTION(intel_display_driver_probe_noirq, ERRNO);
309 
/*
 * Update the display HW access policy. All modeset locks are taken while
 * updating, so the change cannot race with an ongoing modeset or probe.
 *
 * @any_task_allowed: if true, all tasks may access display HW.
 * @allowed_task: if @any_task_allowed is false, the single task (or NULL
 *	for none) that may still access display HW.
 */
static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	/* only a deadlock should retry; any other error is unexpected */
	drm_WARN_ON(display->drm, err);
}
328 
329 /**
330  * intel_display_driver_enable_user_access - Enable display HW access for all threads
331  * @display: display device instance
332  *
333  * Enable the display HW access for all threads. Examples for such accesses
334  * are modeset commits and connector probing.
335  *
336  * This function should be called during driver loading and system resume once
337  * all the HW initialization steps are done.
338  */
339 void intel_display_driver_enable_user_access(struct intel_display *display)
340 {
341 	set_display_access(display, true, NULL);
342 
343 	intel_hpd_enable_detection_work(display);
344 }
345 
346 /**
347  * intel_display_driver_disable_user_access - Disable display HW access for user threads
348  * @display: display device instance
349  *
350  * Disable the display HW access for user threads. Examples for such accesses
351  * are modeset commits and connector probing. For the current thread the
352  * access is still enabled, which should only perform HW init/deinit
353  * programming (as the initial modeset during driver loading or the disabling
354  * modeset during driver unloading and system suspend/shutdown). This function
355  * should be followed by calling either intel_display_driver_enable_user_access()
356  * after completing the HW init programming or
357  * intel_display_driver_suspend_access() after completing the HW deinit
358  * programming.
359  *
360  * This function should be called during driver loading/unloading and system
361  * suspend/shutdown before starting the HW init/deinit programming.
362  */
363 void intel_display_driver_disable_user_access(struct intel_display *display)
364 {
365 	intel_hpd_disable_detection_work(display);
366 
367 	set_display_access(display, false, current);
368 }
369 
370 /**
371  * intel_display_driver_suspend_access - Suspend display HW access for all threads
372  * @display: display device instance
373  *
374  * Disable the display HW access for all threads. Examples for such accesses
375  * are modeset commits and connector probing. This call should be either
376  * followed by calling intel_display_driver_resume_access(), or the driver
377  * should be unloaded/shutdown.
378  *
379  * This function should be called during driver unloading and system
380  * suspend/shutdown after completing the HW deinit programming.
381  */
382 void intel_display_driver_suspend_access(struct intel_display *display)
383 {
384 	set_display_access(display, false, NULL);
385 }
386 
387 /**
388  * intel_display_driver_resume_access - Resume display HW access for the resume thread
389  * @display: display device instance
390  *
391  * Enable the display HW access for the current resume thread, keeping the
392  * access disabled for all other (user) threads. Examples for such accesses
393  * are modeset commits and connector probing. The resume thread should only
394  * perform HW init programming (as the restoring modeset). This function
395  * should be followed by calling intel_display_driver_enable_user_access(),
396  * after completing the HW init programming steps.
397  *
398  * This function should be called during system resume before starting the HW
399  * init steps.
400  */
401 void intel_display_driver_resume_access(struct intel_display *display)
402 {
403 	set_display_access(display, false, current);
404 }
405 
406 /**
407  * intel_display_driver_check_access - Check if the current thread has disaplay HW access
408  * @display: display device instance
409  *
410  * Check whether the current thread has display HW access, print a debug
411  * message if it doesn't. Such accesses are modeset commits and connector
412  * probing. If the function returns %false any HW access should be prevented.
413  *
414  * Returns %true if the current thread has display HW access, %false
415  * otherwise.
416  */
417 bool intel_display_driver_check_access(struct intel_display *display)
418 {
419 	char current_task[TASK_COMM_LEN + 16];
420 	char allowed_task[TASK_COMM_LEN + 16] = "none";
421 
422 	if (display->access.any_task_allowed ||
423 	    display->access.allowed_task == current)
424 		return true;
425 
426 	snprintf(current_task, sizeof(current_task), "%s[%d]",
427 		 current->comm, task_pid_vnr(current));
428 
429 	if (display->access.allowed_task)
430 		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
431 			 display->access.allowed_task->comm,
432 			 task_pid_vnr(display->access.allowed_task));
433 
434 	drm_dbg_kms(display->drm,
435 		    "Reject display access from task %s (allowed to %s)\n",
436 		    current_task, allowed_task);
437 
438 	return false;
439 }
440 
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	ret = intel_crtc_init(display);
	if (ret)
		goto err_mode_config;

	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	/* Just disable it once at startup */
	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	/* from here on only the (current) init thread may touch the HW */
	intel_display_driver_disable_user_access(display);

	/* read out and sanitize the BIOS-programmed HW state */
	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	/* NOTE(review): hdcp component init happens later in part #3; fini is expected to tolerate that — confirm */
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}
505 
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	intel_flipq_init(display);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		/* non-fatal: log and continue with the rest of the probe */
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}
542 
/* Register the display device with userspace-facing interfaces. */
void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	intel_vga_register(display);

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	/* HW init is complete: open up display HW access for all threads */
	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	/* dump the display device/runtime info to the KMS debug log */
	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}
580 
/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* let any pending display work finish before tearing things down */
	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);
	flush_workqueue(display->wq.unordered);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}
599 
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* HW deinit is done: revoke display HW access for all threads */
	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(display->wq.unordered);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	/* all work is flushed/finished by now; drop the workqueues */
	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);
	destroy_workqueue(display->wq.unordered);

	intel_fbc_cleanup(display);
}
637 
/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	/*
	 * No HAS_DISPLAY() check: probe_noirq initializes the VBT, power
	 * domains and DMC before its display check, so tear them down
	 * unconditionally too, in reverse order.
	 */
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_bios_driver_remove(display);
}
647 
/* Unregister the display device from userspace-facing interfaces. */
void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* NOTE(review): also called from intel_display_driver_remove_noirq() — confirm the double unregister is intended/harmless */
	intel_unregister_dsm_handler();

	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	/* only the current thread keeps HW access for the disabling modeset */
	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);

	intel_vga_unregister(display);
}
675 
676 /*
677  * turn all crtc's off, but do not adjust state
678  * This has to be paired with a call to intel_modeset_setup_hw_state.
679  */
680 int intel_display_driver_suspend(struct intel_display *display)
681 {
682 	struct drm_atomic_state *state;
683 	int ret;
684 
685 	if (!HAS_DISPLAY(display))
686 		return 0;
687 
688 	state = drm_atomic_helper_suspend(display->drm);
689 	ret = PTR_ERR_OR_ZERO(state);
690 	if (ret)
691 		drm_err(display->drm, "Suspending crtc's failed with %i\n",
692 			ret);
693 	else
694 		display->restore.modeset_state = state;
695 
696 	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
697 	flush_workqueue(display->wq.cleanup);
698 
699 	intel_dp_mst_suspend(display);
700 
701 	return ret;
702 }
703 
/*
 * Re-read the HW state and re-commit the (duplicated) modeset state saved
 * at suspend time. @state may be NULL, in which case only the HW state
 * readout is done. Called with all modeset locks held via @ctx.
 */
int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* all locks are already held, so a deadlock here would be a bug */
	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}
742 
/* Restore the display state saved by intel_display_driver_suspend(). */
void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	/* consume the saved state; it is put below in any case */
	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* take all modeset locks, backing off on deadlock */
	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
782