xref: /linux/drivers/gpu/drm/i915/display/intel_display_driver.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022-2023 Intel Corporation
4  *
5  * High level display driver entry points. This is a layer between top level
6  * driver code and low level display functionality; no low level display code or
7  * details here.
8  */
9 
10 #include <linux/vga_switcheroo.h>
11 #include <acpi/video.h>
12 #include <drm/display/drm_dp_mst_helper.h>
13 #include <drm/drm_atomic_helper.h>
14 #include <drm/drm_client_event.h>
15 #include <drm/drm_mode_config.h>
16 #include <drm/drm_privacy_screen_consumer.h>
17 #include <drm/drm_print.h>
18 #include <drm/drm_probe_helper.h>
19 #include <drm/drm_vblank.h>
20 
21 #include "i915_drv.h"
22 #include "i915_utils.h" /* for i915_inject_probe_failure() */
23 #include "i9xx_wm.h"
24 #include "intel_acpi.h"
25 #include "intel_atomic.h"
26 #include "intel_audio.h"
27 #include "intel_bios.h"
28 #include "intel_bw.h"
29 #include "intel_cdclk.h"
30 #include "intel_color.h"
31 #include "intel_crtc.h"
32 #include "intel_dbuf_bw.h"
33 #include "intel_display_core.h"
34 #include "intel_display_debugfs.h"
35 #include "intel_display_driver.h"
36 #include "intel_display_irq.h"
37 #include "intel_display_power.h"
38 #include "intel_display_types.h"
39 #include "intel_display_utils.h"
40 #include "intel_display_wa.h"
41 #include "intel_dkl_phy.h"
42 #include "intel_dmc.h"
43 #include "intel_dp.h"
44 #include "intel_dp_tunnel.h"
45 #include "intel_dpll.h"
46 #include "intel_dpll_mgr.h"
47 #include "intel_fb.h"
48 #include "intel_fbc.h"
49 #include "intel_fbdev.h"
50 #include "intel_fdi.h"
51 #include "intel_flipq.h"
52 #include "intel_gmbus.h"
53 #include "intel_hdcp.h"
54 #include "intel_hotplug.h"
55 #include "intel_hti.h"
56 #include "intel_modeset_lock.h"
57 #include "intel_modeset_setup.h"
58 #include "intel_opregion.h"
59 #include "intel_overlay.h"
60 #include "intel_plane_initial.h"
61 #include "intel_pmdemand.h"
62 #include "intel_pps.h"
63 #include "intel_psr.h"
64 #include "intel_quirks.h"
65 #include "intel_vga.h"
66 #include "intel_wm.h"
67 #include "skl_watermark.h"
68 
69 bool intel_display_driver_probe_defer(struct pci_dev *pdev)
70 {
71 	struct drm_privacy_screen *privacy_screen;
72 
73 	/*
74 	 * apple-gmux is needed on dual GPU MacBook Pro
75 	 * to probe the panel if we're the inactive GPU.
76 	 */
77 	if (vga_switcheroo_client_probe_defer(pdev))
78 		return true;
79 
80 	/* If the LCD panel has a privacy-screen, wait for it */
81 	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
82 	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
83 		return true;
84 
85 	drm_privacy_screen_put(privacy_screen);
86 
87 	return false;
88 }
89 
/* Early display HW init: read out cdclk state and apply display workarounds. */
void intel_display_driver_init_hw(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* Read the current cdclk config before anything depends on it. */
	intel_cdclk_read_hw(display);

	intel_display_wa_apply(display);
}
99 
/* Mode config vfuncs: framebuffer creation plus the atomic state machinery. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
110 
/* Mode config helper vfuncs; DP MST hooks into atomic commit setup. */
static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
114 
115 static void intel_mode_config_init(struct intel_display *display)
116 {
117 	struct drm_mode_config *mode_config = &display->drm->mode_config;
118 
119 	drm_mode_config_init(display->drm);
120 	INIT_LIST_HEAD(&display->global.obj_list);
121 
122 	mode_config->min_width = 0;
123 	mode_config->min_height = 0;
124 
125 	mode_config->preferred_depth = 24;
126 	mode_config->prefer_shadow = 1;
127 
128 	mode_config->funcs = &intel_mode_funcs;
129 	mode_config->helper_private = &intel_mode_config_funcs;
130 
131 	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);
132 
133 	/*
134 	 * Maximum framebuffer dimensions, chosen to match
135 	 * the maximum render engine surface size on gen4+.
136 	 */
137 	if (DISPLAY_VER(display) >= 7) {
138 		mode_config->max_width = 16384;
139 		mode_config->max_height = 16384;
140 	} else if (DISPLAY_VER(display) >= 4) {
141 		mode_config->max_width = 8192;
142 		mode_config->max_height = 8192;
143 	} else if (DISPLAY_VER(display) == 3) {
144 		mode_config->max_width = 4096;
145 		mode_config->max_height = 4096;
146 	} else {
147 		mode_config->max_width = 2048;
148 		mode_config->max_height = 2048;
149 	}
150 
151 	if (display->platform.i845g || display->platform.i865g) {
152 		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
153 		mode_config->cursor_height = 1023;
154 	} else if (display->platform.i830 || display->platform.i85x ||
155 		   display->platform.i915g || display->platform.i915gm) {
156 		mode_config->cursor_width = 64;
157 		mode_config->cursor_height = 64;
158 	} else {
159 		mode_config->cursor_width = 256;
160 		mode_config->cursor_height = 256;
161 	}
162 }
163 
/* Inverse of intel_mode_config_init(); also tears down atomic global objects. */
static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}
169 
170 static void intel_plane_possible_crtcs_init(struct intel_display *display)
171 {
172 	struct intel_plane *plane;
173 
174 	for_each_intel_plane(display->drm, plane) {
175 		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
176 							      plane->pipe);
177 
178 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
179 	}
180 }
181 
/*
 * Earliest display probe step: PCH detection plus software-only setup of
 * locks and platform hook tables used by the later probe phases.
 */
void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	/* Locks shared across the display core; must exist before first use. */
	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	/* Install the platform specific hooks consumed later during probe. */
	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}
207 
/*
 * part #1: call before irq install
 *
 * Sets up vblank, VBT/opregion, VGA, power domains, the display workqueues
 * and the software state of cdclk/color/dbuf/bw/pmdemand. On failure the
 * goto chain unwinds in strict reverse order of initialization.
 */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;

	/* Fault-injection hook for probe error path testing. */
	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	/* Everything below is display-output specific; skip if there is none. */
	if (!HAS_DISPLAY(display))
		return 0;

	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	intel_dmc_init(display);

	intel_mode_config_init(display);

	/*
	 * The global-state inits below are all unwound together by
	 * intel_mode_config_cleanup() via the error paths of later phases;
	 * a failure here only needs to free the workqueues and earlier setup.
	 */
	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
	/* Covers both the DMC firmware state and the power domains. */
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}
329 
/*
 * Update the display HW access policy (see intel_display_driver_check_access())
 * under all modeset locks, so the policy can't change mid-modeset/probe.
 */
static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	/* Retry-on-deadlock loop provided by the ctx helper macro. */
	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	/* Only -EDEADLK is retried above; any leftover error is unexpected. */
	drm_WARN_ON(display->drm, err);
}
348 
/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	/* Detection work was held off while user access was disabled. */
	intel_hpd_enable_detection_work(display);
}
365 
/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples for such accesses
 * are modeset commits and connector probing. For the current thread the
 * access is still enabled, which should only perform HW init/deinit
 * programming (as the initial modeset during driver loading or the disabling
 * modeset during driver unloading and system suspend/shutdown). This function
 * should be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	/* Stop detection work first, so it can't race with the policy change. */
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}
389 
/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing. This call should be either
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shutdown.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	/* No allowed task at all: nobody may touch the display HW now. */
	set_display_access(display, false, NULL);
}
406 
/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples for such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	/* Only the current (resume) thread gets access. */
	set_display_access(display, false, current);
}
425 
/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access, print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	/* "comm[pid]" - TASK_COMM_LEN plus room for the bracketed pid. */
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}
460 
/*
 * part #2: call after irq install, but before gem init
 *
 * Creates CRTCs/planes/outputs, performs the HW state readout/sanitization
 * and takes over the firmware framebuffer configuration.
 */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	enum pipe pipe;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	/* PPS must be set up before any eDP/panel power sequencing use. */
	intel_pps_setup(display);

	intel_gmbus_setup(display);

	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(display),
		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");

	for_each_pipe(display, pipe) {
		ret = intel_crtc_init(display, pipe);
		if (ret)
			goto err_mode_config;
	}

	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	/* Just disable it once at startup */
	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	/* From here until the access re-enable, only this thread may touch HW. */
	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}
532 
/*
 * part #3: call after gem init
 *
 * Final probe phase: components needing GGTT bindings, the initial commit,
 * overlay, hotplug and IPC.
 */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	intel_flipq_init(display);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		/* A failed initial modeset is logged but deliberately non-fatal. */
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}
569 
/*
 * Register the display side with userspace-visible interfaces: opregion,
 * ACPI video, audio, debugfs, connector polling and fbdev.
 */
void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	/* HW init is complete; user threads may access the display from here. */
	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	/* Dump the static + runtime display device info to the KMS debug log. */
	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}
605 
/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* Drain any work still in flight before tearing things down. */
	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);
	flush_workqueue(display->wq.unordered);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}
624 
/*
 * part #2: call after irq uninstall
 *
 * Tears down, roughly in reverse probe order, everything that part #1 and
 * part #2 of probe set up; finally destroys the display workqueues.
 */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* From here on only this (unload) thread may touch the display HW. */
	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(display->wq.unordered);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	/* Nothing may queue work on these once the teardown above is done. */
	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);
	destroy_workqueue(display->wq.unordered);

	intel_fbc_cleanup(display);
}
662 
/*
 * part #3: call after gem init
 *
 * Mirrors the tail of intel_display_driver_probe_noirq()'s error unwind:
 * DMC, power domains, VGA and VBT, in that order.
 */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}
674 
/* Inverse of intel_display_driver_register(); also shuts down the outputs. */
void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	/* Unregisters fbdev and other in-kernel DRM clients. */
	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	/* User threads lose display HW access for the rest of the teardown. */
	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	/* Commit a state with everything off before the HW goes away. */
	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}
700 
701 /*
702  * turn all crtc's off, but do not adjust state
703  * This has to be paired with a call to intel_modeset_setup_hw_state.
704  */
705 int intel_display_driver_suspend(struct intel_display *display)
706 {
707 	struct drm_atomic_state *state;
708 	int ret;
709 
710 	if (!HAS_DISPLAY(display))
711 		return 0;
712 
713 	state = drm_atomic_helper_suspend(display->drm);
714 	ret = PTR_ERR_OR_ZERO(state);
715 	if (ret)
716 		drm_err(display->drm, "Suspending crtc's failed with %i\n",
717 			ret);
718 	else
719 		display->restore.modeset_state = state;
720 
721 	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
722 	flush_workqueue(display->wq.cleanup);
723 
724 	intel_dp_mst_suspend(display);
725 
726 	return ret;
727 }
728 
729 int
730 __intel_display_driver_resume(struct intel_display *display,
731 			      struct drm_atomic_state *state,
732 			      struct drm_modeset_acquire_ctx *ctx)
733 {
734 	struct drm_crtc_state *crtc_state;
735 	struct drm_crtc *crtc;
736 	int ret, i;
737 
738 	intel_modeset_setup_hw_state(display, ctx);
739 
740 	if (!state)
741 		return 0;
742 
743 	/*
744 	 * We've duplicated the state, pointers to the old state are invalid.
745 	 *
746 	 * Don't attempt to use the old state until we commit the duplicated state.
747 	 */
748 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
749 		/*
750 		 * Force recalculation even if we restore
751 		 * current state. With fast modeset this may not result
752 		 * in a modeset when the state is compatible.
753 		 */
754 		crtc_state->mode_changed = true;
755 	}
756 
757 	/* ignore any reset values/BIOS leftovers in the WM registers */
758 	if (!HAS_GMCH(display))
759 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
760 
761 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
762 
763 	drm_WARN_ON(display->drm, ret == -EDEADLK);
764 
765 	return ret;
766 }
767 
/*
 * System-resume entry point: resume MST, take all modeset locks and restore
 * the modeset state stashed by intel_display_driver_suspend().
 */
void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	/* Consume the stashed state; it is committed (or dropped) below. */
	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry taking all modeset locks until we win the deadlock backoff. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	/* Drop the reference taken when the state was duplicated at suspend. */
	if (state)
		drm_atomic_state_put(state);
}
807