// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022-2023 Intel Corporation
 *
 * High level display driver entry points. This is a layer between top level
 * driver code and low level display functionality; no low level display code or
 * details here.
 */

#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bios.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"

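/*
 * Returns true if probing should be deferred, i.e. when a dependency needed
 * to probe the panel (the vga_switcheroo handler on dual GPU machines, or
 * the LCD panel's privacy-screen provider) is not available yet.
 */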
bool intel_display_driver_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

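/* Read out the current cdclk hardware state and apply display workarounds. */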
void intel_display_driver_init_hw(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_cdclk_read_hw(display);

	intel_display_wa_apply(display);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

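/*
 * Set up the drm_mode_config for this device: atomic hooks, framebuffer size
 * limits per display version and cursor size limits per platform.
 */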
static void intel_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	drm_mode_config_init(display->drm);
	INIT_LIST_HEAD(&display->global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(display) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(display) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(display) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (display->platform.i845g || display->platform.i865g) {
		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (display->platform.i830 || display->platform.i85x ||
		   display->platform.i915g || display->platform.i915gm) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}

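/*
 * Each plane is tied to a single pipe, so it can only be used with that
 * pipe's CRTC.
 */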
static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
	struct intel_plane *plane;

	for_each_intel_plane(display->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

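/*
 * Early probe: detect the PCH type and initialize the locks and platform
 * specific function pointers used by the rest of the display code.
 */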
void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}

/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	if (!HAS_DISPLAY(display))
		return 0;

	display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
	if (!display->hotplug.dp_wq) {
		ret = -ENOMEM;
		goto cleanup_vga_client_pw_domain_dmc;
	}

	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	if (!display->wq.modeset) {
		ret = -ENOMEM;
		goto cleanup_wq_dp;
	}

	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	if (!display->wq.flip) {
		ret = -ENOMEM;
		goto cleanup_wq_modeset;
	}

	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup) {
		ret = -ENOMEM;
		goto cleanup_wq_flip;
	}

	display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	intel_dmc_init(display);

	intel_mode_config_init(display);

	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_wq_unordered;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
	destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
	destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
	destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}

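/*
 * Update the display access policy while holding all modeset locks, so the
 * change is serialized against any in-flight modeset or connector probe.
 */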
static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	drm_WARN_ON(display->drm, err);
}

/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples of such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	intel_hpd_enable_detection_work(display);
}

/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples of such accesses
 * are modeset commits and connector probing. For the current thread the
 * access remains enabled; this thread should only perform HW init/deinit
 * programming (such as the initial modeset during driver loading or the
 * disabling modeset during driver unloading and system suspend/shutdown).
 * This function should be followed by calling either
 * intel_display_driver_enable_user_access() after completing the HW init
 * programming or intel_display_driver_suspend_access() after completing the
 * HW deinit programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}

/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples of such accesses
 * are modeset commits and connector probing. This call should be either
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shutdown.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	set_display_access(display, false, NULL);
}

/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples of such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (such as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	set_display_access(display, false, current);
}

/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access, print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}

/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	enum pipe pipe;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(display),
		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");

	for_each_pipe(display, pipe) {
		ret = intel_crtc_init(display, pipe);
		if (ret)
			goto err_mode_config;
	}

	intel_plane_possible_crtcs_init(display);
	intel_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_update_czclk(display);
	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}

/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	intel_flipq_init(display);

	/*
	 * Force all active planes to recompute their states, so that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there are no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}

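/*
 * Register the userspace visible interfaces (opregion, ACPI video, audio,
 * debugfs, connector polling, fbdev) once output probing is done.
 */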
void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}

/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);
	flush_workqueue(display->wq.unordered);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}

/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(display->wq.unordered);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	destroy_workqueue(display->hotplug.dp_wq);
	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);
	destroy_workqueue(display->wq.unordered);

	intel_fbc_cleanup(display);
}

/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}

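/*
 * Unregister the userspace visible interfaces, stop connector polling and
 * shut down the remaining atomic state.
 */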
void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config, which will
	 * have delayed queuing of a hotplug event), flush the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_driver_suspend(struct intel_display *display)
{
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	state = drm_atomic_helper_suspend(display->drm);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(display->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		display->restore.modeset_state = state;

	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
	flush_workqueue(display->wq.cleanup);

	intel_dp_mst_suspend(display);

	return ret;
}

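/*
 * Read out the current hardware state and, if a state was saved at suspend
 * time, recommit it while forcing a modeset on every CRTC.
 */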
int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state; pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}

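/* Restore the modeset state saved by intel_display_driver_suspend(). */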
void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
801