/linux/drivers/gpu/drm/xe/display/xe_display.c (revision dcdd6b84d9acaa0794c29de7024cfdb20cfd7b92)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_display.h"
#include "regs/xe_irq_regs.h"

#include <linux/fb.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <uapi/drm/xe_drm.h>

#include "soc/intel_dram.h"
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "xe_module.h"

/* Xe device functions */

static bool has_display(struct xe_device *xe)
{
	return HAS_DISPLAY(&xe->display);
}

/**
 * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
 *				   early on
 * @pdev: PCI device
 *
 * Returns: true if probe needs to be deferred, false otherwise
 */
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
	if (!xe_modparam.probe_display)
		return false;

	return intel_display_driver_probe_defer(pdev);
}

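/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * PCI probe path would typically consult the helper above before binding:
 *
 *	static int example_pci_probe(struct pci_dev *pdev,
 *				     const struct pci_device_id *ent)
 *	{
 *		if (xe_display_driver_probe_defer(pdev))
 *			return -EPROBE_DEFER;
 *		...
 *	}
 *
 * Returning -EPROBE_DEFER asks the driver core to retry probing later,
 * once the drivers we are waiting for have loaded.
 */
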
/**
 * xe_display_driver_set_hooks - Add driver flags and hooks for display
 * @driver: DRM device driver
 *
 * Set features and function hooks in @driver that are needed for driving the
 * display IP. This sets the driver's capability of driving display, regardless
 * of whether the device has it enabled.
 */
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
	if (!xe_modparam.probe_display)
		return;

	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}

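/*
 * Ordering sketch (assumed init flow, shown for clarity): this must run
 * before the DRM device is allocated from the same drm_driver, e.g.:
 *
 *	xe_display_driver_set_hooks(&driver);
 *	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
 *
 * Note the asymmetry with unset_display_features() below: the hooks are
 * set in the shared drm_driver, while per-device masking uses
 * drm.driver_features, so one device lacking display does not strip
 * modeset support from the others.
 */
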
/* Strip the display feature flags set in xe_display_driver_set_hooks(). */
static void unset_display_features(struct xe_device *xe)
{
	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}

static void display_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	destroy_workqueue(xe->display.hotplug.dp_wq);
}

/**
 * xe_display_create - create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be xe->display.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_create(struct xe_device *xe)
{
	spin_lock_init(&xe->display.fb_tracking.lock);

	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
	if (!xe->display.hotplug.dp_wq)
		return -ENOMEM;

	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}

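/*
 * Lifetime sketch: drmm_add_action_or_reset() ties display_destroy() to
 * the release of the drm_device; if the registration itself fails, the
 * action is invoked immediately, so the workqueue cannot leak on the
 * error path and no explicit unwind is needed here:
 *
 *	ret = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
 *	if (ret)
 *		return ret;
 */
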
/* Undo xe_display_init_early(); runs as a devm action on driver removal. */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}

int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(xe);

	intel_display_driver_early_probe(display);

	/* Early display init. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(xe);

	intel_bw_init_hw(xe);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);

err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}

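/*
 * Ordering sketch of the early bring-up above (mirrors the i915-derived
 * split): probe_noirq -> probe_nogem here, then the full
 * intel_display_driver_probe() in xe_display_init(). Teardown in
 * xe_display_fini_early() runs in reverse (remove_nogem before
 * remove_noirq), and the error labels follow the usual goto-unwind idiom,
 * each undoing only the steps that already succeeded:
 *
 *	err = intel_display_driver_probe_nogem(display);
 *	if (err)
 *		goto err_noirq;
 */
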
int xe_display_init(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return 0;

	return intel_display_driver_probe(display);
}

void xe_display_fini(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_hpd_poll_fini(xe);

	intel_hdcp_component_fini(display);
	intel_audio_deinit(xe);
}

void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
	intel_register_dsm_handler();
}

void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_unregister_dsm_handler();
	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}

void xe_display_driver_remove(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove(display);
}

/* IRQ-related functions */

void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
	if (!xe->info.probe_display)
		return;

	if (master_ctl & DISPLAY_IRQ)
		gen11_display_irq_handler(xe);
}

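/*
 * Context sketch (hypothetical caller shape, based on how the xe IRQ code
 * generally dispatches): the top-level interrupt handler reads the master
 * control register and fans out, e.g.:
 *
 *	master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);
 *	xe_display_irq_handler(xe, master_ctl);
 *
 * Only the DISPLAY_IRQ bit is consumed here; GT and other bits are
 * handled by their own demultiplexers.
 */
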
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (gu_misc_iir & GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

void xe_display_irq_reset(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	gen11_display_irq_reset(xe);
}

void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
	if (!xe->info.probe_display)
		return;

	if (gt->info.id == XE_GT0)
		gen11_de_irq_postinstall(xe);
}

static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

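/*
 * Background note (hedged summary): an ACPI target state below S3 (S0/S1)
 * means we are suspending to idle rather than to RAM; without
 * CONFIG_ACPI_SLEEP there is no firmware sleep state to query, so assume
 * full suspend. The result steers the PCI power state reported through
 * the opregion and the s2idle flag passed to
 * intel_display_power_suspend_late().
 */
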
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}

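/*
 * Pattern note (generic sketch of the idiom above): the commit must be
 * reference-counted while commit_lock is dropped, because it may be freed
 * as soon as its cleanup completes. The shape is the usual
 * get-under-lock / wait / put sequence:
 *
 *	spin_lock(&lock);
 *	obj = list_first_entry_or_null(...);
 *	if (obj)
 *		get(obj);
 *	spin_unlock(&lock);
 *
 *	if (obj) {
 *		wait_for_completion(&obj->done);
 *		put(obj);
 *	}
 */
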
/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers; make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	if (!runtime)
		intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);

	if (!runtime && has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);

	intel_hpd_cancel_work(xe);

	if (!runtime && has_display(xe)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(display);
	}

	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);

	if (runtime && has_display(xe))
		intel_hpd_poll_enable(xe);
}

void xe_display_pm_suspend(struct xe_device *xe)
{
	__xe_display_pm_suspend(xe, false);
}

void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_hpd_cancel_work(xe);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}

void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		__xe_display_pm_suspend(xe, true);
		return;
	}

	intel_hpd_poll_enable(xe);
}

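/*
 * Policy sketch: display hardware state survives anything short of
 * D3cold, so the full suspend path runs only when d3cold is allowed. For
 * a D3hot-only runtime suspend it is enough to switch connector hotplug
 * detection over to polling, since HPD interrupts cannot be relied upon
 * while suspended.
 */
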
void xe_display_pm_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	intel_display_power_suspend_late(display, s2idle);
}

void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}

void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}

void xe_display_pm_resume_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_power_resume_early(display);
}

static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (!runtime && has_display(xe))
		intel_display_driver_resume_access(display);

	intel_hpd_init(xe);

	if (!runtime && has_display(xe)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	if (has_display(xe))
		intel_hpd_poll_disable(xe);

	intel_opregion_resume(display);

	if (!runtime)
		intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(display);
}

void xe_display_pm_resume(struct xe_device *xe)
{
	__xe_display_pm_resume(xe, false);
}

void xe_display_pm_runtime_resume(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		__xe_display_pm_resume(xe, true);
		return;
	}

	intel_hpd_init(xe);
	intel_hpd_poll_disable(xe);
}

static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}

int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev);

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	if (has_display(xe))
		return 0;

no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
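
/*
 * End-of-probe note (sketch): when display probing is disabled, or the
 * device has no display hardware, the DRIVER_MODESET / DRIVER_ATOMIC bits
 * are masked out again via unset_display_features(), so the DRM core
 * rejects modeset ioctls (drm_core_check_feature() fails, yielding
 * -EOPNOTSUPP) and userspace sees a render-only device.
 */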
517