xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision ddb7a62af2e766eabb4ab7080e6ed8d6b8915302)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_client.h>
12 #include <drm/drm_client_event.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_managed.h>
15 #include <drm/drm_probe_helper.h>
16 #include <uapi/drm/xe_drm.h>
17 
18 #include "soc/intel_dram.h"
19 #include "intel_acpi.h"
20 #include "intel_audio.h"
21 #include "intel_bw.h"
22 #include "intel_display.h"
23 #include "intel_display_core.h"
24 #include "intel_display_driver.h"
25 #include "intel_display_irq.h"
26 #include "intel_display_types.h"
27 #include "intel_dmc.h"
28 #include "intel_dmc_wl.h"
29 #include "intel_dp.h"
30 #include "intel_encoder.h"
31 #include "intel_fbdev.h"
32 #include "intel_hdcp.h"
33 #include "intel_hotplug.h"
34 #include "intel_opregion.h"
35 #include "skl_watermark.h"
36 #include "xe_module.h"
37 
38 /* Xe device functions */
39 
has_display(struct xe_device * xe)40 static bool has_display(struct xe_device *xe)
41 {
42 	struct intel_display *display = xe->display;
43 
44 	return HAS_DISPLAY(display);
45 }
46 
47 /**
48  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
49  *				   early on
50  * @pdev: PCI device
51  *
52  * Note: This is called before xe or display device creation.
53  *
54  * Returns: true if probe needs to be deferred, false otherwise
55  */
xe_display_driver_probe_defer(struct pci_dev * pdev)56 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
57 {
58 	if (!xe_modparam.probe_display)
59 		return 0;
60 
61 	return intel_display_driver_probe_defer(pdev);
62 }
63 
64 /**
65  * xe_display_driver_set_hooks - Add driver flags and hooks for display
66  * @driver: DRM device driver
67  *
68  * Set features and function hooks in @driver that are needed for driving the
69  * display IP. This sets the driver's capability of driving display, regardless
70  * if the device has it enabled
71  *
72  * Note: This is called before xe or display device creation.
73  */
xe_display_driver_set_hooks(struct drm_driver * driver)74 void xe_display_driver_set_hooks(struct drm_driver *driver)
75 {
76 	if (!xe_modparam.probe_display)
77 		return;
78 
79 #ifdef CONFIG_DRM_FBDEV_EMULATION
80 	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
81 #endif
82 
83 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
84 }
85 
/* Strip the display-related feature bits so userspace cannot use KMS. */
static void unset_display_features(struct xe_device *xe)
{
	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
90 
/*
 * devm cleanup action paired with xe_display_init_early(); unwinds the early
 * init sequence in reverse order.
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
104 
/*
 * Early display initialization, run before IRQ and GEM setup.  On success a
 * devm action is registered so xe_display_fini_early() undoes everything on
 * driver teardown; on failure the partial setup is unwound via the error
 * labels below.
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	err = intel_dram_detect(xe);
	if (err)
		goto err_opregion;

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
err_noirq:
	/* Unwind in reverse order of the setup above. */
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
149 
/*
 * devm cleanup action paired with xe_display_init(); no probe_display check
 * needed since the action is only registered when display was initialized.
 */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
160 
xe_display_init(struct xe_device * xe)161 int xe_display_init(struct xe_device *xe)
162 {
163 	struct intel_display *display = xe->display;
164 	int err;
165 
166 	if (!xe->info.probe_display)
167 		return 0;
168 
169 	err = intel_display_driver_probe(display);
170 	if (err)
171 		return err;
172 
173 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
174 }
175 
/* Register the display driver with userspace and enable power domains. */
void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
}
186 
/* Inverse of xe_display_register(), in reverse order. */
void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}
197 
198 /* IRQ-related functions */
199 
/* Dispatch display interrupts flagged in the master IRQ control register. */
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
	if (!xe->info.probe_display || !(master_ctl & DISPLAY_IRQ))
		return;

	gen11_display_irq_handler(xe->display);
}
210 
/* Forward a GSE (graphics system event) interrupt to the opregion code. */
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
	if (!xe->info.probe_display || !(gu_misc_iir & GU_MISC_GSE))
		return;

	intel_opregion_asle_intr(xe->display);
}
221 
xe_display_irq_reset(struct xe_device * xe)222 void xe_display_irq_reset(struct xe_device *xe)
223 {
224 	struct intel_display *display = xe->display;
225 
226 	if (!xe->info.probe_display)
227 		return;
228 
229 	gen11_display_irq_reset(display);
230 }
231 
xe_display_irq_postinstall(struct xe_device * xe,struct xe_gt * gt)232 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
233 {
234 	struct intel_display *display = xe->display;
235 
236 	if (!xe->info.probe_display)
237 		return;
238 
239 	if (gt->info.id == XE_GT0)
240 		gen11_de_irq_postinstall(display);
241 }
242 
/*
 * True when the ACPI target sleep state is shallower than S3 (i.e. s2idle).
 * Always false when ACPI sleep support is not built in.
 */
static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
251 
/*
 * Wait for the first pending commit on each CRTC to finish its cleanup work.
 * A reference is taken under commit_lock so the commit cannot be freed while
 * we wait on it after dropping the lock.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
272 
/* Quiesce display so the device can enter D3cold at runtime suspend. */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	if (has_display(xe))
		intel_hpd_poll_enable(display);
}
295 
/* Undo xe_display_enable_d3cold() when resuming from D3cold. */
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	if (has_display(xe))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}
319 
/*
 * System suspend handler for display: stop userspace/client access, flush
 * pending commit cleanup, suspend encoders, then pass the target PCI power
 * state (D1 for s2idle, D3cold otherwise) to the opregion code.
 */
void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);

	intel_hpd_cancel_work(display);

	if (has_display(xe)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(display);
	}

	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}
354 
/*
 * Shutdown/reboot handler: similar to xe_display_pm_suspend(), but also
 * suspends DP MST and shuts down all encoders unconditionally.
 */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_hpd_cancel_work(display);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
385 
xe_display_pm_runtime_suspend(struct xe_device * xe)386 void xe_display_pm_runtime_suspend(struct xe_device *xe)
387 {
388 	struct intel_display *display = xe->display;
389 
390 	if (!xe->info.probe_display)
391 		return;
392 
393 	if (xe->d3cold.allowed) {
394 		xe_display_enable_d3cold(xe);
395 		return;
396 	}
397 
398 	intel_hpd_poll_enable(display);
399 }
400 
xe_display_pm_suspend_late(struct xe_device * xe)401 void xe_display_pm_suspend_late(struct xe_device *xe)
402 {
403 	struct intel_display *display = xe->display;
404 	bool s2idle = suspend_to_idle();
405 
406 	if (!xe->info.probe_display)
407 		return;
408 
409 	intel_display_power_suspend_late(display, s2idle);
410 }
411 
/* Late runtime-suspend step; full power suspend only when D3cold is allowed. */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
429 
/* Final shutdown step for display power. */
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}
444 
xe_display_pm_resume_early(struct xe_device * xe)445 void xe_display_pm_resume_early(struct xe_device *xe)
446 {
447 	struct intel_display *display = xe->display;
448 
449 	if (!xe->info.probe_display)
450 		return;
451 
452 	intel_display_power_resume_early(display);
453 }
454 
/*
 * System resume handler for display: re-init hardware, restore hotplug and
 * user access, and re-enable power domains — the inverse of
 * xe_display_pm_suspend(), in reverse order.
 */
void xe_display_pm_resume(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (has_display(xe))
		intel_display_driver_resume_access(display);

	intel_hpd_init(display);

	if (has_display(xe)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	if (has_display(xe))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	drm_client_dev_resume(&xe->drm, false);

	intel_power_domains_enable(display);
}
489 
xe_display_pm_runtime_resume(struct xe_device * xe)490 void xe_display_pm_runtime_resume(struct xe_device *xe)
491 {
492 	struct intel_display *display = xe->display;
493 
494 	if (!xe->info.probe_display)
495 		return;
496 
497 	if (xe->d3cold.allowed) {
498 		xe_display_disable_d3cold(xe);
499 		return;
500 	}
501 
502 	intel_hpd_init(display);
503 	intel_hpd_poll_disable(display);
504 	skl_watermark_ipc_update(display);
505 }
506 
507 
/* drmm action: destroy the display device created in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}
514 
/**
 * xe_display_probe - probe display and create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be xe->display.
 *
 * Returns: 0 on success
 */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev);
	if (IS_ERR(display))
		return PTR_ERR(display);

	/* Ensure the display device is destroyed on DRM device release. */
	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	xe->display = display;

	if (has_display(xe))
		return 0;

no_display:
	/* No usable display: clear the flag and the KMS driver features. */
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
553