xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision 29fdc6e98d3c3657c8b4874ab3bfc75f9df59bf4)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_client.h>
12 #include <drm/drm_client_event.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_managed.h>
15 #include <drm/drm_probe_helper.h>
16 #include <drm/intel/display_member.h>
17 #include <drm/intel/display_parent_interface.h>
18 #include <uapi/drm/xe_drm.h>
19 
20 #include "intel_acpi.h"
21 #include "intel_audio.h"
22 #include "intel_bw.h"
23 #include "intel_display.h"
24 #include "intel_display_device.h"
25 #include "intel_display_driver.h"
26 #include "intel_display_irq.h"
27 #include "intel_display_types.h"
28 #include "intel_dmc.h"
29 #include "intel_dmc_wl.h"
30 #include "intel_dp.h"
31 #include "intel_dram.h"
32 #include "intel_encoder.h"
33 #include "intel_fbdev.h"
34 #include "intel_hdcp.h"
35 #include "intel_hotplug.h"
36 #include "intel_opregion.h"
37 #include "skl_watermark.h"
38 #include "xe_display_rpm.h"
39 #include "xe_module.h"
40 #include "xe_hdcp_gsc.h"
41 
/*
 * Build-time check that the drm and display members of struct xe_device are
 * laid out where the shared display code expects them to be.
 */
INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display);
44 
45 /* Xe device functions */
46 
47 /**
48  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
49  *				   early on
50  * @pdev: PCI device
51  *
52  * Note: This is called before xe or display device creation.
53  *
54  * Returns: true if probe needs to be deferred, false otherwise
55  */
56 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
57 {
58 	if (!xe_modparam.probe_display)
59 		return 0;
60 
61 	return intel_display_driver_probe_defer(pdev);
62 }
63 
64 /**
65  * xe_display_driver_set_hooks - Add driver flags and hooks for display
66  * @driver: DRM device driver
67  *
68  * Set features and function hooks in @driver that are needed for driving the
69  * display IP. This sets the driver's capability of driving display, regardless
70  * if the device has it enabled
71  *
72  * Note: This is called before xe or display device creation.
73  */
74 void xe_display_driver_set_hooks(struct drm_driver *driver)
75 {
76 	if (!xe_modparam.probe_display)
77 		return;
78 
79 #ifdef CONFIG_DRM_FBDEV_EMULATION
80 	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
81 #endif
82 
83 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
84 }
85 
86 static void unset_display_features(struct xe_device *xe)
87 {
88 	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
89 }
90 
/*
 * devm action registered by xe_display_init_early(); tears down what that
 * function set up, in reverse order of initialization.
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/* Stop pending hotplug work before dismantling driver state. */
	intel_hpd_cancel_work(display);
	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
105 
/*
 * Early display initialization: opregion, DRAM detection, and the noirq/nogem
 * probe phases of the shared display driver. Cleanup is registered as a devm
 * action (xe_display_fini_early), so on success no manual teardown is needed.
 *
 * Returns 0 on success or a negative error code.
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	err = intel_dram_detect(display);
	if (err)
		goto err_opregion;

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	/* On failure the action runs immediately, undoing everything above. */
	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
	/* Unwind in reverse order of the init steps above. */
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
150 
/*
 * devm action registered by xe_display_init(); undoes the full display driver
 * probe. No probe_display guard needed: the action is only registered when
 * display was probed.
 */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
161 
162 int xe_display_init(struct xe_device *xe)
163 {
164 	struct intel_display *display = xe->display;
165 	int err;
166 
167 	if (!xe->info.probe_display)
168 		return 0;
169 
170 	err = intel_display_driver_probe(display);
171 	if (err)
172 		return err;
173 
174 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
175 }
176 
177 void xe_display_register(struct xe_device *xe)
178 {
179 	struct intel_display *display = xe->display;
180 
181 	if (!xe->info.probe_display)
182 		return;
183 
184 	intel_display_driver_register(display);
185 	intel_power_domains_enable(display);
186 }
187 
188 void xe_display_unregister(struct xe_device *xe)
189 {
190 	struct intel_display *display = xe->display;
191 
192 	if (!xe->info.probe_display)
193 		return;
194 
195 	intel_power_domains_disable(display);
196 	intel_display_driver_unregister(display);
197 }
198 
199 /* IRQ-related functions */
200 
201 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
202 {
203 	struct intel_display *display = xe->display;
204 
205 	if (!xe->info.probe_display)
206 		return;
207 
208 	if (master_ctl & DISPLAY_IRQ)
209 		gen11_display_irq_handler(display);
210 }
211 
212 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
213 {
214 	struct intel_display *display = xe->display;
215 
216 	if (!xe->info.probe_display)
217 		return;
218 
219 	if (gu_misc_iir & GU_MISC_GSE)
220 		intel_opregion_asle_intr(display);
221 }
222 
223 void xe_display_irq_reset(struct xe_device *xe)
224 {
225 	struct intel_display *display = xe->display;
226 
227 	if (!xe->info.probe_display)
228 		return;
229 
230 	gen11_display_irq_reset(display);
231 }
232 
233 void xe_display_irq_postinstall(struct xe_device *xe)
234 {
235 	struct intel_display *display = xe->display;
236 
237 	if (!xe->info.probe_display)
238 		return;
239 
240 	gen11_de_irq_postinstall(display);
241 }
242 
243 static bool suspend_to_idle(void)
244 {
245 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
246 	if (acpi_target_system_state() < ACPI_STATE_S3)
247 		return true;
248 #endif
249 	return false;
250 }
251 
/*
 * Wait for the oldest pending atomic commit on every CRTC to finish its
 * cleanup. A reference on the commit is taken under commit_lock so it stays
 * valid while we wait outside the lock.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		/* Sleeping wait must happen outside the spinlock. */
		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
272 
/*
 * Prepare display for entering D3cold during runtime suspend: quiesce power
 * domains and pending commits, then suspend opregion and DMC state.
 */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	/* Interrupt-driven hotplug is unavailable in D3cold; fall back to polling. */
	if (intel_display_device_present(display))
		intel_hpd_poll_enable(display);
}
295 
/*
 * Restore display state after leaving D3cold during runtime resume; mirrors
 * xe_display_enable_d3cold() in reverse order.
 */
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (intel_display_device_present(display))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	/* Interrupts work again, so stop the hotplug polling started on suspend. */
	if (intel_display_device_present(display))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}
319 
/*
 * System suspend handler for display: quiesce userspace access, hotplug and
 * pending commits, then put opregion and DMC to sleep. The opregion target
 * state depends on whether we are going to s2idle or a deeper sleep state.
 */
void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);

	/* No new hotplug events may be processed from here on. */
	intel_encoder_block_all_hpds(display);

	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(display);
	}

	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}
356 
/*
 * Shutdown handler for display. Similar to xe_display_pm_suspend(), but also
 * suspends DP MST and shuts down all encoders since we won't resume.
 */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_encoder_block_all_hpds(display);
	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
388 
389 void xe_display_pm_runtime_suspend(struct xe_device *xe)
390 {
391 	struct intel_display *display = xe->display;
392 
393 	if (!xe->info.probe_display)
394 		return;
395 
396 	if (xe->d3cold.allowed) {
397 		xe_display_enable_d3cold(xe);
398 		return;
399 	}
400 
401 	intel_hpd_poll_enable(display);
402 }
403 
404 void xe_display_pm_suspend_late(struct xe_device *xe)
405 {
406 	struct intel_display *display = xe->display;
407 	bool s2idle = suspend_to_idle();
408 
409 	if (!xe->info.probe_display)
410 		return;
411 
412 	intel_display_power_suspend_late(display, s2idle);
413 }
414 
/* Late runtime-suspend phase; full power suspend only when D3cold is allowed. */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
432 
433 void xe_display_pm_shutdown_late(struct xe_device *xe)
434 {
435 	struct intel_display *display = xe->display;
436 
437 	if (!xe->info.probe_display)
438 		return;
439 
440 	/*
441 	 * The only requirement is to reboot with display DC states disabled,
442 	 * for now leaving all display power wells in the INIT power domain
443 	 * enabled.
444 	 */
445 	intel_power_domains_driver_remove(display);
446 }
447 
448 void xe_display_pm_resume_early(struct xe_device *xe)
449 {
450 	struct intel_display *display = xe->display;
451 
452 	if (!xe->info.probe_display)
453 		return;
454 
455 	intel_display_power_resume_early(display);
456 }
457 
458 void xe_display_pm_resume(struct xe_device *xe)
459 {
460 	struct intel_display *display = xe->display;
461 
462 	if (!xe->info.probe_display)
463 		return;
464 
465 	intel_dmc_resume(display);
466 
467 	if (intel_display_device_present(display))
468 		drm_mode_config_reset(&xe->drm);
469 
470 	intel_display_driver_init_hw(display);
471 
472 	if (intel_display_device_present(display))
473 		intel_display_driver_resume_access(display);
474 
475 	intel_hpd_init(display);
476 
477 	intel_encoder_unblock_all_hpds(display);
478 
479 	if (intel_display_device_present(display)) {
480 		intel_display_driver_resume(display);
481 		drm_kms_helper_poll_enable(&xe->drm);
482 		intel_display_driver_enable_user_access(display);
483 	}
484 
485 	if (intel_display_device_present(display))
486 		intel_hpd_poll_disable(display);
487 
488 	intel_opregion_resume(display);
489 
490 	drm_client_dev_resume(&xe->drm);
491 
492 	intel_power_domains_enable(display);
493 }
494 
495 void xe_display_pm_runtime_resume(struct xe_device *xe)
496 {
497 	struct intel_display *display = xe->display;
498 
499 	if (!xe->info.probe_display)
500 		return;
501 
502 	if (xe->d3cold.allowed) {
503 		xe_display_disable_d3cold(xe);
504 		return;
505 	}
506 
507 	intel_hpd_init(display);
508 	intel_hpd_poll_disable(display);
509 	skl_watermark_ipc_update(display);
510 }
511 
512 
/* drmm action: destroy the display device created in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	intel_display_device_remove(arg);
}
519 
520 static bool irq_enabled(struct drm_device *drm)
521 {
522 	struct xe_device *xe = to_xe_device(drm);
523 
524 	return atomic_read(&xe->irq.enabled);
525 }
526 
527 static void irq_synchronize(struct drm_device *drm)
528 {
529 	synchronize_irq(to_pci_dev(drm->dev)->irq);
530 }
531 
/* IRQ hooks the shared display code uses to query/synchronize xe interrupts. */
static const struct intel_display_irq_interface xe_display_irq_interface = {
	.enabled = irq_enabled,
	.synchronize = irq_synchronize,
};
536 
/* Services the xe driver provides to the shared display code. */
static const struct intel_display_parent_interface parent = {
	.hdcp = &xe_display_hdcp_interface,
	.rpm = &xe_display_rpm_interface,
	.irq = &xe_display_irq_interface,
};
542 
/**
 * xe_display_probe - probe display and create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be xe->display.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev, &parent);
	if (IS_ERR(display))
		return PTR_ERR(display);

	/* Auto-destroy the display device when the drm device goes away. */
	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	xe->display = display;

	if (intel_display_device_present(display))
		return 0;

	/* No usable display: clear the flag and capabilities, still succeed. */
no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
581