xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision a200cdbf95932631ec338d08a6e9e31b34c4e8a6)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_client.h>
12 #include <drm/drm_client_event.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_managed.h>
15 #include <drm/drm_probe_helper.h>
16 #include <drm/intel/display_member.h>
17 #include <drm/intel/display_parent_interface.h>
18 #include <uapi/drm/xe_drm.h>
19 
20 #include "intel_acpi.h"
21 #include "intel_audio.h"
22 #include "intel_bw.h"
23 #include "intel_display.h"
24 #include "intel_display_core.h"
25 #include "intel_display_device.h"
26 #include "intel_display_driver.h"
27 #include "intel_display_irq.h"
28 #include "intel_display_types.h"
29 #include "intel_dmc.h"
30 #include "intel_dmc_wl.h"
31 #include "intel_dp.h"
32 #include "intel_dram.h"
33 #include "intel_encoder.h"
34 #include "intel_fbdev.h"
35 #include "intel_hdcp.h"
36 #include "intel_hotplug.h"
37 #include "intel_opregion.h"
38 #include "skl_watermark.h"
39 #include "xe_display_bo.h"
40 #include "xe_display_pcode.h"
41 #include "xe_display_rpm.h"
42 #include "xe_dsb_buffer.h"
43 #include "xe_frontbuffer.h"
44 #include "xe_hdcp_gsc.h"
45 #include "xe_initial_plane.h"
46 #include "xe_module.h"
47 #include "xe_panic.h"
48 #include "xe_stolen.h"
49 
/* Ensure drm and display members are placed properly. */
INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display);
52 
53 /* Xe device functions */
54 
55 /**
56  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
57  *				   early on
58  * @pdev: PCI device
59  *
60  * Note: This is called before xe or display device creation.
61  *
62  * Returns: true if probe needs to be deferred, false otherwise
63  */
64 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
65 {
66 	if (!xe_modparam.probe_display)
67 		return 0;
68 
69 	return intel_display_driver_probe_defer(pdev);
70 }
71 
72 /**
73  * xe_display_driver_set_hooks - Add driver flags and hooks for display
74  * @driver: DRM device driver
75  *
76  * Set features and function hooks in @driver that are needed for driving the
77  * display IP. This sets the driver's capability of driving display, regardless
78  * if the device has it enabled
79  *
80  * Note: This is called before xe or display device creation.
81  */
82 void xe_display_driver_set_hooks(struct drm_driver *driver)
83 {
84 	if (!xe_modparam.probe_display)
85 		return;
86 
87 #ifdef CONFIG_DRM_FBDEV_EMULATION
88 	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
89 #endif
90 
91 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
92 }
93 
94 static void unset_display_features(struct xe_device *xe)
95 {
96 	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
97 }
98 
/*
 * devm action registered by xe_display_init_early(): undo the early display
 * probe steps in reverse order.
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/* Stop hotplug processing before dismantling the driver state. */
	intel_hpd_cancel_work(display);
	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
113 
/**
 * xe_display_init_early - Early (pre-IRQ, pre-GEM) display initialization
 * @xe: XE device instance
 *
 * Runs the display probe steps that must happen before interrupts and buffer
 * management are set up, and registers a devm action to undo them on device
 * removal.
 *
 * Returns: 0 on success, negative error code on failure.
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	err = intel_dram_detect(display);
	if (err)
		goto err_opregion;

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	/* On success, teardown is handled by xe_display_fini_early(). */
	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
158 
/*
 * devm action registered by xe_display_init(): tear down what
 * intel_display_driver_probe() set up.  No probe_display check is needed
 * here, the action is only registered after a successful probe.
 */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
169 
170 int xe_display_init(struct xe_device *xe)
171 {
172 	struct intel_display *display = xe->display;
173 	int err;
174 
175 	if (!xe->info.probe_display)
176 		return 0;
177 
178 	err = intel_display_driver_probe(display);
179 	if (err)
180 		return err;
181 
182 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
183 }
184 
185 void xe_display_register(struct xe_device *xe)
186 {
187 	struct intel_display *display = xe->display;
188 
189 	if (!xe->info.probe_display)
190 		return;
191 
192 	intel_display_driver_register(display);
193 	intel_power_domains_enable(display);
194 }
195 
196 void xe_display_unregister(struct xe_device *xe)
197 {
198 	struct intel_display *display = xe->display;
199 
200 	if (!xe->info.probe_display)
201 		return;
202 
203 	intel_power_domains_disable(display);
204 	intel_display_driver_unregister(display);
205 }
206 
207 /* IRQ-related functions */
208 
209 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
210 {
211 	struct intel_display *display = xe->display;
212 
213 	if (!xe->info.probe_display)
214 		return;
215 
216 	if (master_ctl & DISPLAY_IRQ)
217 		gen11_display_irq_handler(display);
218 }
219 
220 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
221 {
222 	struct intel_display *display = xe->display;
223 
224 	if (!xe->info.probe_display)
225 		return;
226 
227 	if (gu_misc_iir & GU_MISC_GSE)
228 		intel_opregion_asle_intr(display);
229 }
230 
231 void xe_display_irq_reset(struct xe_device *xe)
232 {
233 	struct intel_display *display = xe->display;
234 
235 	if (!xe->info.probe_display)
236 		return;
237 
238 	gen11_display_irq_reset(display);
239 }
240 
241 void xe_display_irq_postinstall(struct xe_device *xe)
242 {
243 	struct intel_display *display = xe->display;
244 
245 	if (!xe->info.probe_display)
246 		return;
247 
248 	gen11_de_irq_postinstall(display);
249 }
250 
251 static bool suspend_to_idle(void)
252 {
253 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
254 	if (acpi_target_system_state() < ACPI_STATE_S3)
255 		return true;
256 #endif
257 	return false;
258 }
259 
260 static void xe_display_flush_cleanup_work(struct xe_device *xe)
261 {
262 	struct intel_crtc *crtc;
263 
264 	for_each_intel_crtc(&xe->drm, crtc) {
265 		struct drm_crtc_commit *commit;
266 
267 		spin_lock(&crtc->base.commit_lock);
268 		commit = list_first_entry_or_null(&crtc->base.commit_list,
269 						  struct drm_crtc_commit, commit_entry);
270 		if (commit)
271 			drm_crtc_commit_get(commit);
272 		spin_unlock(&crtc->base.commit_lock);
273 
274 		if (commit) {
275 			wait_for_completion(&commit->cleanup_done);
276 			drm_crtc_commit_put(commit);
277 		}
278 	}
279 }
280 
/* Quiesce the display for runtime D3cold entry. */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	/* Let in-flight commit cleanup finish before powering down. */
	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	/* Switch to polling since HPD interrupts won't be serviced. */
	if (intel_display_device_present(display))
		intel_hpd_poll_enable(display);
}
303 
/* Bring the display back after returning from runtime D3cold. */
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (intel_display_device_present(display))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	/* HPD interrupts are serviced again; stop the poll fallback. */
	if (intel_display_device_present(display))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}
327 
/* System suspend entry point for the display side. */
void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		/* Keep userspace out while the hardware is quiesced. */
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	/* Let in-flight commit cleanup finish before going further down. */
	xe_display_flush_cleanup_work(xe);

	intel_encoder_block_all_hpds(display);

	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(display);
	}

	/* Report the target power state: D1 for s2idle, D3cold otherwise. */
	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}
364 
/* System shutdown/reboot entry point for the display side. */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	/* Let in-flight commit cleanup finish before going further down. */
	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_encoder_block_all_hpds(display);
	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display))
		intel_display_driver_suspend_access(display);

	/* Unlike suspend, shutdown also fully powers encoders off. */
	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
396 
397 void xe_display_pm_runtime_suspend(struct xe_device *xe)
398 {
399 	struct intel_display *display = xe->display;
400 
401 	if (!xe->info.probe_display)
402 		return;
403 
404 	if (xe->d3cold.allowed) {
405 		xe_display_enable_d3cold(xe);
406 		return;
407 	}
408 
409 	intel_hpd_poll_enable(display);
410 }
411 
/* Late suspend step: drop the remaining display power, s2idle-aware. */
void xe_display_pm_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	intel_display_power_suspend_late(display, s2idle);
}
422 
/* Late runtime-suspend step; only does the full power drop for D3cold. */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
440 
/* Late shutdown step. */
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}
455 
456 void xe_display_pm_resume_early(struct xe_device *xe)
457 {
458 	struct intel_display *display = xe->display;
459 
460 	if (!xe->info.probe_display)
461 		return;
462 
463 	intel_display_power_resume_early(display);
464 }
465 
/* System resume entry point for the display side; mirrors pm_suspend(). */
void xe_display_pm_resume(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (intel_display_device_present(display))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (intel_display_device_present(display))
		intel_display_driver_resume_access(display);

	/*
	 * We need to restore the HPD interrupt state here, before the
	 * encoders/connectors are brought back up and userspace regains
	 * access.
	 */
	intel_hpd_init(display);

	intel_encoder_unblock_all_hpds(display);

	if (intel_display_device_present(display)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	if (intel_display_device_present(display))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	drm_client_dev_resume(&xe->drm);

	intel_power_domains_enable(display);
}
502 
503 void xe_display_pm_runtime_resume(struct xe_device *xe)
504 {
505 	struct intel_display *display = xe->display;
506 
507 	if (!xe->info.probe_display)
508 		return;
509 
510 	if (xe->d3cold.allowed) {
511 		xe_display_disable_d3cold(xe);
512 		return;
513 	}
514 
515 	intel_hpd_init(display);
516 	intel_hpd_poll_disable(display);
517 	skl_watermark_ipc_update(display);
518 }
519 
520 
/* drmm action: destroy the display device created in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	intel_display_device_remove(arg);
}
527 
528 static bool irq_enabled(struct drm_device *drm)
529 {
530 	struct xe_device *xe = to_xe_device(drm);
531 
532 	return atomic_read(&xe->irq.enabled);
533 }
534 
535 static void irq_synchronize(struct drm_device *drm)
536 {
537 	synchronize_irq(to_pci_dev(drm->dev)->irq);
538 }
539 
/* Hooks letting the shared display core query/synchronize xe's IRQ state. */
static const struct intel_display_irq_interface xe_display_irq_interface = {
	.enabled = irq_enabled,
	.synchronize = irq_synchronize,
};
544 
545 static bool has_auxccs(struct drm_device *drm)
546 {
547 	struct xe_device *xe = to_xe_device(drm);
548 
549 	return xe->info.platform == XE_ALDERLAKE_P;
550 }
551 
/* xe's implementation of the services the shared display core requires. */
static const struct intel_display_parent_interface parent = {
	.bo = &xe_display_bo_interface,
	.dsb = &xe_display_dsb_interface,
	.frontbuffer = &xe_display_frontbuffer_interface,
	.hdcp = &xe_display_hdcp_interface,
	.initial_plane = &xe_display_initial_plane_interface,
	.irq = &xe_display_irq_interface,
	.panic = &xe_display_panic_interface,
	.pcode = &xe_display_pcode_interface,
	.rpm = &xe_display_rpm_interface,
	.stolen = &xe_display_stolen_interface,
	.has_auxccs = has_auxccs,
};
565 
566 /**
567  * xe_display_probe - probe display and create display struct
568  * @xe: XE device instance
569  *
570  * Initialize all fields used by the display part.
571  *
572  * TODO: once everything can be inside a single struct, make the struct opaque
573  * to the rest of xe and return it to be xe->display.
574  *
575  * Returns: 0 on success
576  */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev, &parent);
	if (IS_ERR(display))
		return PTR_ERR(display);

	/* The drmm action owns the display lifetime from here on. */
	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	xe->display = display;

	if (intel_display_device_present(display))
		return 0;

	/* No display hardware found: fall through and disable display support. */
no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
604