xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_client.h>
12 #include <drm/drm_client_event.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_managed.h>
15 #include <drm/drm_probe_helper.h>
16 #include <drm/intel/display_member.h>
17 #include <drm/intel/display_parent_interface.h>
18 #include <uapi/drm/xe_drm.h>
19 
20 #include "intel_acpi.h"
21 #include "intel_audio.h"
22 #include "intel_bw.h"
23 #include "intel_display.h"
24 #include "intel_display_device.h"
25 #include "intel_display_driver.h"
26 #include "intel_display_irq.h"
27 #include "intel_display_types.h"
28 #include "intel_dmc.h"
29 #include "intel_dmc_wl.h"
30 #include "intel_dp.h"
31 #include "intel_dram.h"
32 #include "intel_encoder.h"
33 #include "intel_fbdev.h"
34 #include "intel_hdcp.h"
35 #include "intel_hotplug.h"
36 #include "intel_opregion.h"
37 #include "skl_watermark.h"
38 #include "xe_display_rpm.h"
39 #include "xe_hdcp_gsc.h"
40 #include "xe_initial_plane.h"
41 #include "xe_module.h"
42 #include "xe_panic.h"
43 #include "xe_stolen.h"
44 
45 /* Ensure drm and display members are placed properly. */
46 INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display);
47 
48 /* Xe device functions */
49 
50 /**
51  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
52  *				   early on
53  * @pdev: PCI device
54  *
55  * Note: This is called before xe or display device creation.
56  *
57  * Returns: true if probe needs to be deferred, false otherwise
58  */
59 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
60 {
61 	if (!xe_modparam.probe_display)
62 		return 0;
63 
64 	return intel_display_driver_probe_defer(pdev);
65 }
66 
67 /**
68  * xe_display_driver_set_hooks - Add driver flags and hooks for display
69  * @driver: DRM device driver
70  *
71  * Set features and function hooks in @driver that are needed for driving the
72  * display IP. This sets the driver's capability of driving display, regardless
73  * if the device has it enabled
74  *
75  * Note: This is called before xe or display device creation.
76  */
77 void xe_display_driver_set_hooks(struct drm_driver *driver)
78 {
79 	if (!xe_modparam.probe_display)
80 		return;
81 
82 #ifdef CONFIG_DRM_FBDEV_EMULATION
83 	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
84 #endif
85 
86 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
87 }
88 
89 static void unset_display_features(struct xe_device *xe)
90 {
91 	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
92 }
93 
/*
 * devm action registered by xe_display_init_early(): tear down the early
 * display state in reverse order of the probe steps.
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/* Cancel pending HPD work before dismantling the state it uses. */
	intel_hpd_cancel_work(display);
	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
108 
/**
 * xe_display_init_early - Early display initialization (pre-IRQ, pre-GEM)
 * @xe: XE device instance
 *
 * Runs the "noirq" and "nogem" display probe phases and registers a devm
 * action (xe_display_fini_early()) that unwinds them on driver removal.
 *
 * Returns: 0 on success (or when display probing is disabled), negative
 * error code otherwise.
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/*
	 * Fake uncore lock: xe has no real uncore, but this spinlock is
	 * expected to exist. NOTE(review): presumably only taken by the
	 * shared display code - confirm.
	 */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	err = intel_dram_detect(display);
	if (err)
		goto err_opregion;

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	/* From here on, teardown happens via xe_display_fini_early(). */
	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);

	/* Unwind in reverse order of the probe steps above. */
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
153 
/*
 * devm action registered by xe_display_init(): unwind the main display
 * probe. No probe_display check needed here - the action is only installed
 * when display was actually probed.
 */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
164 
165 int xe_display_init(struct xe_device *xe)
166 {
167 	struct intel_display *display = xe->display;
168 	int err;
169 
170 	if (!xe->info.probe_display)
171 		return 0;
172 
173 	err = intel_display_driver_probe(display);
174 	if (err)
175 		return err;
176 
177 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
178 }
179 
180 void xe_display_register(struct xe_device *xe)
181 {
182 	struct intel_display *display = xe->display;
183 
184 	if (!xe->info.probe_display)
185 		return;
186 
187 	intel_display_driver_register(display);
188 	intel_power_domains_enable(display);
189 }
190 
191 void xe_display_unregister(struct xe_device *xe)
192 {
193 	struct intel_display *display = xe->display;
194 
195 	if (!xe->info.probe_display)
196 		return;
197 
198 	intel_power_domains_disable(display);
199 	intel_display_driver_unregister(display);
200 }
201 
202 /* IRQ-related functions */
203 
204 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
205 {
206 	struct intel_display *display = xe->display;
207 
208 	if (!xe->info.probe_display)
209 		return;
210 
211 	if (master_ctl & DISPLAY_IRQ)
212 		gen11_display_irq_handler(display);
213 }
214 
215 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
216 {
217 	struct intel_display *display = xe->display;
218 
219 	if (!xe->info.probe_display)
220 		return;
221 
222 	if (gu_misc_iir & GU_MISC_GSE)
223 		intel_opregion_asle_intr(display);
224 }
225 
226 void xe_display_irq_reset(struct xe_device *xe)
227 {
228 	struct intel_display *display = xe->display;
229 
230 	if (!xe->info.probe_display)
231 		return;
232 
233 	gen11_display_irq_reset(display);
234 }
235 
236 void xe_display_irq_postinstall(struct xe_device *xe)
237 {
238 	struct intel_display *display = xe->display;
239 
240 	if (!xe->info.probe_display)
241 		return;
242 
243 	gen11_de_irq_postinstall(display);
244 }
245 
246 static bool suspend_to_idle(void)
247 {
248 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
249 	if (acpi_target_system_state() < ACPI_STATE_S3)
250 		return true;
251 #endif
252 	return false;
253 }
254 
/*
 * Wait for the cleanup work of the first pending commit on each CRTC.
 *
 * The commit is referenced under commit_lock so it cannot be freed while
 * we wait for its cleanup_done completion outside the lock.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		/* Take a reference under the lock; wait outside of it. */
		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
275 
/* Quiesce display state in preparation for runtime D3cold entry. */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	/* Let outstanding commit cleanup finish before powering down. */
	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	/*
	 * NOTE(review): polling presumably substitutes for HPD interrupts
	 * while in D3cold - confirm.
	 */
	if (intel_display_device_present(display))
		intel_hpd_poll_enable(display);
}
298 
/* Undo xe_display_enable_d3cold() after leaving runtime D3cold. */
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	/* Reset SW state before re-initializing the HW (state was in D3cold). */
	if (intel_display_device_present(display))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	/* Interrupt-driven HPD is available again; stop polling. */
	if (intel_display_device_present(display))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}
322 
/**
 * xe_display_pm_suspend - Suspend display for system-wide suspend
 * @xe: XE device instance
 *
 * Ordering matters: user access and polling stop before driver state is
 * suspended, pending commit cleanup is flushed, and HPD work is cancelled
 * before encoders are suspended.
 */
void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);

	intel_encoder_block_all_hpds(display);

	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(display);
	}

	/* Target PCI power state reported to opregion: D1 for s2idle. */
	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}
359 
/**
 * xe_display_pm_shutdown - Quiesce display for system shutdown/reboot
 * @xe: XE device instance
 *
 * Similar to xe_display_pm_suspend(), but additionally suspends MST and
 * shuts down all encoders, since the device will not be resumed.
 */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	/* Flush pending commit cleanup before touching encoders/HPD. */
	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_encoder_block_all_hpds(display);
	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
391 
392 void xe_display_pm_runtime_suspend(struct xe_device *xe)
393 {
394 	struct intel_display *display = xe->display;
395 
396 	if (!xe->info.probe_display)
397 		return;
398 
399 	if (xe->d3cold.allowed) {
400 		xe_display_enable_d3cold(xe);
401 		return;
402 	}
403 
404 	intel_hpd_poll_enable(display);
405 }
406 
407 void xe_display_pm_suspend_late(struct xe_device *xe)
408 {
409 	struct intel_display *display = xe->display;
410 	bool s2idle = suspend_to_idle();
411 
412 	if (!xe->info.probe_display)
413 		return;
414 
415 	intel_display_power_suspend_late(display, s2idle);
416 }
417 
/* Late runtime-suspend step; the full late suspend is only needed for D3cold. */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
435 
/* Final display step on shutdown: remove power domain control. */
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}
450 
451 void xe_display_pm_resume_early(struct xe_device *xe)
452 {
453 	struct intel_display *display = xe->display;
454 
455 	if (!xe->info.probe_display)
456 		return;
457 
458 	intel_display_power_resume_early(display);
459 }
460 
/**
 * xe_display_pm_resume - Resume display after system-wide suspend
 * @xe: XE device instance
 *
 * Roughly mirrors xe_display_pm_suspend() in reverse: HW is re-initialized
 * and HPD restored before driver state resumes and user access/polling are
 * re-enabled.
 */
void xe_display_pm_resume(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	/* Reset SW state before re-initializing the HW. */
	if (intel_display_device_present(display))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (intel_display_device_present(display))
		intel_display_driver_resume_access(display);

	intel_hpd_init(display);

	intel_encoder_unblock_all_hpds(display);

	if (intel_display_device_present(display)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	/* Interrupt-driven HPD works again; stop polling. */
	if (intel_display_device_present(display))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	drm_client_dev_resume(&xe->drm);

	intel_power_domains_enable(display);
}
497 
498 void xe_display_pm_runtime_resume(struct xe_device *xe)
499 {
500 	struct intel_display *display = xe->display;
501 
502 	if (!xe->info.probe_display)
503 		return;
504 
505 	if (xe->d3cold.allowed) {
506 		xe_display_disable_d3cold(xe);
507 		return;
508 	}
509 
510 	intel_hpd_init(display);
511 	intel_hpd_poll_disable(display);
512 	skl_watermark_ipc_update(display);
513 }
514 
515 
/* drmm action: destroy the display device created in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}
522 
523 static bool irq_enabled(struct drm_device *drm)
524 {
525 	struct xe_device *xe = to_xe_device(drm);
526 
527 	return atomic_read(&xe->irq.enabled);
528 }
529 
530 static void irq_synchronize(struct drm_device *drm)
531 {
532 	synchronize_irq(to_pci_dev(drm->dev)->irq);
533 }
534 
/* IRQ hooks the shared display code uses to query/synchronize xe's IRQs. */
static const struct intel_display_irq_interface xe_display_irq_interface = {
	.enabled = irq_enabled,
	.synchronize = irq_synchronize,
};
539 
/* Services xe provides to the shared display core (HDCP, RPM, stolen, ...). */
static const struct intel_display_parent_interface parent = {
	.hdcp = &xe_display_hdcp_interface,
	.initial_plane = &xe_display_initial_plane_interface,
	.irq = &xe_display_irq_interface,
	.panic = &xe_display_panic_interface,
	.rpm = &xe_display_rpm_interface,
	.stolen = &xe_display_stolen_interface,
};
548 
/**
 * xe_display_probe - probe display and create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be xe->display.
 *
 * Returns: 0 on success
 */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev, &parent);
	if (IS_ERR(display))
		return PTR_ERR(display);

	/* Make sure the display device is destroyed on DRM device release. */
	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	xe->display = display;

	if (intel_display_device_present(display))
		return 0;

	/* No usable display: drop the modeset/atomic capability. */
no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
587