// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_display.h"
#include "regs/xe_irq_regs.h"

#include <linux/fb.h>

#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <uapi/drm/xe_drm.h>

#include "soc/intel_dram.h"
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
#include "intel_display_device.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
#include "xe_module.h"

/* Xe device functions */

/**
 * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
 *				   early on
 * @pdev: PCI device
 *
 * Note: This is called before xe or display device creation.
 *
 * Returns: true if probe needs to be deferred, false otherwise
 */
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
	if (!xe_modparam.probe_display)
		return false;

	return intel_display_driver_probe_defer(pdev);
}

/**
 * xe_display_driver_set_hooks - Add driver flags and hooks for display
 * @driver: DRM device driver
 *
 * Set features and function hooks in @driver that are needed for driving the
 * display IP. This sets the driver's capability of driving display, regardless
 * of whether the device actually has display enabled.
 *
 * Note: This is called before xe or display device creation.
 */
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
	if (!xe_modparam.probe_display)
		return;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
#endif

	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}

static void unset_display_features(struct xe_device *xe)
{
	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}

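/*
 * Devm-managed teardown counterpart of xe_display_init_early(), registered
 * there via devm_add_action_or_reset(). Undoes the early probe steps in
 * reverse order: cancel pending hotplug work, unwind the nogem and noirq
 * probe phases, then clean up the opregion and power domain state.
 */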
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_hpd_cancel_work(display);
	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}

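/*
 * Early display initialization, run before the GEM side of the driver is
 * available: set up opregion, detect system DRAM for bandwidth/watermark
 * latency calculations, and run the noirq and nogem display probe phases.
 * On success, xe_display_fini_early() is registered as a devm action.
 */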
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init... */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	err = intel_dram_detect(xe);
	if (err)
		goto err_opregion;

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);

err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}

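/*
 * Devm-managed teardown counterpart of xe_display_init(): stop HPD polling,
 * tear down HDCP and audio, then remove the display driver.
 */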
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}

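/*
 * Main display initialization, run once GEM is available: perform the full
 * display driver probe and register xe_display_fini() as a devm action.
 */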
int xe_display_init(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	err = intel_display_driver_probe(display);
	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
}

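/*
 * Register the display driver with userspace and enable display power
 * domains; xe_display_unregister() undoes both in reverse order.
 */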
void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
}

void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}

/* IRQ-related functions */

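/*
 * Dispatch display interrupts: forward to the gen11+ display IRQ handler
 * whenever the master control register flags a pending display interrupt.
 */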
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (master_ctl & DISPLAY_IRQ)
		gen11_display_irq_handler(display);
}

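/*
 * A graphics system event (GSE) in the GU_MISC IIR indicates an ASLE
 * request from platform firmware, handled via the opregion.
 */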
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (gu_misc_iir & GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

void xe_display_irq_reset(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	gen11_display_irq_reset(display);
}

void xe_display_irq_postinstall(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	gen11_de_irq_postinstall(display);
}

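/*
 * Distinguish suspend-to-idle (s2idle/S0ix) from deeper sleep: an ACPI
 * target state below S3 means the platform is only going idle.
 */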
static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

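/*
 * Wait for in-flight atomic commit cleanup: take a reference to the newest
 * commit on each CRTC under the commit lock, then block until its
 * cleanup_done completion fires.
 */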
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}

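/*
 * Quiesce the display for a runtime suspend that may reach D3cold: disable
 * power domains, flush pending commit cleanup, save opregion state and
 * suspend the DMC firmware. HPD polling stays enabled so hotplug events
 * are still noticed while interrupts are unavailable.
 */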
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	if (intel_display_device_present(display))
		intel_hpd_poll_enable(display);
}

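/*
 * Counterpart of xe_display_enable_d3cold(): reload the DMC firmware,
 * reinitialize display hardware and HPD, switch from polling back to HPD
 * interrupts, and restore opregion and power domain state.
 */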
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (intel_display_device_present(display))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	if (intel_display_device_present(display))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}

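/*
 * Full system suspend: disable power domains and user access, suspend the
 * display driver and encoders, quiesce hotplug handling, then hand the
 * target PCI power state (D1 for s2idle, D3cold otherwise) to the opregion
 * and suspend the DMC firmware.
 */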
void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);

	intel_encoder_block_all_hpds(display);

	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(display);
	}

	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}

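/*
 * Shutdown/reboot path: like xe_display_pm_suspend(), but additionally
 * suspends DP MST and shuts down all encoders, since the driver will not
 * resume the hardware afterwards.
 */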
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm);

	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_encoder_block_all_hpds(display);
	intel_hpd_cancel_work(display);

	if (intel_display_device_present(display))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}

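/*
 * Runtime suspend: take the full D3cold path when allowed, otherwise just
 * switch to HPD polling so hotplug is still detected.
 */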
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		xe_display_enable_d3cold(xe);
		return;
	}

	intel_hpd_poll_enable(display);
}

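/*
 * Late suspend: program the final display power state, honoring the
 * s2idle vs. deeper-sleep distinction.
 */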
void xe_display_pm_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	intel_display_power_suspend_late(display, s2idle);
}

void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}

void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}

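/*
 * Early resume: restore display power state before the rest of the display
 * resume sequence runs.
 */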
void xe_display_pm_resume_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_power_resume_early(display);
}

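/*
 * Full system resume, mirroring xe_display_pm_suspend() in reverse: reload
 * the DMC firmware, reinitialize display hardware, restore HPD and user
 * access, resume the display driver and opregion, and re-enable power
 * domains.
 */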
void xe_display_pm_resume(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (intel_display_device_present(display))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (intel_display_device_present(display))
		intel_display_driver_resume_access(display);

	intel_hpd_init(display);

	intel_encoder_unblock_all_hpds(display);

	if (intel_display_device_present(display)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	if (intel_display_device_present(display))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	drm_client_dev_resume(&xe->drm);

	intel_power_domains_enable(display);
}

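/*
 * Runtime resume counterpart of xe_display_pm_runtime_suspend(): undo the
 * D3cold path when it was taken, otherwise restore HPD interrupts and
 * refresh the watermark IPC state.
 */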
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		xe_display_disable_d3cold(xe);
		return;
	}

	intel_hpd_init(display);
	intel_hpd_poll_disable(display);
	skl_watermark_ipc_update(display);
}

static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}

/**
 * xe_display_probe - probe display and create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be stored in xe->display.
 *
 * Returns: 0 on success
 */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev);
	if (IS_ERR(display))
		return PTR_ERR(display);

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	xe->display = display;

	if (intel_display_device_present(display))
		return 0;

no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}