xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision 6f17ab9a63e670bd62a287f95e3982f99eafd77e)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_client.h>
12 #include <drm/drm_client_event.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_managed.h>
15 #include <drm/drm_probe_helper.h>
16 #include <uapi/drm/xe_drm.h>
17 
18 #include "soc/intel_dram.h"
19 #include "intel_acpi.h"
20 #include "intel_audio.h"
21 #include "intel_bw.h"
22 #include "intel_display.h"
23 #include "intel_display_core.h"
24 #include "intel_display_driver.h"
25 #include "intel_display_irq.h"
26 #include "intel_display_types.h"
27 #include "intel_dmc.h"
28 #include "intel_dmc_wl.h"
29 #include "intel_dp.h"
30 #include "intel_encoder.h"
31 #include "intel_fbdev.h"
32 #include "intel_hdcp.h"
33 #include "intel_hotplug.h"
34 #include "intel_opregion.h"
35 #include "skl_watermark.h"
36 #include "xe_module.h"
37 
38 /* Xe device functions */
39 
40 static bool has_display(struct xe_device *xe)
41 {
42 	struct intel_display *display = xe->display;
43 
44 	return HAS_DISPLAY(display);
45 }
46 
47 /**
48  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
49  *				   early on
50  * @pdev: PCI device
51  *
52  * Note: This is called before xe or display device creation.
53  *
54  * Returns: true if probe needs to be deferred, false otherwise
55  */
56 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
57 {
58 	if (!xe_modparam.probe_display)
59 		return 0;
60 
61 	return intel_display_driver_probe_defer(pdev);
62 }
63 
64 /**
65  * xe_display_driver_set_hooks - Add driver flags and hooks for display
66  * @driver: DRM device driver
67  *
68  * Set features and function hooks in @driver that are needed for driving the
69  * display IP. This sets the driver's capability of driving display, regardless
70  * if the device has it enabled
71  *
72  * Note: This is called before xe or display device creation.
73  */
74 void xe_display_driver_set_hooks(struct drm_driver *driver)
75 {
76 	if (!xe_modparam.probe_display)
77 		return;
78 
79 #ifdef CONFIG_DRM_FBDEV_EMULATION
80 	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
81 #endif
82 
83 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
84 }
85 
86 static void unset_display_features(struct xe_device *xe)
87 {
88 	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
89 }
90 
/*
 * Teardown counterpart of xe_display_init_early(); registered there as a
 * devm action, so @arg is the xe device. Undoes the probe_nogem/probe_noirq/
 * opregion/power-domain setup in reverse order.
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_hpd_cancel_work(display);
	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
105 
/**
 * xe_display_init_early - early display initialization
 * @xe: XE device instance
 *
 * No-op unless display probing is enabled (xe->info.probe_display). Sets up
 * the opregion, DRAM info, bandwidth state and the noirq/nogem display probe
 * stages, and registers xe_display_fini_early() as a devm action to undo
 * them on driver teardown.
 *
 * Returns: 0 on success, negative error code on failure.
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	err = intel_dram_detect(xe);
	if (err)
		goto err_opregion;

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	/* On success xe_display_fini_early() handles all of the teardown. */
	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
150 
/*
 * Teardown counterpart of xe_display_init(); registered there as a devm
 * action, so @arg is the xe device.
 */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
161 
162 int xe_display_init(struct xe_device *xe)
163 {
164 	struct intel_display *display = xe->display;
165 	int err;
166 
167 	if (!xe->info.probe_display)
168 		return 0;
169 
170 	err = intel_display_driver_probe(display);
171 	if (err)
172 		return err;
173 
174 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
175 }
176 
177 void xe_display_register(struct xe_device *xe)
178 {
179 	struct intel_display *display = xe->display;
180 
181 	if (!xe->info.probe_display)
182 		return;
183 
184 	intel_display_driver_register(display);
185 	intel_power_domains_enable(display);
186 }
187 
188 void xe_display_unregister(struct xe_device *xe)
189 {
190 	struct intel_display *display = xe->display;
191 
192 	if (!xe->info.probe_display)
193 		return;
194 
195 	intel_power_domains_disable(display);
196 	intel_display_driver_unregister(display);
197 }
198 
199 /* IRQ-related functions */
200 
201 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
202 {
203 	struct intel_display *display = xe->display;
204 
205 	if (!xe->info.probe_display)
206 		return;
207 
208 	if (master_ctl & DISPLAY_IRQ)
209 		gen11_display_irq_handler(display);
210 }
211 
212 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
213 {
214 	struct intel_display *display = xe->display;
215 
216 	if (!xe->info.probe_display)
217 		return;
218 
219 	if (gu_misc_iir & GU_MISC_GSE)
220 		intel_opregion_asle_intr(display);
221 }
222 
223 void xe_display_irq_reset(struct xe_device *xe)
224 {
225 	struct intel_display *display = xe->display;
226 
227 	if (!xe->info.probe_display)
228 		return;
229 
230 	gen11_display_irq_reset(display);
231 }
232 
233 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
234 {
235 	struct intel_display *display = xe->display;
236 
237 	if (!xe->info.probe_display)
238 		return;
239 
240 	if (gt->info.id == XE_GT0)
241 		gen11_de_irq_postinstall(display);
242 }
243 
244 static bool suspend_to_idle(void)
245 {
246 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
247 	if (acpi_target_system_state() < ACPI_STATE_S3)
248 		return true;
249 #endif
250 	return false;
251 }
252 
/*
 * Wait for the cleanup of the most recent commit on every CRTC. A reference
 * is taken on the commit under commit_lock so it cannot be freed while we
 * wait for its cleanup_done completion outside the lock.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			/* Blocking wait; must not hold commit_lock here. */
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
273 
/*
 * Display side of D3cold entry; called from xe_display_pm_runtime_suspend()
 * when d3cold is allowed.
 */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	/* Wait for pending commit cleanup before suspending further. */
	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	if (has_display(xe))
		intel_hpd_poll_enable(display);
}
296 
/*
 * Reverse of xe_display_enable_d3cold(); called from
 * xe_display_pm_runtime_resume() when d3cold is allowed.
 */
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	if (has_display(xe))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}
320 
/**
 * xe_display_pm_suspend - display handling for system suspend
 * @xe: XE device instance
 *
 * Quiesces the display: disables power domains and user access, suspends the
 * display driver and encoders, cancels HPD work, and notifies the opregion
 * of the target PCI power state (PCI_D1 for s2idle, PCI_D3cold otherwise).
 */
void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	/* Wait for pending commit cleanup before blocking HPD. */
	xe_display_flush_cleanup_work(xe);

	intel_encoder_block_all_hpds(display);

	intel_hpd_cancel_work(display);

	if (has_display(xe)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(display);
	}

	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}
357 
/**
 * xe_display_pm_shutdown - display handling for device shutdown
 * @xe: XE device instance
 *
 * Similar to xe_display_pm_suspend(), but additionally suspends DP MST and
 * shuts down all encoders; the opregion is always notified with PCI_D3cold.
 */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	/* Wait for pending commit cleanup before blocking HPD. */
	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_encoder_block_all_hpds(display);
	intel_hpd_cancel_work(display);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
389 
390 void xe_display_pm_runtime_suspend(struct xe_device *xe)
391 {
392 	struct intel_display *display = xe->display;
393 
394 	if (!xe->info.probe_display)
395 		return;
396 
397 	if (xe->d3cold.allowed) {
398 		xe_display_enable_d3cold(xe);
399 		return;
400 	}
401 
402 	intel_hpd_poll_enable(display);
403 }
404 
405 void xe_display_pm_suspend_late(struct xe_device *xe)
406 {
407 	struct intel_display *display = xe->display;
408 	bool s2idle = suspend_to_idle();
409 
410 	if (!xe->info.probe_display)
411 		return;
412 
413 	intel_display_power_suspend_late(display, s2idle);
414 }
415 
416 void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
417 {
418 	struct intel_display *display = xe->display;
419 
420 	if (!xe->info.probe_display)
421 		return;
422 
423 	if (xe->d3cold.allowed)
424 		xe_display_pm_suspend_late(xe);
425 
426 	/*
427 	 * If xe_display_pm_suspend_late() is not called, it is likely
428 	 * that we will be on dynamic DC states with DMC wakelock enabled. We
429 	 * need to flush the release work in that case.
430 	 */
431 	intel_dmc_wl_flush_release_work(display);
432 }
433 
434 void xe_display_pm_shutdown_late(struct xe_device *xe)
435 {
436 	struct intel_display *display = xe->display;
437 
438 	if (!xe->info.probe_display)
439 		return;
440 
441 	/*
442 	 * The only requirement is to reboot with display DC states disabled,
443 	 * for now leaving all display power wells in the INIT power domain
444 	 * enabled.
445 	 */
446 	intel_power_domains_driver_remove(display);
447 }
448 
449 void xe_display_pm_resume_early(struct xe_device *xe)
450 {
451 	struct intel_display *display = xe->display;
452 
453 	if (!xe->info.probe_display)
454 		return;
455 
456 	intel_display_power_resume_early(display);
457 }
458 
/**
 * xe_display_pm_resume - display handling for system resume
 * @xe: XE device instance
 *
 * Reverse of xe_display_pm_suspend(): resumes DMC, re-initializes display
 * hardware, restores HPD, user access and polling, and notifies the
 * opregion and DRM clients before re-enabling power domains.
 */
void xe_display_pm_resume(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (has_display(xe))
		intel_display_driver_resume_access(display);

	intel_hpd_init(display);

	intel_encoder_unblock_all_hpds(display);

	if (has_display(xe)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	if (has_display(xe))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	drm_client_dev_resume(&xe->drm, false);

	intel_power_domains_enable(display);
}
495 
496 void xe_display_pm_runtime_resume(struct xe_device *xe)
497 {
498 	struct intel_display *display = xe->display;
499 
500 	if (!xe->info.probe_display)
501 		return;
502 
503 	if (xe->d3cold.allowed) {
504 		xe_display_disable_d3cold(xe);
505 		return;
506 	}
507 
508 	intel_hpd_init(display);
509 	intel_hpd_poll_disable(display);
510 	skl_watermark_ipc_update(display);
511 }
512 
513 
/* drmm action: destroy the display device created in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	intel_display_device_remove(arg);
}
520 
521 /**
522  * xe_display_probe - probe display and create display struct
523  * @xe: XE device instance
524  *
525  * Initialize all fields used by the display part.
526  *
527  * TODO: once everything can be inside a single struct, make the struct opaque
528  * to the rest of xe and return it to be xe->display.
529  *
530  * Returns: 0 on success
531  */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev);
	if (IS_ERR(display))
		return PTR_ERR(display);

	/* Tie display device lifetime to the drm device. */
	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	xe->display = display;

	if (has_display(xe))
		return 0;

no_display:
	/* No usable display: drop the display capability flags. */
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
559