xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision fb7399cf2d0b33825b8039f95c45395c7deba25c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_client.h>
12 #include <drm/drm_client_event.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_managed.h>
15 #include <drm/drm_probe_helper.h>
16 #include <uapi/drm/xe_drm.h>
17 
18 #include "soc/intel_dram.h"
19 #include "intel_acpi.h"
20 #include "intel_audio.h"
21 #include "intel_bw.h"
22 #include "intel_display.h"
23 #include "intel_display_driver.h"
24 #include "intel_display_irq.h"
25 #include "intel_display_types.h"
26 #include "intel_dmc.h"
27 #include "intel_dmc_wl.h"
28 #include "intel_dp.h"
29 #include "intel_encoder.h"
30 #include "intel_fbdev.h"
31 #include "intel_hdcp.h"
32 #include "intel_hotplug.h"
33 #include "intel_opregion.h"
34 #include "skl_watermark.h"
35 #include "xe_module.h"
36 
37 /* Xe device functions */
38 
39 static bool has_display(struct xe_device *xe)
40 {
41 	return HAS_DISPLAY(&xe->display);
42 }
43 
44 /**
45  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
46  *				   early on
47  * @pdev: PCI device
48  *
49  * Returns: true if probe needs to be deferred, false otherwise
50  */
51 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
52 {
53 	if (!xe_modparam.probe_display)
54 		return 0;
55 
56 	return intel_display_driver_probe_defer(pdev);
57 }
58 
/**
 * xe_display_driver_set_hooks - Add driver flags and hooks for display
 * @driver: DRM device driver
 *
 * Set features and function hooks in @driver that are needed for driving the
 * display IP. This sets the driver's capability of driving display, regardless
 * if the device has it enabled
 */
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
	/* Display probing disabled via module parameter: leave hooks unset. */
	if (!xe_modparam.probe_display)
		return;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* fbdev emulation hook only exists when compiled in. */
	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
#endif

	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}
78 
/* Strip modeset/atomic capabilities when no display ends up being driven. */
static void unset_display_features(struct xe_device *xe)
{
	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
83 
/* drmm action: tear down resources allocated in xe_display_create(). */
static void display_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	destroy_workqueue(xe->display.hotplug.dp_wq);
}
90 
91 /**
92  * xe_display_create - create display struct
93  * @xe: XE device instance
94  *
95  * Initialize all fields used by the display part.
96  *
97  * TODO: once everything can be inside a single struct, make the struct opaque
98  * to the rest of xe and return it to be xe->display.
99  *
100  * Returns: 0 on success
101  */
102 int xe_display_create(struct xe_device *xe)
103 {
104 	spin_lock_init(&xe->display.fb_tracking.lock);
105 
106 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
107 
108 	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
109 }
110 
/*
 * devm teardown counterpart of xe_display_init_early(): undo the nogem and
 * noirq probe phases in reverse order, then release opregion and power
 * domain state.
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	/* Nothing was initialized when display probing is disabled. */
	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
124 
/**
 * xe_display_init_early - early display probe phases
 * @xe: XE device instance
 *
 * Runs the display probe steps that must happen before IRQs and GEM are
 * available (opregion, DRAM detection, noirq and nogem driver probe) and
 * registers a devm action (xe_display_fini_early) to unwind them on driver
 * removal.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(xe);

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
167 
/*
 * devm teardown counterpart of xe_display_init(); only registered once the
 * main display probe succeeded, hence no probe_display check here.
 */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
178 
/**
 * xe_display_init - main display probe phase
 * @xe: XE device instance
 *
 * Runs the main display driver probe and registers a devm action
 * (xe_display_fini) to unwind it on driver removal.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_init(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	err = intel_display_driver_probe(display);
	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
}
193 
/**
 * xe_display_register - register display with userspace
 * @xe: XE device instance
 *
 * Registers the display driver and then enables power domains; mirror image
 * of xe_display_unregister().
 */
void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
}
204 
/**
 * xe_display_unregister - unregister display from userspace
 * @xe: XE device instance
 *
 * Reverse order of xe_display_register(): disable power domains first, then
 * unregister the display driver.
 */
void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}
215 
216 /* IRQ-related functions */
217 
218 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
219 {
220 	struct intel_display *display = &xe->display;
221 
222 	if (!xe->info.probe_display)
223 		return;
224 
225 	if (master_ctl & DISPLAY_IRQ)
226 		gen11_display_irq_handler(display);
227 }
228 
229 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
230 {
231 	struct intel_display *display = &xe->display;
232 
233 	if (!xe->info.probe_display)
234 		return;
235 
236 	if (gu_misc_iir & GU_MISC_GSE)
237 		intel_opregion_asle_intr(display);
238 }
239 
240 void xe_display_irq_reset(struct xe_device *xe)
241 {
242 	struct intel_display *display = &xe->display;
243 
244 	if (!xe->info.probe_display)
245 		return;
246 
247 	gen11_display_irq_reset(display);
248 }
249 
250 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
251 {
252 	struct intel_display *display = &xe->display;
253 
254 	if (!xe->info.probe_display)
255 		return;
256 
257 	if (gt->info.id == XE_GT0)
258 		gen11_de_irq_postinstall(display);
259 }
260 
261 static bool suspend_to_idle(void)
262 {
263 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
264 	if (acpi_target_system_state() < ACPI_STATE_S3)
265 		return true;
266 #endif
267 	return false;
268 }
269 
/*
 * Wait for the cleanup phase of the most recent commit on each CRTC to
 * finish, so no commit machinery is still running when the device powers
 * down. The commit is referenced under commit_lock so it cannot be freed
 * while we wait on cleanup_done outside the lock.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			/* Must not hold commit_lock while sleeping here. */
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
290 
/* Quiesce the display in preparation for the device entering D3cold. */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	/* HPD interrupts are unavailable in D3cold; fall back to polling. */
	if (has_display(xe))
		intel_hpd_poll_enable(display);
}
313 
/* Bring the display back up after the device leaves D3cold. */
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	/* Interrupt-driven hotplug is available again; stop polling. */
	if (has_display(xe))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}
337 
338 void xe_display_pm_suspend(struct xe_device *xe)
339 {
340 	struct intel_display *display = &xe->display;
341 	bool s2idle = suspend_to_idle();
342 
343 	if (!xe->info.probe_display)
344 		return;
345 
346 	/*
347 	 * We do a lot of poking in a lot of registers, make sure they work
348 	 * properly.
349 	 */
350 	intel_power_domains_disable(display);
351 	drm_client_dev_suspend(&xe->drm, false);
352 
353 	if (has_display(xe)) {
354 		drm_kms_helper_poll_disable(&xe->drm);
355 		intel_display_driver_disable_user_access(display);
356 		intel_display_driver_suspend(display);
357 	}
358 
359 	xe_display_flush_cleanup_work(xe);
360 
361 	intel_hpd_cancel_work(display);
362 
363 	if (has_display(xe)) {
364 		intel_display_driver_suspend_access(display);
365 		intel_encoder_suspend_all(&xe->display);
366 	}
367 
368 	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
369 
370 	intel_dmc_suspend(display);
371 }
372 
/**
 * xe_display_pm_shutdown - prepare display for system shutdown/reboot
 * @xe: XE device instance
 *
 * Like xe_display_pm_suspend(), but additionally suspends DP MST and shuts
 * down all encoders, since the hardware will not be resumed.
 */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_hpd_cancel_work(display);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
403 
404 void xe_display_pm_runtime_suspend(struct xe_device *xe)
405 {
406 	struct intel_display *display = &xe->display;
407 
408 	if (!xe->info.probe_display)
409 		return;
410 
411 	if (xe->d3cold.allowed) {
412 		xe_display_enable_d3cold(xe);
413 		return;
414 	}
415 
416 	intel_hpd_poll_enable(display);
417 }
418 
/**
 * xe_display_pm_suspend_late - late system suspend step for display power
 * @xe: XE device instance
 *
 * Final display power-domain suspend, aware of whether the target is s2idle.
 */
void xe_display_pm_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	intel_display_power_suspend_late(display, s2idle);
}
429 
/**
 * xe_display_pm_runtime_suspend_late - late runtime suspend step for display
 * @xe: XE device instance
 */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/* Full late suspend only applies when the device may enter D3cold. */
	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
447 
/**
 * xe_display_pm_shutdown_late - final display step before shutdown/reboot
 * @xe: XE device instance
 */
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}
462 
/**
 * xe_display_pm_resume_early - early system resume step for display power
 * @xe: XE device instance
 *
 * Counterpart of xe_display_pm_suspend_late().
 */
void xe_display_pm_resume_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_power_resume_early(display);
}
472 
473 void xe_display_pm_resume(struct xe_device *xe)
474 {
475 	struct intel_display *display = &xe->display;
476 
477 	if (!xe->info.probe_display)
478 		return;
479 
480 	intel_dmc_resume(display);
481 
482 	if (has_display(xe))
483 		drm_mode_config_reset(&xe->drm);
484 
485 	intel_display_driver_init_hw(display);
486 
487 	if (has_display(xe))
488 		intel_display_driver_resume_access(display);
489 
490 	intel_hpd_init(display);
491 
492 	if (has_display(xe)) {
493 		intel_display_driver_resume(display);
494 		drm_kms_helper_poll_enable(&xe->drm);
495 		intel_display_driver_enable_user_access(display);
496 	}
497 
498 	if (has_display(xe))
499 		intel_hpd_poll_disable(display);
500 
501 	intel_opregion_resume(display);
502 
503 	drm_client_dev_resume(&xe->drm, false);
504 
505 	intel_power_domains_enable(display);
506 }
507 
508 void xe_display_pm_runtime_resume(struct xe_device *xe)
509 {
510 	struct intel_display *display = &xe->display;
511 
512 	if (!xe->info.probe_display)
513 		return;
514 
515 	if (xe->d3cold.allowed) {
516 		xe_display_disable_d3cold(xe);
517 		return;
518 	}
519 
520 	intel_hpd_init(display);
521 	intel_hpd_poll_disable(display);
522 	skl_watermark_ipc_update(display);
523 }
524 
525 
/* drmm action: release the display device probed in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}
532 
/**
 * xe_display_probe - probe the display device
 * @xe: XE device instance
 *
 * Probes the display device info and registers its removal with device
 * teardown. When display probing is disabled, or no display outputs are
 * found, the display features are stripped from the DRM driver flags and
 * probing still succeeds.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	/*
	 * NOTE(review): the return value is used without an error check —
	 * confirm intel_display_device_probe() cannot return NULL/ERR_PTR
	 * at this revision.
	 */
	display = intel_display_device_probe(pdev);

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	if (has_display(xe))
		return 0;

no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
556