xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_managed.h>
13 #include <drm/drm_probe_helper.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include "soc/intel_dram.h"
17 #include "intel_acpi.h"
18 #include "intel_audio.h"
19 #include "intel_bw.h"
20 #include "intel_display.h"
21 #include "intel_display_driver.h"
22 #include "intel_display_irq.h"
23 #include "intel_display_types.h"
24 #include "intel_dmc.h"
25 #include "intel_dmc_wl.h"
26 #include "intel_dp.h"
27 #include "intel_encoder.h"
28 #include "intel_fbdev.h"
29 #include "intel_hdcp.h"
30 #include "intel_hotplug.h"
31 #include "intel_opregion.h"
32 #include "xe_module.h"
33 
34 /* Xe device functions */
35 
36 static bool has_display(struct xe_device *xe)
37 {
38 	return HAS_DISPLAY(&xe->display);
39 }
40 
41 /**
42  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
43  *				   early on
44  * @pdev: PCI device
45  *
46  * Returns: true if probe needs to be deferred, false otherwise
47  */
48 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
49 {
50 	if (!xe_modparam.probe_display)
51 		return 0;
52 
53 	return intel_display_driver_probe_defer(pdev);
54 }
55 
56 /**
57  * xe_display_driver_set_hooks - Add driver flags and hooks for display
58  * @driver: DRM device driver
59  *
60  * Set features and function hooks in @driver that are needed for driving the
61  * display IP. This sets the driver's capability of driving display, regardless
62  * if the device has it enabled
63  */
64 void xe_display_driver_set_hooks(struct drm_driver *driver)
65 {
66 	if (!xe_modparam.probe_display)
67 		return;
68 
69 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
70 }
71 
72 static void unset_display_features(struct xe_device *xe)
73 {
74 	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
75 }
76 
77 static void display_destroy(struct drm_device *dev, void *dummy)
78 {
79 	struct xe_device *xe = to_xe_device(dev);
80 
81 	destroy_workqueue(xe->display.hotplug.dp_wq);
82 }
83 
84 /**
85  * xe_display_create - create display struct
86  * @xe: XE device instance
87  *
88  * Initialize all fields used by the display part.
89  *
90  * TODO: once everything can be inside a single struct, make the struct opaque
91  * to the rest of xe and return it to be xe->display.
92  *
93  * Returns: 0 on success
94  */
95 int xe_display_create(struct xe_device *xe)
96 {
97 	spin_lock_init(&xe->display.fb_tracking.lock);
98 
99 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
100 
101 	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
102 }
103 
104 static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
105 {
106 	struct xe_device *xe = to_xe_device(dev);
107 	struct intel_display *display = &xe->display;
108 
109 	if (!xe->info.probe_display)
110 		return;
111 
112 	intel_power_domains_cleanup(display);
113 }
114 
/*
 * Earliest display init phase (per the _nommio naming, before MMIO access
 * is set up — only software state is touched here). Cleanup is armed via a
 * drmm action, so callers need no explicit teardown.
 */
int xe_display_init_nommio(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(xe);

	return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}
128 
/*
 * devm teardown counterpart of xe_display_init_noirq(): removes the noirq
 * display state, then cleans up the opregion — reverse of the setup order.
 */
static void xe_display_fini_noirq(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
}
140 
/*
 * Display init phase that must complete before interrupts are enabled.
 *
 * On probe_noirq failure the opregion set up earlier in this function is
 * torn down before returning the error; on success a devm action arms
 * xe_display_fini_noirq() for automatic cleanup.
 */
int xe_display_init_noirq(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(xe);

	intel_bw_init_hw(xe);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err) {
		/* Manual unwind: the devm action below was never armed. */
		intel_opregion_cleanup(display);
		return err;
	}

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
}
172 
173 static void xe_display_fini_noaccel(void *arg)
174 {
175 	struct xe_device *xe = arg;
176 	struct intel_display *display = &xe->display;
177 
178 	if (!xe->info.probe_display)
179 		return;
180 
181 	intel_display_driver_remove_nogem(display);
182 }
183 
184 int xe_display_init_noaccel(struct xe_device *xe)
185 {
186 	struct intel_display *display = &xe->display;
187 	int err;
188 
189 	if (!xe->info.probe_display)
190 		return 0;
191 
192 	err = intel_display_driver_probe_nogem(display);
193 	if (err)
194 		return err;
195 
196 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe);
197 }
198 
199 int xe_display_init(struct xe_device *xe)
200 {
201 	struct intel_display *display = &xe->display;
202 
203 	if (!xe->info.probe_display)
204 		return 0;
205 
206 	return intel_display_driver_probe(display);
207 }
208 
/* Tear down the display state set up by xe_display_init(). */
void xe_display_fini(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/* Stop HPD polling first, then the components depending on it. */
	intel_hpd_poll_fini(xe);

	intel_hdcp_component_fini(display);
	intel_audio_deinit(xe);
}
221 
/*
 * Expose display to userspace: driver registration, power domains, and the
 * ACPI _DSM handler. Undone by xe_display_unregister().
 */
void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
	intel_register_dsm_handler();
}
233 
/* Mirror of xe_display_register(), tearing down in reverse order. */
void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_unregister_dsm_handler();
	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}
245 
246 void xe_display_driver_remove(struct xe_device *xe)
247 {
248 	struct intel_display *display = &xe->display;
249 
250 	if (!xe->info.probe_display)
251 		return;
252 
253 	intel_display_driver_remove(display);
254 }
255 
256 /* IRQ-related functions */
257 
258 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
259 {
260 	if (!xe->info.probe_display)
261 		return;
262 
263 	if (master_ctl & DISPLAY_IRQ)
264 		gen11_display_irq_handler(xe);
265 }
266 
267 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
268 {
269 	struct intel_display *display = &xe->display;
270 
271 	if (!xe->info.probe_display)
272 		return;
273 
274 	if (gu_misc_iir & GU_MISC_GSE)
275 		intel_opregion_asle_intr(display);
276 }
277 
278 void xe_display_irq_reset(struct xe_device *xe)
279 {
280 	if (!xe->info.probe_display)
281 		return;
282 
283 	gen11_display_irq_reset(xe);
284 }
285 
286 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
287 {
288 	if (!xe->info.probe_display)
289 		return;
290 
291 	if (gt->info.id == XE_GT0)
292 		gen11_de_irq_postinstall(xe);
293 }
294 
/*
 * Report whether the pending system suspend targets s2idle (shallower than
 * ACPI S3); always false when CONFIG_ACPI_SLEEP is disabled.
 */
static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
303 
/*
 * Wait for the cleanup phase of the first pending commit on each CRTC.
 *
 * A reference is taken on the commit while holding commit_lock so the
 * completion can be waited on safely after the lock is dropped.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		/* Only grab a reference under the lock — never block here. */
		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
324 
325 /* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
326 static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
327 {
328 	struct intel_display *display = &xe->display;
329 	bool s2idle = suspend_to_idle();
330 	if (!xe->info.probe_display)
331 		return;
332 
333 	/*
334 	 * We do a lot of poking in a lot of registers, make sure they work
335 	 * properly.
336 	 */
337 	intel_power_domains_disable(display);
338 	if (!runtime)
339 		intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
340 
341 	if (!runtime && has_display(xe)) {
342 		drm_kms_helper_poll_disable(&xe->drm);
343 		intel_display_driver_disable_user_access(display);
344 		intel_display_driver_suspend(display);
345 	}
346 
347 	xe_display_flush_cleanup_work(xe);
348 
349 	intel_hpd_cancel_work(xe);
350 
351 	if (!runtime && has_display(xe)) {
352 		intel_display_driver_suspend_access(display);
353 		intel_encoder_suspend_all(&xe->display);
354 	}
355 
356 	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
357 
358 	intel_dmc_suspend(display);
359 
360 	if (runtime && has_display(xe))
361 		intel_hpd_poll_enable(xe);
362 }
363 
/* System suspend entry point: full (non-runtime) display suspend. */
void xe_display_pm_suspend(struct xe_device *xe)
{
	__xe_display_pm_suspend(xe, false);
}
368 
/* Quiesce the display for system shutdown/reboot. */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	/* Wait for pending commit cleanup before touching encoders. */
	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_hpd_cancel_work(xe);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	/* Unlike suspend, shutdown also powers the encoders fully down. */
	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
398 
399 void xe_display_pm_runtime_suspend(struct xe_device *xe)
400 {
401 	if (!xe->info.probe_display)
402 		return;
403 
404 	if (xe->d3cold.allowed) {
405 		__xe_display_pm_suspend(xe, true);
406 		return;
407 	}
408 
409 	intel_hpd_poll_enable(xe);
410 }
411 
412 void xe_display_pm_suspend_late(struct xe_device *xe)
413 {
414 	struct intel_display *display = &xe->display;
415 	bool s2idle = suspend_to_idle();
416 
417 	if (!xe->info.probe_display)
418 		return;
419 
420 	intel_display_power_suspend_late(display, s2idle);
421 }
422 
/* Late runtime-suspend step. */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/* The full late suspend only applies when D3cold entry is allowed. */
	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
440 
/* Late shutdown step: put display power into a reboot-safe state. */
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}
455 
456 void xe_display_pm_resume_early(struct xe_device *xe)
457 {
458 	struct intel_display *display = &xe->display;
459 
460 	if (!xe->info.probe_display)
461 		return;
462 
463 	intel_display_power_resume_early(display);
464 }
465 
/*
 * Common resume path for system and runtime resume. With @runtime set, the
 * steps that only apply to a full system resume (modeset restore, user
 * access, poll helper, fbdev) are skipped.
 */
static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (!runtime && has_display(xe))
		intel_display_driver_resume_access(display);

	intel_hpd_init(xe);

	if (!runtime && has_display(xe)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	/* Interrupt-driven HPD is back; polling is no longer needed. */
	if (has_display(xe))
		intel_hpd_poll_disable(xe);

	intel_opregion_resume(display);

	if (!runtime)
		intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(display);
}
501 
/* System resume entry point: full (non-runtime) display resume. */
void xe_display_pm_resume(struct xe_device *xe)
{
	__xe_display_pm_resume(xe, false);
}
506 
507 void xe_display_pm_runtime_resume(struct xe_device *xe)
508 {
509 	if (!xe->info.probe_display)
510 		return;
511 
512 	if (xe->d3cold.allowed) {
513 		__xe_display_pm_resume(xe, true);
514 		return;
515 	}
516 
517 	intel_hpd_init(xe);
518 	intel_hpd_poll_disable(xe);
519 }
520 
521 
/* drmm action: release the display device allocated in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}
528 
/*
 * Probe the display side of the device.
 *
 * When display probing is disabled (module parameter) or no display
 * hardware is present, probe_display is cleared and the MODESET/ATOMIC
 * driver features are stripped so the rest of the driver continues
 * without display; this path still returns 0.
 *
 * NOTE(review): the intel_display_device_probe() return value is used
 * unchecked — presumably it cannot return an error pointer here; confirm
 * against its definition.
 */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev);

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	if (has_display(xe))
		return 0;

no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
552