xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision bcfe43f0ea77c42c2154fb79b99b7d1d82ac3231)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_managed.h>
13 #include <drm/drm_probe_helper.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include "soc/intel_dram.h"
17 #include "intel_acpi.h"
18 #include "intel_audio.h"
19 #include "intel_bw.h"
20 #include "intel_display.h"
21 #include "intel_display_driver.h"
22 #include "intel_display_irq.h"
23 #include "intel_display_types.h"
24 #include "intel_dmc.h"
25 #include "intel_dp.h"
26 #include "intel_encoder.h"
27 #include "intel_fbdev.h"
28 #include "intel_hdcp.h"
29 #include "intel_hotplug.h"
30 #include "intel_opregion.h"
31 #include "xe_module.h"
32 
33 /* Xe device functions */
34 
35 static bool has_display(struct xe_device *xe)
36 {
37 	return HAS_DISPLAY(&xe->display);
38 }
39 
40 /**
41  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
42  *				   early on
43  * @pdev: PCI device
44  *
45  * Returns: true if probe needs to be deferred, false otherwise
46  */
47 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
48 {
49 	if (!xe_modparam.probe_display)
50 		return 0;
51 
52 	return intel_display_driver_probe_defer(pdev);
53 }
54 
55 /**
56  * xe_display_driver_set_hooks - Add driver flags and hooks for display
57  * @driver: DRM device driver
58  *
59  * Set features and function hooks in @driver that are needed for driving the
60  * display IP. This sets the driver's capability of driving display, regardless
61  * if the device has it enabled
62  */
63 void xe_display_driver_set_hooks(struct drm_driver *driver)
64 {
65 	if (!xe_modparam.probe_display)
66 		return;
67 
68 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
69 }
70 
71 static void unset_display_features(struct xe_device *xe)
72 {
73 	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
74 }
75 
76 static void display_destroy(struct drm_device *dev, void *dummy)
77 {
78 	struct xe_device *xe = to_xe_device(dev);
79 
80 	destroy_workqueue(xe->display.hotplug.dp_wq);
81 }
82 
83 /**
84  * xe_display_create - create display struct
85  * @xe: XE device instance
86  *
87  * Initialize all fields used by the display part.
88  *
89  * TODO: once everything can be inside a single struct, make the struct opaque
90  * to the rest of xe and return it to be xe->display.
91  *
92  * Returns: 0 on success
93  */
94 int xe_display_create(struct xe_device *xe)
95 {
96 	spin_lock_init(&xe->display.fb_tracking.lock);
97 
98 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
99 
100 	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
101 }
102 
103 static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
104 {
105 	struct xe_device *xe = to_xe_device(dev);
106 
107 	if (!xe->info.probe_display)
108 		return;
109 
110 	intel_power_domains_cleanup(xe);
111 }
112 
/*
 * Earliest display init stage, before MMIO is available. The statement
 * order below is load-bearing.
 *
 * Returns 0 on success or a negative errno from the drmm action setup.
 */
int xe_display_init_nommio(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(xe);

	/* Cleanup (power domain teardown) is run automatically on driver unwind. */
	return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}
126 
127 static void xe_display_fini_noirq(void *arg)
128 {
129 	struct xe_device *xe = arg;
130 	struct intel_display *display = &xe->display;
131 
132 	if (!xe->info.probe_display)
133 		return;
134 
135 	intel_display_driver_remove_noirq(xe);
136 	intel_opregion_cleanup(display);
137 }
138 
/*
 * Display init stage that runs before IRQs are installed. Ordering of the
 * calls below is significant (e.g. DRAM info must be known before bandwidth
 * init, device runtime info before the noirq probe).
 *
 * Returns 0 on success or a negative errno; on probe failure the opregion
 * setup done here is unwound before returning.
 */
int xe_display_init_noirq(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	intel_display_driver_early_probe(xe);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(xe);

	intel_bw_init_hw(xe);

	intel_display_device_info_runtime_init(xe);

	err = intel_display_driver_probe_noirq(xe);
	if (err) {
		intel_opregion_cleanup(display);
		return err;
	}

	/* Remaining teardown is handled by the devm action on driver unwind. */
	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
}
170 
171 static void xe_display_fini_noaccel(void *arg)
172 {
173 	struct xe_device *xe = arg;
174 
175 	if (!xe->info.probe_display)
176 		return;
177 
178 	intel_display_driver_remove_nogem(xe);
179 }
180 
181 int xe_display_init_noaccel(struct xe_device *xe)
182 {
183 	int err;
184 
185 	if (!xe->info.probe_display)
186 		return 0;
187 
188 	err = intel_display_driver_probe_nogem(xe);
189 	if (err)
190 		return err;
191 
192 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe);
193 }
194 
195 int xe_display_init(struct xe_device *xe)
196 {
197 	if (!xe->info.probe_display)
198 		return 0;
199 
200 	return intel_display_driver_probe(xe);
201 }
202 
/*
 * Tear down what xe_display_init() brought up. Poll/HPD must be stopped
 * before HDCP and audio are shut down.
 */
void xe_display_fini(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	intel_hpd_poll_fini(xe);

	intel_hdcp_component_fini(xe);
	intel_audio_deinit(xe);
}
213 
214 void xe_display_register(struct xe_device *xe)
215 {
216 	if (!xe->info.probe_display)
217 		return;
218 
219 	intel_display_driver_register(xe);
220 	intel_register_dsm_handler();
221 	intel_power_domains_enable(xe);
222 }
223 
224 void xe_display_unregister(struct xe_device *xe)
225 {
226 	if (!xe->info.probe_display)
227 		return;
228 
229 	intel_unregister_dsm_handler();
230 	intel_power_domains_disable(xe);
231 	intel_display_driver_unregister(xe);
232 }
233 
234 void xe_display_driver_remove(struct xe_device *xe)
235 {
236 	if (!xe->info.probe_display)
237 		return;
238 
239 	intel_display_driver_remove(xe);
240 }
241 
242 /* IRQ-related functions */
243 
244 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
245 {
246 	if (!xe->info.probe_display)
247 		return;
248 
249 	if (master_ctl & DISPLAY_IRQ)
250 		gen11_display_irq_handler(xe);
251 }
252 
253 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
254 {
255 	struct intel_display *display = &xe->display;
256 
257 	if (!xe->info.probe_display)
258 		return;
259 
260 	if (gu_misc_iir & GU_MISC_GSE)
261 		intel_opregion_asle_intr(display);
262 }
263 
264 void xe_display_irq_reset(struct xe_device *xe)
265 {
266 	if (!xe->info.probe_display)
267 		return;
268 
269 	gen11_display_irq_reset(xe);
270 }
271 
272 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
273 {
274 	if (!xe->info.probe_display)
275 		return;
276 
277 	if (gt->info.id == XE_GT0)
278 		gen11_de_irq_postinstall(xe);
279 }
280 
281 static bool suspend_to_idle(void)
282 {
283 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
284 	if (acpi_target_system_state() < ACPI_STATE_S3)
285 		return true;
286 #endif
287 	return false;
288 }
289 
/*
 * Wait for the most recent atomic commit on every CRTC to finish its cleanup
 * work. The commit reference is taken under commit_lock so the commit cannot
 * be freed between the lookup and the wait; the wait itself happens with the
 * lock dropped.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		/* Newest commit is first on the list; it may be NULL if idle. */
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
310 
311 /* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
/*
 * Common suspend path for system (@runtime == false) and runtime
 * (@runtime == true) PM. Runtime suspend skips fbdev, MST, and the user
 * access/suspend steps, and instead enables HPD polling at the end so
 * hotplugs are still noticed while powered down. Statement order here is
 * deliberate; see the TODO above about sanitizing it.
 */
static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();
	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(xe);
	if (!runtime)
		intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);

	if (!runtime && has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(xe);
		intel_display_driver_suspend(xe);
	}

	/* Let in-flight atomic commits finish cleanup before touching HPD/MST. */
	xe_display_flush_cleanup_work(xe);

	if (!runtime)
		intel_dp_mst_suspend(xe);

	intel_hpd_cancel_work(xe);

	if (!runtime && has_display(xe)) {
		intel_display_driver_suspend_access(xe);
		intel_encoder_suspend_all(&xe->display);
	}

	/* Report the target power state to the opregion: D1 for s2idle, D3cold otherwise. */
	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);

	if (runtime && has_display(xe))
		intel_hpd_poll_enable(xe);
}
352 
/* System (non-runtime) display suspend entry point. */
void xe_display_pm_suspend(struct xe_device *xe)
{
	__xe_display_pm_suspend(xe, false);
}
357 
/*
 * Display shutdown path (reboot/poweroff). Similar to system suspend but
 * additionally shuts down all encoders, and always reports D3cold to the
 * opregion. Ordering mirrors __xe_display_pm_suspend().
 */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(xe);
	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(xe);
		intel_display_driver_suspend(xe);
	}

	/* Let pending commit cleanup finish before MST/HPD teardown. */
	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(xe);
	intel_hpd_cancel_work(xe);

	if (has_display(xe))
		intel_display_driver_suspend_access(xe);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
387 
388 void xe_display_pm_runtime_suspend(struct xe_device *xe)
389 {
390 	if (!xe->info.probe_display)
391 		return;
392 
393 	if (xe->d3cold.allowed) {
394 		__xe_display_pm_suspend(xe, true);
395 		return;
396 	}
397 
398 	intel_hpd_poll_enable(xe);
399 }
400 
401 void xe_display_pm_suspend_late(struct xe_device *xe)
402 {
403 	bool s2idle = suspend_to_idle();
404 	if (!xe->info.probe_display)
405 		return;
406 
407 	intel_power_domains_suspend(xe, s2idle);
408 
409 	intel_display_power_suspend_late(xe);
410 }
411 
412 void xe_display_pm_shutdown_late(struct xe_device *xe)
413 {
414 	if (!xe->info.probe_display)
415 		return;
416 
417 	/*
418 	 * The only requirement is to reboot with display DC states disabled,
419 	 * for now leaving all display power wells in the INIT power domain
420 	 * enabled.
421 	 */
422 	intel_power_domains_driver_remove(xe);
423 }
424 
425 void xe_display_pm_resume_early(struct xe_device *xe)
426 {
427 	if (!xe->info.probe_display)
428 		return;
429 
430 	intel_display_power_resume_early(xe);
431 
432 	intel_power_domains_resume(xe);
433 }
434 
/*
 * Common resume path, mirroring __xe_display_pm_suspend(). Runtime resume
 * (@runtime == true) skips the user-access, MST, driver-resume, and fbdev
 * steps. Statement order is deliberate (see the suspend-side TODO).
 */
static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(xe);
	intel_hpd_init(xe);

	if (!runtime && has_display(xe))
		intel_display_driver_resume_access(xe);

	/* MST sideband requires HPD interrupts enabled */
	if (!runtime)
		intel_dp_mst_resume(xe);

	if (!runtime && has_display(xe)) {
		intel_display_driver_resume(xe);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(xe);
	}

	/* Back to interrupt-driven hotplug; polling was for the low-power state. */
	if (has_display(xe))
		intel_hpd_poll_disable(xe);

	intel_opregion_resume(display);

	if (!runtime)
		intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(xe);
}
473 
/* System (non-runtime) display resume entry point. */
void xe_display_pm_resume(struct xe_device *xe)
{
	__xe_display_pm_resume(xe, false);
}
478 
479 void xe_display_pm_runtime_resume(struct xe_device *xe)
480 {
481 	if (!xe->info.probe_display)
482 		return;
483 
484 	if (xe->d3cold.allowed) {
485 		__xe_display_pm_resume(xe, true);
486 		return;
487 	}
488 
489 	intel_hpd_init(xe);
490 	intel_hpd_poll_disable(xe);
491 }
492 
493 
/* drmm teardown action paired with intel_display_device_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	intel_display_device_remove(arg);
}
500 
501 int xe_display_probe(struct xe_device *xe)
502 {
503 	int err;
504 
505 	if (!xe->info.probe_display)
506 		goto no_display;
507 
508 	intel_display_device_probe(xe);
509 
510 	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
511 	if (err)
512 		return err;
513 
514 	if (has_display(xe))
515 		return 0;
516 
517 no_display:
518 	xe->info.probe_display = false;
519 	unset_display_features(xe);
520 	return 0;
521 }
522