xref: /linux/drivers/gpu/drm/xe/display/xe_display.c (revision 5488bec96bccbd87335921338f8dc38b87db7d2c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_display.h"
7 #include "regs/xe_irq_regs.h"
8 
9 #include <linux/fb.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_managed.h>
13 #include <drm/drm_probe_helper.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include "soc/intel_dram.h"
17 #include "intel_acpi.h"
18 #include "intel_audio.h"
19 #include "intel_bw.h"
20 #include "intel_display.h"
21 #include "intel_display_driver.h"
22 #include "intel_display_irq.h"
23 #include "intel_display_types.h"
24 #include "intel_dmc.h"
25 #include "intel_dmc_wl.h"
26 #include "intel_dp.h"
27 #include "intel_encoder.h"
28 #include "intel_fbdev.h"
29 #include "intel_hdcp.h"
30 #include "intel_hotplug.h"
31 #include "intel_opregion.h"
32 #include "skl_watermark.h"
33 #include "xe_module.h"
34 
35 /* Xe device functions */
36 
37 static bool has_display(struct xe_device *xe)
38 {
39 	return HAS_DISPLAY(&xe->display);
40 }
41 
42 /**
43  * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
44  *				   early on
45  * @pdev: PCI device
46  *
47  * Returns: true if probe needs to be deferred, false otherwise
48  */
49 bool xe_display_driver_probe_defer(struct pci_dev *pdev)
50 {
51 	if (!xe_modparam.probe_display)
52 		return 0;
53 
54 	return intel_display_driver_probe_defer(pdev);
55 }
56 
57 /**
58  * xe_display_driver_set_hooks - Add driver flags and hooks for display
59  * @driver: DRM device driver
60  *
61  * Set features and function hooks in @driver that are needed for driving the
62  * display IP. This sets the driver's capability of driving display, regardless
63  * if the device has it enabled
64  */
65 void xe_display_driver_set_hooks(struct drm_driver *driver)
66 {
67 	if (!xe_modparam.probe_display)
68 		return;
69 
70 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
71 }
72 
73 static void unset_display_features(struct xe_device *xe)
74 {
75 	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
76 }
77 
/* drmm cleanup action: tear down resources created in xe_display_create(). */
static void display_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	destroy_workqueue(xe->display.hotplug.dp_wq);
}
84 
85 /**
86  * xe_display_create - create display struct
87  * @xe: XE device instance
88  *
89  * Initialize all fields used by the display part.
90  *
91  * TODO: once everything can be inside a single struct, make the struct opaque
92  * to the rest of xe and return it to be xe->display.
93  *
94  * Returns: 0 on success
95  */
96 int xe_display_create(struct xe_device *xe)
97 {
98 	spin_lock_init(&xe->display.fb_tracking.lock);
99 
100 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
101 
102 	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
103 }
104 
/*
 * devm cleanup action: unwind xe_display_init_early() in reverse order
 * (nogem stage, then noirq stage, then opregion and power domains).
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
118 
/*
 * Early display initialization, run before GEM and interrupts are set up.
 *
 * Detects the PCH and system DRAM configuration, sets up opregion, and runs
 * the pre-irq and pre-gem display driver probe stages.  On success a devm
 * action is registered so xe_display_fini_early() undoes this automatically
 * on driver detach; on failure the stages already completed are unwound here.
 *
 * Returns: 0 on success or when display probing is disabled, a negative
 * error code otherwise.
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(xe);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(xe);

	intel_bw_init_hw(xe);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
164 
/* devm cleanup action: undo xe_display_init() (main display probe stage). */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	intel_hpd_poll_fini(xe);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
175 
176 int xe_display_init(struct xe_device *xe)
177 {
178 	struct intel_display *display = &xe->display;
179 	int err;
180 
181 	if (!xe->info.probe_display)
182 		return 0;
183 
184 	err = intel_display_driver_probe(display);
185 	if (err)
186 		return err;
187 
188 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
189 }
190 
/* Expose display to userspace, then allow power domains to be released. */
void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
}
201 
/* Mirror of xe_display_register(): re-grab power domains, then unregister. */
void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}
212 
213 /* IRQ-related functions */
214 
215 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
216 {
217 	if (!xe->info.probe_display)
218 		return;
219 
220 	if (master_ctl & DISPLAY_IRQ)
221 		gen11_display_irq_handler(xe);
222 }
223 
224 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
225 {
226 	struct intel_display *display = &xe->display;
227 
228 	if (!xe->info.probe_display)
229 		return;
230 
231 	if (gu_misc_iir & GU_MISC_GSE)
232 		intel_opregion_asle_intr(display);
233 }
234 
/* Reset all display interrupt state; part of the device IRQ reset path. */
void xe_display_irq_reset(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	gen11_display_irq_reset(xe);
}
242 
243 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
244 {
245 	if (!xe->info.probe_display)
246 		return;
247 
248 	if (gt->info.id == XE_GT0)
249 		gen11_de_irq_postinstall(xe);
250 }
251 
252 static bool suspend_to_idle(void)
253 {
254 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
255 	if (acpi_target_system_state() < ACPI_STATE_S3)
256 		return true;
257 #endif
258 	return false;
259 }
260 
/*
 * Wait for the oldest pending atomic commit on each CRTC to finish its
 * cleanup work, so no commit machinery is still running when we suspend.
 *
 * The commit is referenced under commit_lock before waiting, since the
 * completion may free it once cleanup_done fires.
 */
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		/* Wait outside the lock; put drops our temporary reference. */
		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}
281 
/* Quiesce display for runtime D3cold entry (deepest runtime PM state). */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	/* HPD interrupts are lost in D3cold; fall back to polling. */
	if (has_display(xe))
		intel_hpd_poll_enable(xe);
}
304 
/* Undo xe_display_enable_d3cold() on runtime resume out of D3cold. */
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(xe);

	/* HPD interrupts work again; stop the polling started at suspend. */
	if (has_display(xe))
		intel_hpd_poll_disable(xe);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}
328 
329 void xe_display_pm_suspend(struct xe_device *xe)
330 {
331 	struct intel_display *display = &xe->display;
332 	bool s2idle = suspend_to_idle();
333 
334 	if (!xe->info.probe_display)
335 		return;
336 
337 	/*
338 	 * We do a lot of poking in a lot of registers, make sure they work
339 	 * properly.
340 	 */
341 	intel_power_domains_disable(display);
342 	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
343 
344 	if (has_display(xe)) {
345 		drm_kms_helper_poll_disable(&xe->drm);
346 		intel_display_driver_disable_user_access(display);
347 		intel_display_driver_suspend(display);
348 	}
349 
350 	xe_display_flush_cleanup_work(xe);
351 
352 	intel_hpd_cancel_work(xe);
353 
354 	if (has_display(xe)) {
355 		intel_display_driver_suspend_access(display);
356 		intel_encoder_suspend_all(&xe->display);
357 	}
358 
359 	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
360 
361 	intel_dmc_suspend(display);
362 }
363 
/*
 * Shutdown/reboot path: like xe_display_pm_suspend() but additionally tears
 * down DP MST and shuts encoders fully down, since the device will not be
 * resumed.
 */
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_hpd_cancel_work(xe);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}
393 
394 void xe_display_pm_runtime_suspend(struct xe_device *xe)
395 {
396 	if (!xe->info.probe_display)
397 		return;
398 
399 	if (xe->d3cold.allowed) {
400 		xe_display_enable_d3cold(xe);
401 		return;
402 	}
403 
404 	intel_hpd_poll_enable(xe);
405 }
406 
/* Late suspend stage: drop remaining display power, honouring s2idle. */
void xe_display_pm_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	intel_display_power_suspend_late(display, s2idle);
}
417 
/* Late runtime-suspend stage; only reaches the late path for D3cold. */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
435 
/* Final shutdown stage: leave the hardware with DC states disabled. */
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}
450 
/* Early resume stage: restore display power, mirrors suspend_late. */
void xe_display_pm_resume_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_power_resume_early(display);
}
460 
461 void xe_display_pm_resume(struct xe_device *xe)
462 {
463 	struct intel_display *display = &xe->display;
464 
465 	if (!xe->info.probe_display)
466 		return;
467 
468 	intel_dmc_resume(display);
469 
470 	if (has_display(xe))
471 		drm_mode_config_reset(&xe->drm);
472 
473 	intel_display_driver_init_hw(display);
474 
475 	if (has_display(xe))
476 		intel_display_driver_resume_access(display);
477 
478 	intel_hpd_init(xe);
479 
480 	if (has_display(xe)) {
481 		intel_display_driver_resume(display);
482 		drm_kms_helper_poll_enable(&xe->drm);
483 		intel_display_driver_enable_user_access(display);
484 	}
485 
486 	if (has_display(xe))
487 		intel_hpd_poll_disable(xe);
488 
489 	intel_opregion_resume(display);
490 
491 	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
492 
493 	intel_power_domains_enable(display);
494 }
495 
496 void xe_display_pm_runtime_resume(struct xe_device *xe)
497 {
498 	if (!xe->info.probe_display)
499 		return;
500 
501 	if (xe->d3cold.allowed) {
502 		xe_display_disable_d3cold(xe);
503 		return;
504 	}
505 
506 	intel_hpd_init(xe);
507 	intel_hpd_poll_disable(xe);
508 	skl_watermark_ipc_update(xe);
509 }
510 
511 
/* drmm cleanup action paired with intel_display_device_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}
518 
/*
 * Probe the display device.  When probing is disabled or no display
 * hardware is found, clear probe_display and strip the modeset/atomic
 * driver features so the rest of xe runs display-less.
 *
 * Returns: 0 on success (with or without display), negative error code
 * if registering the cleanup action fails.
 */
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	/* NOTE(review): return value is not error-checked here — confirm
	 * intel_display_device_probe() cannot fail for supported devices.
	 */
	display = intel_display_device_probe(pdev);

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	if (has_display(xe))
		return 0;

no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}
542