// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_display.h"
#include "regs/xe_irq_regs.h"

#include <linux/fb.h>

#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <uapi/drm/xe_drm.h>

#include "soc/intel_dram.h"
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
#include "xe_module.h"

/* Xe device functions */

static bool has_display(struct xe_device *xe)
{
	return HAS_DISPLAY(&xe->display);
}

/**
 * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
 * early on
 * @pdev: PCI device
 *
 * Returns: true if probe needs to be deferred, false otherwise
 */
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
	if (!xe_modparam.probe_display)
		return false;

	return intel_display_driver_probe_defer(pdev);
}

/**
 * xe_display_driver_set_hooks - Add driver flags and hooks for display
 * @driver: DRM device driver
 *
 * Set features and function hooks in @driver that are needed for driving the
 * display IP. This sets the driver's capability of driving display, regardless
 * of whether the device actually has display enabled.
 */
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
	if (!xe_modparam.probe_display)
		return;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
#endif

	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}

static void unset_display_features(struct xe_device *xe)
{
	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}

static void display_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	destroy_workqueue(xe->display.hotplug.dp_wq);
}

/**
 * xe_display_create - create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be xe->display.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_create(struct xe_device *xe)
{
	spin_lock_init(&xe->display.fb_tracking.lock);

	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
	if (!xe->display.hotplug.dp_wq)
		return -ENOMEM;

	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}

static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}

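/**
 * xe_display_init_early - Early display initialization, before IRQs and GEM
 * @xe: XE device instance
 *
 * Runs the pre-IRQ and pre-GEM stages of display bring-up: opregion setup,
 * DRAM detection, runtime display device info initialization and the
 * noirq/nogem probe steps. The matching teardown, xe_display_fini_early(),
 * is registered as a devm action.
 *
 * Returns: 0 on success, negative error code on failure.
 */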
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(xe);

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}

static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}

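/**
 * xe_display_init - Main display initialization
 * @xe: XE device instance
 *
 * Runs the main display driver probe and registers the corresponding
 * teardown, xe_display_fini(), as a devm action.
 *
 * Returns: 0 on success, negative error code on failure.
 */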
int xe_display_init(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	err = intel_display_driver_probe(display);
	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
}

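/**
 * xe_display_register - Register the display driver
 * @xe: XE device instance
 *
 * Registers the display driver and enables the display power domains.
 */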
void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
}

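/**
 * xe_display_unregister - Unregister the display driver
 * @xe: XE device instance
 *
 * Disables the display power domains and unregisters the display driver,
 * undoing xe_display_register().
 */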
void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}

/* IRQ-related functions */

void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (master_ctl & DISPLAY_IRQ)
		gen11_display_irq_handler(display);
}

void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (gu_misc_iir & GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

void xe_display_irq_reset(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	gen11_display_irq_reset(display);
}

void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (gt->info.id == XE_GT0)
		gen11_de_irq_postinstall(display);
}

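/* Return true when the target system sleep state is shallower than ACPI S3. */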
static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

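/*
 * Wait for the cleanup of the most recent pending atomic commit on each CRTC
 * to complete before continuing with suspend/shutdown.
 */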
static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}

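/*
 * Prepare the display for D3cold: disable power domains, flush pending commit
 * cleanup, suspend opregion and DMC, and switch hotplug detection to polling.
 */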
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	if (has_display(xe))
		intel_hpd_poll_enable(display);
}

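/*
 * Undo xe_display_enable_d3cold(): resume DMC and opregion, re-initialize the
 * display hardware and hotplug detection, and re-enable the power domains.
 */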
static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(display);

	if (has_display(xe))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}

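/**
 * xe_display_pm_suspend - Display handling during system suspend
 * @xe: XE device instance
 *
 * Quiesces the display for system suspend: disables power domains and
 * userspace access, suspends the display driver and encoders, cancels
 * hotplug work and suspends opregion and DMC.
 */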
void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);

	intel_hpd_cancel_work(display);

	if (has_display(xe)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(&xe->display);
	}

	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}

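/**
 * xe_display_pm_shutdown - Display handling during system shutdown/reboot
 * @xe: XE device instance
 *
 * Like xe_display_pm_suspend(), but additionally suspends DP MST and shuts
 * down all encoders.
 */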
void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_hpd_cancel_work(display);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}

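/**
 * xe_display_pm_runtime_suspend - Display handling during runtime suspend
 * @xe: XE device instance
 *
 * When D3cold is allowed, performs the full D3cold display preparation;
 * otherwise only switches hotplug detection to polling.
 */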
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		xe_display_enable_d3cold(xe);
		return;
	}

	intel_hpd_poll_enable(display);
}

void xe_display_pm_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	intel_display_power_suspend_late(display, s2idle);
}

void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}

void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}

void xe_display_pm_resume_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_power_resume_early(display);
}

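/**
 * xe_display_pm_resume - Display handling during system resume
 * @xe: XE device instance
 *
 * Reverses xe_display_pm_suspend(): resumes DMC, re-initializes the display
 * hardware and hotplug detection, resumes the display driver and opregion,
 * and re-enables userspace access and power domains.
 */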
void xe_display_pm_resume(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (has_display(xe))
		intel_display_driver_resume_access(display);

	intel_hpd_init(display);

	if (has_display(xe)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	if (has_display(xe))
		intel_hpd_poll_disable(display);

	intel_opregion_resume(display);

	drm_client_dev_resume(&xe->drm, false);

	intel_power_domains_enable(display);
}

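/**
 * xe_display_pm_runtime_resume - Display handling during runtime resume
 * @xe: XE device instance
 *
 * When D3cold was allowed, undoes the D3cold display preparation; otherwise
 * re-initializes hotplug detection and updates the watermark IPC state.
 */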
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		xe_display_disable_d3cold(xe);
		return;
	}

	intel_hpd_init(display);
	intel_hpd_poll_disable(display);
	skl_watermark_ipc_update(display);
}

static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}

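/**
 * xe_display_probe - Probe the display device
 * @xe: XE device instance
 *
 * Probes the display device info and registers its removal as a drmm action.
 * If display probing is disabled or no display is present, the display
 * features are cleared from the DRM driver.
 *
 * Returns: 0 on success, negative error code on failure.
 */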
int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev);

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	if (has_display(xe))
		return 0;

no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}