// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022-2023 Intel Corporation
 *
 * High level display driver entry points. This is a layer between top level
 * driver code and low level display functionality; no low level display code or
 * details here.
 */

#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bios.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"

/*
 * Check whether driver probe for @pdev should be deferred, either because
 * vga_switcheroo reports we are the inactive GPU, or because the panel's
 * privacy-screen provider has not registered yet.
 */
bool intel_display_driver_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

/*
 * Read out the current hardware cdclk configuration and seed the logical and
 * actual software cdclk state from it, then apply display workarounds.
 */
void intel_display_driver_init_hw(struct intel_display *display)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(display))
		return;

	cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);

	intel_update_cdclk(display);
	intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
	/* Start out with the software state matching the just-read-out HW state. */
	cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;

	intel_display_wa_apply(display);
}

/* Core mode config callbacks; all commits go through the atomic paths. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

/*
 * Initialize the drm_mode_config for @display, including the
 * platform-dependent framebuffer and cursor size limits.
 */
static void intel_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	drm_mode_config_init(display->drm);
	INIT_LIST_HEAD(&display->global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(display) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(display) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(display) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	/* Cursor size limits vary per platform generation. */
	if (display->platform.i845g || display->platform.i865g) {
		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (display->platform.i830 || display->platform.i85x ||
		   display->platform.i915g || display->platform.i915gm) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

/* Counterpart of intel_mode_config_init(). */
static void intel_mode_config_cleanup(struct intel_display *display)
{
	intel_atomic_global_obj_cleanup(display);
	drm_mode_config_cleanup(display->drm);
}

/* Each plane is only usable on the single CRTC of the pipe it belongs to. */
static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
	struct intel_plane *plane;

	for_each_intel_plane(display->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(display,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

/*
 * Earliest display init phase: detect the PCH and set up locks and the
 * per-platform function pointer hooks. No hardware access beyond detection.
 */
void intel_display_driver_early_probe(struct intel_display *display)
{
	/* This must be called before any calls to HAS_PCH_* */
	intel_pch_detect(display);

	if (!HAS_DISPLAY(display))
		return;

	spin_lock_init(&display->fb_tracking.lock);
	mutex_init(&display->backlight.lock);
	mutex_init(&display->audio.mutex);
	mutex_init(&display->wm.wm_mutex);
	mutex_init(&display->pps.mutex);
	mutex_init(&display->hdcp.hdcp_mutex);

	intel_display_irq_init(display);
	intel_dkl_phy_init(display);
	intel_color_init_hooks(display);
	intel_init_cdclk_hooks(display);
	intel_audio_hooks_init(display);
	intel_dpll_init_clock_hook(display);
	intel_init_display_hooks(display);
	intel_fdi_init_hook(display);
	intel_dmc_wl_init(display);
}

/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(display)) {
		ret = drm_vblank_init(display->drm,
				      INTEL_NUM_PIPES(display));
		if (ret)
			return ret;
	}

	intel_bios_init(display);

	ret = intel_vga_register(display);
	if (ret)
		goto cleanup_bios;

	intel_psr_dc5_dc6_wa_init(display);

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(display);
	if (ret < 0)
		goto cleanup_vga;

	intel_pmdemand_init_early(display);

	intel_power_domains_init_hw(display, false);

	if (!HAS_DISPLAY(display))
		return 0;

	intel_dmc_init(display);

	/*
	 * NOTE(review): workqueue allocations are not checked for failure
	 * here; presumably users cope with a NULL wq or failure is considered
	 * not worth handling this early — confirm against callers.
	 */
	display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					   WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
	display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);

	intel_mode_config_init(display);

	ret = intel_cdclk_init(display);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_color_init(display);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(display);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(display);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_pmdemand_init(display);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	intel_init_quirks(display);

	intel_fbc_init(display);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(display);
	intel_power_domains_driver_remove(display);
cleanup_vga:
	intel_vga_unregister(display);
cleanup_bios:
	intel_bios_driver_remove(display);

	return ret;
}

/*
 * Update the display HW access policy under all modeset locks, retrying the
 * lock acquisition on deadlock via intel_modeset_lock_ctx_retry().
 */
static void set_display_access(struct intel_display *display,
			       bool any_task_allowed,
			       struct task_struct *allowed_task)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
		err = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (err)
			continue;

		display->access.any_task_allowed = any_task_allowed;
		display->access.allowed_task = allowed_task;
	}

	drm_WARN_ON(display->drm, err);
}

/**
 * intel_display_driver_enable_user_access - Enable display HW access for all threads
 * @display: display device instance
 *
 * Enable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing.
 *
 * This function should be called during driver loading and system resume once
 * all the HW initialization steps are done.
 */
void intel_display_driver_enable_user_access(struct intel_display *display)
{
	set_display_access(display, true, NULL);

	intel_hpd_enable_detection_work(display);
}

/**
 * intel_display_driver_disable_user_access - Disable display HW access for user threads
 * @display: display device instance
 *
 * Disable the display HW access for user threads. Examples for such accesses
 * are modeset commits and connector probing. For the current thread the
 * access is still enabled, which should only perform HW init/deinit
 * programming (as the initial modeset during driver loading or the disabling
 * modeset during driver unloading and system suspend/shutdown). This function
 * should be followed by calling either intel_display_driver_enable_user_access()
 * after completing the HW init programming or
 * intel_display_driver_suspend_access() after completing the HW deinit
 * programming.
 *
 * This function should be called during driver loading/unloading and system
 * suspend/shutdown before starting the HW init/deinit programming.
 */
void intel_display_driver_disable_user_access(struct intel_display *display)
{
	intel_hpd_disable_detection_work(display);

	set_display_access(display, false, current);
}

/**
 * intel_display_driver_suspend_access - Suspend display HW access for all threads
 * @display: display device instance
 *
 * Disable the display HW access for all threads. Examples for such accesses
 * are modeset commits and connector probing. This call should be either
 * followed by calling intel_display_driver_resume_access(), or the driver
 * should be unloaded/shutdown.
 *
 * This function should be called during driver unloading and system
 * suspend/shutdown after completing the HW deinit programming.
 */
void intel_display_driver_suspend_access(struct intel_display *display)
{
	set_display_access(display, false, NULL);
}

/**
 * intel_display_driver_resume_access - Resume display HW access for the resume thread
 * @display: display device instance
 *
 * Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples for such accesses
 * are modeset commits and connector probing. The resume thread should only
 * perform HW init programming (as the restoring modeset). This function
 * should be followed by calling intel_display_driver_enable_user_access(),
 * after completing the HW init programming steps.
 *
 * This function should be called during system resume before starting the HW
 * init steps.
 */
void intel_display_driver_resume_access(struct intel_display *display)
{
	set_display_access(display, false, current);
}

/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
 * @display: display device instance
 *
 * Check whether the current thread has display HW access, print a debug
 * message if it doesn't. Such accesses are modeset commits and connector
 * probing. If the function returns %false any HW access should be prevented.
 *
 * Returns %true if the current thread has display HW access, %false
 * otherwise.
 */
bool intel_display_driver_check_access(struct intel_display *display)
{
	/* "comm[pid]" strings; +16 leaves room for the bracketed pid. */
	char current_task[TASK_COMM_LEN + 16];
	char allowed_task[TASK_COMM_LEN + 16] = "none";

	if (display->access.any_task_allowed ||
	    display->access.allowed_task == current)
		return true;

	snprintf(current_task, sizeof(current_task), "%s[%d]",
		 current->comm, task_pid_vnr(current));

	if (display->access.allowed_task)
		snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
			 display->access.allowed_task->comm,
			 task_pid_vnr(display->access.allowed_task));

	drm_dbg_kms(display->drm,
		    "Reject display access from task %s (allowed to %s)\n",
		    current_task, allowed_task);

	return false;
}

/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
	enum pipe pipe;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	intel_wm_init(display);

	intel_panel_sanitize_ssc(display);

	intel_pps_setup(display);

	intel_gmbus_setup(display);

	drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(display),
		    INTEL_NUM_PIPES(display) > 1 ? "s" : "");

	for_each_pipe(display, pipe) {
		ret = intel_crtc_init(display, pipe);
		if (ret)
			goto err_mode_config;
	}

	intel_plane_possible_crtcs_init(display);
	intel_shared_dpll_init(display);
	intel_fdi_pll_freq_update(display);

	intel_update_czclk(display);
	intel_display_driver_init_hw(display);
	intel_dpll_update_ref_clks(display);

	if (display->cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(display);

	intel_hti_init(display);

	/* Just disable it once at startup */
	intel_setup_outputs(display);

	ret = intel_dp_tunnel_mgr_init(display);
	if (ret)
		goto err_hdcp;

	/* Only the boot thread may touch the HW until the initial modeset is done. */
	intel_display_driver_disable_user_access(display);

	drm_modeset_lock_all(display->drm);
	intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(display);
	drm_modeset_unlock_all(display->drm);

	intel_initial_plane_config(display);

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(display))
		ilk_wm_sanitize(display);

	return 0;

err_hdcp:
	intel_hdcp_component_fini(display);
err_mode_config:
	intel_mode_config_cleanup(display);

	return ret;
}

/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	/*
	 * This will bind stuff into ggtt, so it needs to be done after
	 * the BIOS fb takeover and whatever else magic ggtt reservations
	 * happen during gem/ggtt init.
	 */
	intel_hdcp_component_init(display);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(display);
	if (ret)
		drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(display);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(display);

	skl_watermark_ipc_init(display);

	return 0;
}

/* Register with userspace-facing interfaces; call after all probe phases. */
void intel_display_driver_register(struct intel_display *display)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "i915 display info:");

	if (!HAS_DISPLAY(display))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(display);
	intel_acpi_video_register(display);

	intel_audio_init(display);

	intel_display_driver_enable_user_access(display);

	intel_audio_register(display);

	intel_display_debugfs_register(display);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(display->drm);
	intel_hpd_poll_disable(display);

	intel_fbdev_setup(display);

	intel_display_device_info_print(DISPLAY_INFO(display),
					DISPLAY_RUNTIME_INFO(display), &p);

	intel_register_dsm_handler();
}

/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	/* Drain pending work before tearing anything down. */
	flush_workqueue(display->wq.flip);
	flush_workqueue(display->wq.modeset);
	flush_workqueue(display->wq.cleanup);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(display);
}

/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (!HAS_DISPLAY(display))
		return;

	intel_display_driver_suspend_access(display);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(display);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_workqueue(i915->unordered_wq);

	intel_hdcp_component_fini(display);

	intel_mode_config_cleanup(display);

	intel_dp_tunnel_mgr_cleanup(display);

	intel_overlay_cleanup(display);

	intel_gmbus_teardown(display);

	destroy_workqueue(display->wq.flip);
	destroy_workqueue(display->wq.modeset);
	destroy_workqueue(display->wq.cleanup);

	intel_fbc_cleanup(display);
}

/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct intel_display *display)
{
	/* Unwind in reverse of intel_display_driver_probe_noirq(). */
	intel_dmc_fini(display);

	intel_power_domains_driver_remove(display);

	intel_vga_unregister(display);

	intel_bios_driver_remove(display);
}

/* Counterpart of intel_display_driver_register(). */
void intel_display_driver_unregister(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	intel_unregister_dsm_handler();

	drm_client_dev_unregister(display->drm);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(display->drm);

	intel_display_driver_disable_user_access(display);

	intel_audio_deinit(display);

	drm_atomic_helper_shutdown(display->drm);

	acpi_video_unregister();
	intel_opregion_unregister(display);
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_driver_suspend(struct intel_display *display)
{
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(display))
		return 0;

	state = drm_atomic_helper_suspend(display->drm);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(display->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		/* Kept for intel_display_driver_resume() to restore. */
		display->restore.modeset_state = state;

	/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
	flush_workqueue(display->wq.cleanup);

	intel_dp_mst_suspend(display);

	return ret;
}

/*
 * Read out the HW state and commit the modeset state saved at suspend time.
 * @state may be NULL, in which case only the HW state readout is done.
 * Returns 0 or a negative error code; never returns -EDEADLK (warned on).
 */
int
__intel_display_driver_resume(struct intel_display *display,
			      struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(display, ctx);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(display))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(display->drm, ret == -EDEADLK);

	return ret;
}

/* System-resume entry point: restore the modeset state saved at suspend. */
void intel_display_driver_resume(struct intel_display *display)
{
	struct drm_atomic_state *state = display->restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(display))
		return;

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(display);

	display->restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Take all modeset locks, backing off on deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_driver_resume(display, state, &ctx);

	skl_watermark_ipc_update(display);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(display->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}