1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #include <linux/acpi.h> 31 #include <linux/device.h> 32 #include <linux/module.h> 33 #include <linux/oom.h> 34 #include <linux/pci.h> 35 #include <linux/pm.h> 36 #include <linux/pm_runtime.h> 37 #include <linux/slab.h> 38 #include <linux/string_helpers.h> 39 #include <linux/vga_switcheroo.h> 40 #include <linux/vt.h> 41 42 #include <drm/drm_aperture.h> 43 #include <drm/drm_atomic_helper.h> 44 #include <drm/drm_ioctl.h> 45 #include <drm/drm_managed.h> 46 #include <drm/drm_probe_helper.h> 47 48 #include "display/intel_acpi.h" 49 #include "display/intel_bw.h" 50 #include "display/intel_cdclk.h" 51 #include "display/intel_display_driver.h" 52 #include "display/intel_display_types.h" 53 #include "display/intel_dmc.h" 54 #include "display/intel_dp.h" 55 #include "display/intel_dpt.h" 56 #include "display/intel_fbdev.h" 57 #include "display/intel_hotplug.h" 58 #include "display/intel_overlay.h" 59 #include "display/intel_pch_refclk.h" 60 #include "display/intel_pipe_crc.h" 61 #include "display/intel_pps.h" 62 #include "display/intel_sprite.h" 63 #include "display/intel_vga.h" 64 #include "display/skl_watermark.h" 65 66 #include "gem/i915_gem_context.h" 67 #include "gem/i915_gem_create.h" 68 #include "gem/i915_gem_dmabuf.h" 69 #include "gem/i915_gem_ioctls.h" 70 #include "gem/i915_gem_mman.h" 71 #include "gem/i915_gem_pm.h" 72 #include "gt/intel_gt.h" 73 #include "gt/intel_gt_pm.h" 74 #include "gt/intel_gt_print.h" 75 #include "gt/intel_rc6.h" 76 77 #include "pxp/intel_pxp.h" 78 #include "pxp/intel_pxp_debugfs.h" 79 #include "pxp/intel_pxp_pm.h" 80 81 #include "soc/intel_dram.h" 82 #include "soc/intel_gmch.h" 83 84 #include "i915_debugfs.h" 85 #include "i915_driver.h" 86 #include "i915_drm_client.h" 87 #include "i915_drv.h" 88 #include "i915_file_private.h" 89 #include "i915_getparam.h" 90 #include "i915_hwmon.h" 91 #include "i915_ioc32.h" 92 #include "i915_ioctl.h" 93 #include "i915_irq.h" 94 #include "i915_memcpy.h" 95 #include "i915_perf.h" 96 #include "i915_query.h" 97 #include "i915_suspend.h" 98 #include "i915_switcheroo.h" 99 #include "i915_sysfs.h" 100 #include "i915_utils.h" 101 #include "i915_vgpu.h" 102 #include "intel_clock_gating.h" 103 #include "intel_gvt.h" 104 #include "intel_memory_region.h" 105 #include "intel_pci_config.h" 106 #include "intel_pcode.h" 107 #include "intel_region_ttm.h" 108 #include "vlv_suspend.h" 109 110 static const struct drm_driver i915_drm_driver; 111 112 static int i915_workqueues_init(struct drm_i915_private *dev_priv) 113 { 114 /* 115 * The i915 workqueue is primarily used for batched retirement of 116 * requests (and thus managing bo) once the task has been completed 117 * by the GPU. i915_retire_requests() is called directly when we 118 * need high-priority retirement, such as waiting for an explicit 119 * bo. 120 * 121 * It is also used for periodic low-priority events, such as 122 * idle-timers and recording error state. 123 * 124 * All tasks on the workqueue are expected to acquire the dev mutex 125 * so there is no point in running more than one instance of the 126 * workqueue at any time. Use an ordered one. 127 */ 128 dev_priv->wq = alloc_ordered_workqueue("i915", 0); 129 if (dev_priv->wq == NULL) 130 goto out_err; 131 132 dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); 133 if (dev_priv->display.hotplug.dp_wq == NULL) 134 goto out_free_wq; 135 136 /* 137 * The unordered i915 workqueue should be used for all work 138 * scheduling that do not require running in order, which used 139 * to be scheduled on the system_wq before moving to a driver 140 * instance due deprecation of flush_scheduled_work(). 141 */ 142 dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0); 143 if (dev_priv->unordered_wq == NULL) 144 goto out_free_dp_wq; 145 146 return 0; 147 148 out_free_dp_wq: 149 destroy_workqueue(dev_priv->display.hotplug.dp_wq); 150 out_free_wq: 151 destroy_workqueue(dev_priv->wq); 152 out_err: 153 drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n"); 154 155 return -ENOMEM; 156 } 157 158 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) 159 { 160 destroy_workqueue(dev_priv->unordered_wq); 161 destroy_workqueue(dev_priv->display.hotplug.dp_wq); 162 destroy_workqueue(dev_priv->wq); 163 } 164 165 /* 166 * We don't keep the workarounds for pre-production hardware, so we expect our 167 * driver to fail on these machines in one way or another. A little warning on 168 * dmesg may help both the user and the bug triagers. 169 * 170 * Our policy for removing pre-production workarounds is to keep the 171 * current gen workarounds as a guide to the bring-up of the next gen 172 * (workarounds have a habit of persisting!). Anything older than that 173 * should be removed along with the complications they introduce. 174 */ 175 static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) 176 { 177 bool pre = false; 178 179 pre |= IS_HASWELL_EARLY_SDV(dev_priv); 180 pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6; 181 pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA; 182 pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 183 pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3; 184 pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7; 185 pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 186 pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 187 pre |= IS_DG2_G10(dev_priv) && INTEL_REVID(dev_priv) < 0x8; 188 pre |= IS_DG2_G11(dev_priv) && INTEL_REVID(dev_priv) < 0x5; 189 pre |= IS_DG2_G12(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 190 191 if (pre) { 192 drm_err(&dev_priv->drm, "This is a pre-production stepping. " 193 "It may not be fully functional.\n"); 194 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); 195 } 196 } 197 198 static void sanitize_gpu(struct drm_i915_private *i915) 199 { 200 if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) { 201 struct intel_gt *gt; 202 unsigned int i; 203 204 for_each_gt(gt, i915, i) 205 __intel_gt_reset(gt, ALL_ENGINES); 206 } 207 } 208 209 /** 210 * i915_driver_early_probe - setup state not requiring device access 211 * @dev_priv: device private 212 * 213 * Initialize everything that is a "SW-only" state, that is state not 214 * requiring accessing the device or exposing the driver via kernel internal 215 * or userspace interfaces. Example steps belonging here: lock initialization, 216 * system memory allocation, setting up device specific attributes and 217 * function hooks not requiring accessing the device. 218 */ 219 static int i915_driver_early_probe(struct drm_i915_private *dev_priv) 220 { 221 int ret = 0; 222 223 if (i915_inject_probe_failure(dev_priv)) 224 return -ENODEV; 225 226 intel_device_info_runtime_init_early(dev_priv); 227 228 intel_step_init(dev_priv); 229 230 intel_uncore_mmio_debug_init_early(dev_priv); 231 232 spin_lock_init(&dev_priv->irq_lock); 233 spin_lock_init(&dev_priv->gpu_error.lock); 234 235 mutex_init(&dev_priv->sb_lock); 236 cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); 237 238 i915_memcpy_init_early(dev_priv); 239 intel_runtime_pm_init_early(&dev_priv->runtime_pm); 240 241 ret = i915_workqueues_init(dev_priv); 242 if (ret < 0) 243 return ret; 244 245 ret = vlv_suspend_init(dev_priv); 246 if (ret < 0) 247 goto err_workqueues; 248 249 ret = intel_region_ttm_device_init(dev_priv); 250 if (ret) 251 goto err_ttm; 252 253 ret = intel_root_gt_init_early(dev_priv); 254 if (ret < 0) 255 goto err_rootgt; 256 257 i915_gem_init_early(dev_priv); 258 259 /* This must be called before any calls to HAS_PCH_* */ 260 intel_detect_pch(dev_priv); 261 262 intel_irq_init(dev_priv); 263 intel_display_driver_early_probe(dev_priv); 264 intel_clock_gating_hooks_init(dev_priv); 265 266 intel_detect_preproduction_hw(dev_priv); 267 268 return 0; 269 270 err_rootgt: 271 intel_region_ttm_device_fini(dev_priv); 272 err_ttm: 273 vlv_suspend_cleanup(dev_priv); 274 err_workqueues: 275 i915_workqueues_cleanup(dev_priv); 276 return ret; 277 } 278 279 /** 280 * i915_driver_late_release - cleanup the setup done in 281 * i915_driver_early_probe() 282 * @dev_priv: device private 283 */ 284 static void i915_driver_late_release(struct drm_i915_private *dev_priv) 285 { 286 intel_irq_fini(dev_priv); 287 intel_power_domains_cleanup(dev_priv); 288 i915_gem_cleanup_early(dev_priv); 289 intel_gt_driver_late_release_all(dev_priv); 290 intel_region_ttm_device_fini(dev_priv); 291 vlv_suspend_cleanup(dev_priv); 292 i915_workqueues_cleanup(dev_priv); 293 294 cpu_latency_qos_remove_request(&dev_priv->sb_qos); 295 mutex_destroy(&dev_priv->sb_lock); 296 297 i915_params_free(&dev_priv->params); 298 } 299 300 /** 301 * i915_driver_mmio_probe - setup device MMIO 302 * @dev_priv: device private 303 * 304 * Setup minimal device state necessary for MMIO accesses later in the 305 * initialization sequence. The setup here should avoid any other device-wide 306 * side effects or exposing the driver via kernel internal or user space 307 * interfaces. 308 */ 309 static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) 310 { 311 struct intel_gt *gt; 312 int ret, i; 313 314 if (i915_inject_probe_failure(dev_priv)) 315 return -ENODEV; 316 317 ret = intel_gmch_bridge_setup(dev_priv); 318 if (ret < 0) 319 return ret; 320 321 for_each_gt(gt, dev_priv, i) { 322 ret = intel_uncore_init_mmio(gt->uncore); 323 if (ret) 324 return ret; 325 326 ret = drmm_add_action_or_reset(&dev_priv->drm, 327 intel_uncore_fini_mmio, 328 gt->uncore); 329 if (ret) 330 return ret; 331 } 332 333 /* Try to make sure MCHBAR is enabled before poking at it */ 334 intel_gmch_bar_setup(dev_priv); 335 intel_device_info_runtime_init(dev_priv); 336 intel_display_device_info_runtime_init(dev_priv); 337 338 for_each_gt(gt, dev_priv, i) { 339 ret = intel_gt_init_mmio(gt); 340 if (ret) 341 goto err_uncore; 342 } 343 344 /* As early as possible, scrub existing GPU state before clobbering */ 345 sanitize_gpu(dev_priv); 346 347 return 0; 348 349 err_uncore: 350 intel_gmch_bar_teardown(dev_priv); 351 352 return ret; 353 } 354 355 /** 356 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe() 357 * @dev_priv: device private 358 */ 359 static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) 360 { 361 intel_gmch_bar_teardown(dev_priv); 362 } 363 364 /** 365 * i915_set_dma_info - set all relevant PCI dma info as configured for the 366 * platform 367 * @i915: valid i915 instance 368 * 369 * Set the dma max segment size, device and coherent masks. The dma mask set 370 * needs to occur before i915_ggtt_probe_hw. 371 * 372 * A couple of platforms have special needs. Address them as well. 373 * 374 */ 375 static int i915_set_dma_info(struct drm_i915_private *i915) 376 { 377 unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size; 378 int ret; 379 380 GEM_BUG_ON(!mask_size); 381 382 /* 383 * We don't have a max segment size, so set it to the max so sg's 384 * debugging layer doesn't complain 385 */ 386 dma_set_max_seg_size(i915->drm.dev, UINT_MAX); 387 388 ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size)); 389 if (ret) 390 goto mask_err; 391 392 /* overlay on gen2 is broken and can't address above 1G */ 393 if (GRAPHICS_VER(i915) == 2) 394 mask_size = 30; 395 396 /* 397 * 965GM sometimes incorrectly writes to hardware status page (HWS) 398 * using 32bit addressing, overwriting memory if HWS is located 399 * above 4GB. 400 * 401 * The documentation also mentions an issue with undefined 402 * behaviour if any general state is accessed within a page above 4GB, 403 * which also needs to be handled carefully. 404 */ 405 if (IS_I965G(i915) || IS_I965GM(i915)) 406 mask_size = 32; 407 408 ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size)); 409 if (ret) 410 goto mask_err; 411 412 return 0; 413 414 mask_err: 415 drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret); 416 return ret; 417 } 418 419 static int i915_pcode_init(struct drm_i915_private *i915) 420 { 421 struct intel_gt *gt; 422 int id, ret; 423 424 for_each_gt(gt, i915, id) { 425 ret = intel_pcode_init(gt->uncore); 426 if (ret) { 427 gt_err(gt, "intel_pcode_init failed %d\n", ret); 428 return ret; 429 } 430 } 431 432 return 0; 433 } 434 435 /** 436 * i915_driver_hw_probe - setup state requiring device access 437 * @dev_priv: device private 438 * 439 * Setup state that requires accessing the device, but doesn't require 440 * exposing the driver via kernel internal or userspace interfaces. 441 */ 442 static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) 443 { 444 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 445 int ret; 446 447 if (i915_inject_probe_failure(dev_priv)) 448 return -ENODEV; 449 450 if (HAS_PPGTT(dev_priv)) { 451 if (intel_vgpu_active(dev_priv) && 452 !intel_vgpu_has_full_ppgtt(dev_priv)) { 453 i915_report_error(dev_priv, 454 "incompatible vGPU found, support for isolated ppGTT required\n"); 455 return -ENXIO; 456 } 457 } 458 459 if (HAS_EXECLISTS(dev_priv)) { 460 /* 461 * Older GVT emulation depends upon intercepting CSB mmio, 462 * which we no longer use, preferring to use the HWSP cache 463 * instead. 464 */ 465 if (intel_vgpu_active(dev_priv) && 466 !intel_vgpu_has_hwsp_emulation(dev_priv)) { 467 i915_report_error(dev_priv, 468 "old vGPU host found, support for HWSP emulation required\n"); 469 return -ENXIO; 470 } 471 } 472 473 /* needs to be done before ggtt probe */ 474 intel_dram_edram_detect(dev_priv); 475 476 ret = i915_set_dma_info(dev_priv); 477 if (ret) 478 return ret; 479 480 ret = i915_perf_init(dev_priv); 481 if (ret) 482 return ret; 483 484 ret = i915_ggtt_probe_hw(dev_priv); 485 if (ret) 486 goto err_perf; 487 488 ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver); 489 if (ret) 490 goto err_ggtt; 491 492 ret = i915_ggtt_init_hw(dev_priv); 493 if (ret) 494 goto err_ggtt; 495 496 /* 497 * Make sure we probe lmem before we probe stolen-lmem. The BAR size 498 * might be different due to bar resizing. 499 */ 500 ret = intel_gt_tiles_init(dev_priv); 501 if (ret) 502 goto err_ggtt; 503 504 ret = intel_memory_regions_hw_probe(dev_priv); 505 if (ret) 506 goto err_ggtt; 507 508 ret = i915_ggtt_enable_hw(dev_priv); 509 if (ret) { 510 drm_err(&dev_priv->drm, "failed to enable GGTT\n"); 511 goto err_mem_regions; 512 } 513 514 pci_set_master(pdev); 515 516 /* On the 945G/GM, the chipset reports the MSI capability on the 517 * integrated graphics even though the support isn't actually there 518 * according to the published specs. It doesn't appear to function 519 * correctly in testing on 945G. 520 * This may be a side effect of MSI having been made available for PEG 521 * and the registers being closely associated. 522 * 523 * According to chipset errata, on the 965GM, MSI interrupts may 524 * be lost or delayed, and was defeatured. MSI interrupts seem to 525 * get lost on g4x as well, and interrupt delivery seems to stay 526 * properly dead afterwards. So we'll just disable them for all 527 * pre-gen5 chipsets. 528 * 529 * dp aux and gmbus irq on gen4 seems to be able to generate legacy 530 * interrupts even when in MSI mode. This results in spurious 531 * interrupt warnings if the legacy irq no. is shared with another 532 * device. The kernel then disables that interrupt source and so 533 * prevents the other device from working properly. 534 */ 535 if (GRAPHICS_VER(dev_priv) >= 5) { 536 if (pci_enable_msi(pdev) < 0) 537 drm_dbg(&dev_priv->drm, "can't enable MSI"); 538 } 539 540 ret = intel_gvt_init(dev_priv); 541 if (ret) 542 goto err_msi; 543 544 intel_opregion_setup(dev_priv); 545 546 ret = i915_pcode_init(dev_priv); 547 if (ret) 548 goto err_opregion; 549 550 /* 551 * Fill the dram structure to get the system dram info. This will be 552 * used for memory latency calculation. 553 */ 554 intel_dram_detect(dev_priv); 555 556 intel_bw_init_hw(dev_priv); 557 558 return 0; 559 560 err_opregion: 561 intel_opregion_cleanup(dev_priv); 562 err_msi: 563 if (pdev->msi_enabled) 564 pci_disable_msi(pdev); 565 err_mem_regions: 566 intel_memory_regions_driver_release(dev_priv); 567 err_ggtt: 568 i915_ggtt_driver_release(dev_priv); 569 i915_gem_drain_freed_objects(dev_priv); 570 i915_ggtt_driver_late_release(dev_priv); 571 err_perf: 572 i915_perf_fini(dev_priv); 573 return ret; 574 } 575 576 /** 577 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe() 578 * @dev_priv: device private 579 */ 580 static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) 581 { 582 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 583 584 i915_perf_fini(dev_priv); 585 586 intel_opregion_cleanup(dev_priv); 587 588 if (pdev->msi_enabled) 589 pci_disable_msi(pdev); 590 } 591 592 /** 593 * i915_driver_register - register the driver with the rest of the system 594 * @dev_priv: device private 595 * 596 * Perform any steps necessary to make the driver available via kernel 597 * internal or userspace interfaces. 598 */ 599 static void i915_driver_register(struct drm_i915_private *dev_priv) 600 { 601 struct intel_gt *gt; 602 unsigned int i; 603 604 i915_gem_driver_register(dev_priv); 605 i915_pmu_register(dev_priv); 606 607 intel_vgpu_register(dev_priv); 608 609 /* Reveal our presence to userspace */ 610 if (drm_dev_register(&dev_priv->drm, 0)) { 611 drm_err(&dev_priv->drm, 612 "Failed to register driver for userspace access!\n"); 613 return; 614 } 615 616 i915_debugfs_register(dev_priv); 617 i915_setup_sysfs(dev_priv); 618 619 /* Depends on sysfs having been initialized */ 620 i915_perf_register(dev_priv); 621 622 for_each_gt(gt, dev_priv, i) 623 intel_gt_driver_register(gt); 624 625 intel_pxp_debugfs_register(dev_priv->pxp); 626 627 i915_hwmon_register(dev_priv); 628 629 intel_display_driver_register(dev_priv); 630 631 intel_power_domains_enable(dev_priv); 632 intel_runtime_pm_enable(&dev_priv->runtime_pm); 633 634 intel_register_dsm_handler(); 635 636 if (i915_switcheroo_register(dev_priv)) 637 drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n"); 638 } 639 640 /** 641 * i915_driver_unregister - cleanup the registration done in i915_driver_regiser() 642 * @dev_priv: device private 643 */ 644 static void i915_driver_unregister(struct drm_i915_private *dev_priv) 645 { 646 struct intel_gt *gt; 647 unsigned int i; 648 649 i915_switcheroo_unregister(dev_priv); 650 651 intel_unregister_dsm_handler(); 652 653 intel_runtime_pm_disable(&dev_priv->runtime_pm); 654 intel_power_domains_disable(dev_priv); 655 656 intel_display_driver_unregister(dev_priv); 657 658 intel_pxp_fini(dev_priv); 659 660 for_each_gt(gt, dev_priv, i) 661 intel_gt_driver_unregister(gt); 662 663 i915_hwmon_unregister(dev_priv); 664 665 i915_perf_unregister(dev_priv); 666 i915_pmu_unregister(dev_priv); 667 668 i915_teardown_sysfs(dev_priv); 669 drm_dev_unplug(&dev_priv->drm); 670 671 i915_gem_driver_unregister(dev_priv); 672 } 673 674 void 675 i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p) 676 { 677 drm_printf(p, "iommu: %s\n", 678 str_enabled_disabled(i915_vtd_active(i915))); 679 } 680 681 static void i915_welcome_messages(struct drm_i915_private *dev_priv) 682 { 683 if (drm_debug_enabled(DRM_UT_DRIVER)) { 684 struct drm_printer p = drm_debug_printer("i915 device info:"); 685 struct intel_gt *gt; 686 unsigned int i; 687 688 drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", 689 INTEL_DEVID(dev_priv), 690 INTEL_REVID(dev_priv), 691 intel_platform_name(INTEL_INFO(dev_priv)->platform), 692 intel_subplatform(RUNTIME_INFO(dev_priv), 693 INTEL_INFO(dev_priv)->platform), 694 GRAPHICS_VER(dev_priv)); 695 696 intel_device_info_print(INTEL_INFO(dev_priv), 697 RUNTIME_INFO(dev_priv), &p); 698 i915_print_iommu_status(dev_priv, &p); 699 for_each_gt(gt, dev_priv, i) 700 intel_gt_info_print(>->info, &p); 701 } 702 703 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) 704 drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n"); 705 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 706 drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n"); 707 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) 708 drm_info(&dev_priv->drm, 709 "DRM_I915_DEBUG_RUNTIME_PM enabled\n"); 710 } 711 712 static struct drm_i915_private * 713 i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) 714 { 715 const struct intel_device_info *match_info = 716 (struct intel_device_info *)ent->driver_data; 717 struct drm_i915_private *i915; 718 719 i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, 720 struct drm_i915_private, drm); 721 if (IS_ERR(i915)) 722 return i915; 723 724 pci_set_drvdata(pdev, i915); 725 726 /* Device parameters start as a copy of module parameters. */ 727 i915_params_copy(&i915->params, &i915_modparams); 728 729 /* Set up device info and initial runtime info. */ 730 intel_device_info_driver_create(i915, pdev->device, match_info); 731 732 intel_display_device_probe(i915); 733 734 return i915; 735 } 736 737 /** 738 * i915_driver_probe - setup chip and create an initial config 739 * @pdev: PCI device 740 * @ent: matching PCI ID entry 741 * 742 * The driver probe routine has to do several things: 743 * - drive output discovery via intel_display_driver_probe() 744 * - initialize the memory manager 745 * - allocate initial config memory 746 * - setup the DRM framebuffer with the allocated memory 747 */ 748 int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 749 { 750 struct drm_i915_private *i915; 751 int ret; 752 753 ret = pci_enable_device(pdev); 754 if (ret) { 755 pr_err("Failed to enable graphics device: %pe\n", ERR_PTR(ret)); 756 return ret; 757 } 758 759 i915 = i915_driver_create(pdev, ent); 760 if (IS_ERR(i915)) { 761 pci_disable_device(pdev); 762 return PTR_ERR(i915); 763 } 764 765 ret = i915_driver_early_probe(i915); 766 if (ret < 0) 767 goto out_pci_disable; 768 769 disable_rpm_wakeref_asserts(&i915->runtime_pm); 770 771 intel_vgpu_detect(i915); 772 773 ret = intel_gt_probe_all(i915); 774 if (ret < 0) 775 goto out_runtime_pm_put; 776 777 ret = i915_driver_mmio_probe(i915); 778 if (ret < 0) 779 goto out_runtime_pm_put; 780 781 ret = i915_driver_hw_probe(i915); 782 if (ret < 0) 783 goto out_cleanup_mmio; 784 785 ret = intel_display_driver_probe_noirq(i915); 786 if (ret < 0) 787 goto out_cleanup_hw; 788 789 ret = intel_irq_install(i915); 790 if (ret) 791 goto out_cleanup_modeset; 792 793 ret = intel_display_driver_probe_nogem(i915); 794 if (ret) 795 goto out_cleanup_irq; 796 797 ret = i915_gem_init(i915); 798 if (ret) 799 goto out_cleanup_modeset2; 800 801 intel_pxp_init(i915); 802 803 ret = intel_display_driver_probe(i915); 804 if (ret) 805 goto out_cleanup_gem; 806 807 i915_driver_register(i915); 808 809 enable_rpm_wakeref_asserts(&i915->runtime_pm); 810 811 i915_welcome_messages(i915); 812 813 i915->do_release = true; 814 815 return 0; 816 817 out_cleanup_gem: 818 i915_gem_suspend(i915); 819 i915_gem_driver_remove(i915); 820 i915_gem_driver_release(i915); 821 out_cleanup_modeset2: 822 /* FIXME clean up the error path */ 823 intel_display_driver_remove(i915); 824 intel_irq_uninstall(i915); 825 intel_display_driver_remove_noirq(i915); 826 goto out_cleanup_modeset; 827 out_cleanup_irq: 828 intel_irq_uninstall(i915); 829 out_cleanup_modeset: 830 intel_display_driver_remove_nogem(i915); 831 out_cleanup_hw: 832 i915_driver_hw_remove(i915); 833 intel_memory_regions_driver_release(i915); 834 i915_ggtt_driver_release(i915); 835 i915_gem_drain_freed_objects(i915); 836 i915_ggtt_driver_late_release(i915); 837 out_cleanup_mmio: 838 i915_driver_mmio_release(i915); 839 out_runtime_pm_put: 840 enable_rpm_wakeref_asserts(&i915->runtime_pm); 841 i915_driver_late_release(i915); 842 out_pci_disable: 843 pci_disable_device(pdev); 844 i915_probe_error(i915, "Device initialization failed (%d)\n", ret); 845 return ret; 846 } 847 848 void i915_driver_remove(struct drm_i915_private *i915) 849 { 850 intel_wakeref_t wakeref; 851 852 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 853 854 i915_driver_unregister(i915); 855 856 /* Flush any external code that still may be under the RCU lock */ 857 synchronize_rcu(); 858 859 i915_gem_suspend(i915); 860 861 intel_gvt_driver_remove(i915); 862 863 intel_display_driver_remove(i915); 864 865 intel_irq_uninstall(i915); 866 867 intel_display_driver_remove_noirq(i915); 868 869 i915_reset_error_state(i915); 870 i915_gem_driver_remove(i915); 871 872 intel_display_driver_remove_nogem(i915); 873 874 i915_driver_hw_remove(i915); 875 876 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 877 } 878 879 static void i915_driver_release(struct drm_device *dev) 880 { 881 struct drm_i915_private *dev_priv = to_i915(dev); 882 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 883 intel_wakeref_t wakeref; 884 885 if (!dev_priv->do_release) 886 return; 887 888 wakeref = intel_runtime_pm_get(rpm); 889 890 i915_gem_driver_release(dev_priv); 891 892 intel_memory_regions_driver_release(dev_priv); 893 i915_ggtt_driver_release(dev_priv); 894 i915_gem_drain_freed_objects(dev_priv); 895 i915_ggtt_driver_late_release(dev_priv); 896 897 i915_driver_mmio_release(dev_priv); 898 899 intel_runtime_pm_put(rpm, wakeref); 900 901 intel_runtime_pm_driver_release(rpm); 902 903 i915_driver_late_release(dev_priv); 904 905 intel_display_device_remove(dev_priv); 906 } 907 908 static int i915_driver_open(struct drm_device *dev, struct drm_file *file) 909 { 910 struct drm_i915_private *i915 = to_i915(dev); 911 int ret; 912 913 ret = i915_gem_open(i915, file); 914 if (ret) 915 return ret; 916 917 return 0; 918 } 919 920 /** 921 * i915_driver_lastclose - clean up after all DRM clients have exited 922 * @dev: DRM device 923 * 924 * Take care of cleaning up after all DRM clients have exited. In the 925 * mode setting case, we want to restore the kernel's initial mode (just 926 * in case the last client left us in a bad state). 927 * 928 * Additionally, in the non-mode setting case, we'll tear down the GTT 929 * and DMA structures, since the kernel won't be using them, and clea 930 * up any GEM state. 931 */ 932 static void i915_driver_lastclose(struct drm_device *dev) 933 { 934 struct drm_i915_private *i915 = to_i915(dev); 935 936 intel_fbdev_restore_mode(i915); 937 938 vga_switcheroo_process_delayed_switch(); 939 } 940 941 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 942 { 943 struct drm_i915_file_private *file_priv = file->driver_priv; 944 945 i915_gem_context_close(file); 946 i915_drm_client_put(file_priv->client); 947 948 kfree_rcu(file_priv, rcu); 949 950 /* Catch up with all the deferred frees from "this" client */ 951 i915_gem_flush_free_objects(to_i915(dev)); 952 } 953 954 static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 955 { 956 struct intel_encoder *encoder; 957 958 if (!HAS_DISPLAY(dev_priv)) 959 return; 960 961 /* 962 * TODO: check and remove holding the modeset locks if none of 963 * the encoders depends on this. 964 */ 965 drm_modeset_lock_all(&dev_priv->drm); 966 for_each_intel_encoder(&dev_priv->drm, encoder) 967 if (encoder->suspend) 968 encoder->suspend(encoder); 969 drm_modeset_unlock_all(&dev_priv->drm); 970 971 for_each_intel_encoder(&dev_priv->drm, encoder) 972 if (encoder->suspend_complete) 973 encoder->suspend_complete(encoder); 974 } 975 976 static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) 977 { 978 struct intel_encoder *encoder; 979 980 if (!HAS_DISPLAY(dev_priv)) 981 return; 982 983 /* 984 * TODO: check and remove holding the modeset locks if none of 985 * the encoders depends on this. 986 */ 987 drm_modeset_lock_all(&dev_priv->drm); 988 for_each_intel_encoder(&dev_priv->drm, encoder) 989 if (encoder->shutdown) 990 encoder->shutdown(encoder); 991 drm_modeset_unlock_all(&dev_priv->drm); 992 993 for_each_intel_encoder(&dev_priv->drm, encoder) 994 if (encoder->shutdown_complete) 995 encoder->shutdown_complete(encoder); 996 } 997 998 void i915_driver_shutdown(struct drm_i915_private *i915) 999 { 1000 disable_rpm_wakeref_asserts(&i915->runtime_pm); 1001 intel_runtime_pm_disable(&i915->runtime_pm); 1002 intel_power_domains_disable(i915); 1003 1004 if (HAS_DISPLAY(i915)) { 1005 drm_kms_helper_poll_disable(&i915->drm); 1006 1007 drm_atomic_helper_shutdown(&i915->drm); 1008 } 1009 1010 intel_dp_mst_suspend(i915); 1011 1012 intel_runtime_pm_disable_interrupts(i915); 1013 intel_hpd_cancel_work(i915); 1014 1015 intel_suspend_encoders(i915); 1016 intel_shutdown_encoders(i915); 1017 1018 intel_dmc_suspend(i915); 1019 1020 i915_gem_suspend(i915); 1021 1022 /* 1023 * The only requirement is to reboot with display DC states disabled, 1024 * for now leaving all display power wells in the INIT power domain 1025 * enabled. 1026 * 1027 * TODO: 1028 * - unify the pci_driver::shutdown sequence here with the 1029 * pci_driver.driver.pm.poweroff,poweroff_late sequence. 1030 * - unify the driver remove and system/runtime suspend sequences with 1031 * the above unified shutdown/poweroff sequence. 1032 */ 1033 intel_power_domains_driver_remove(i915); 1034 enable_rpm_wakeref_asserts(&i915->runtime_pm); 1035 1036 intel_runtime_pm_driver_release(&i915->runtime_pm); 1037 } 1038 1039 static bool suspend_to_idle(struct drm_i915_private *dev_priv) 1040 { 1041 #if IS_ENABLED(CONFIG_ACPI_SLEEP) 1042 if (acpi_target_system_state() < ACPI_STATE_S3) 1043 return true; 1044 #endif 1045 return false; 1046 } 1047 1048 static void i915_drm_complete(struct drm_device *dev) 1049 { 1050 struct drm_i915_private *i915 = to_i915(dev); 1051 1052 intel_pxp_resume_complete(i915->pxp); 1053 } 1054 1055 static int i915_drm_prepare(struct drm_device *dev) 1056 { 1057 struct drm_i915_private *i915 = to_i915(dev); 1058 1059 intel_pxp_suspend_prepare(i915->pxp); 1060 1061 /* 1062 * NB intel_display_driver_suspend() may issue new requests after we've 1063 * ostensibly marked the GPU as ready-to-sleep here. We need to 1064 * split out that work and pull it forward so that after point, 1065 * the GPU is not woken again. 1066 */ 1067 return i915_gem_backup_suspend(i915); 1068 } 1069 1070 static int i915_drm_suspend(struct drm_device *dev) 1071 { 1072 struct drm_i915_private *dev_priv = to_i915(dev); 1073 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1074 pci_power_t opregion_target_state; 1075 1076 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1077 1078 /* We do a lot of poking in a lot of registers, make sure they work 1079 * properly. */ 1080 intel_power_domains_disable(dev_priv); 1081 if (HAS_DISPLAY(dev_priv)) 1082 drm_kms_helper_poll_disable(dev); 1083 1084 pci_save_state(pdev); 1085 1086 intel_display_driver_suspend(dev_priv); 1087 1088 intel_dp_mst_suspend(dev_priv); 1089 1090 intel_runtime_pm_disable_interrupts(dev_priv); 1091 intel_hpd_cancel_work(dev_priv); 1092 1093 intel_suspend_encoders(dev_priv); 1094 1095 /* Must be called before GGTT is suspended. */ 1096 intel_dpt_suspend(dev_priv); 1097 i915_ggtt_suspend(to_gt(dev_priv)->ggtt); 1098 1099 i915_save_display(dev_priv); 1100 1101 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 1102 intel_opregion_suspend(dev_priv, opregion_target_state); 1103 1104 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 1105 1106 dev_priv->suspend_count++; 1107 1108 intel_dmc_suspend(dev_priv); 1109 1110 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1111 1112 i915_gem_drain_freed_objects(dev_priv); 1113 1114 return 0; 1115 } 1116 1117 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) 1118 { 1119 struct drm_i915_private *dev_priv = to_i915(dev); 1120 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1121 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1122 struct intel_gt *gt; 1123 int ret, i; 1124 bool s2idle = !hibernation && suspend_to_idle(dev_priv); 1125 1126 disable_rpm_wakeref_asserts(rpm); 1127 1128 intel_pxp_suspend(dev_priv->pxp); 1129 1130 i915_gem_suspend_late(dev_priv); 1131 1132 for_each_gt(gt, dev_priv, i) 1133 intel_uncore_suspend(gt->uncore); 1134 1135 intel_power_domains_suspend(dev_priv, s2idle); 1136 1137 intel_display_power_suspend_late(dev_priv); 1138 1139 ret = vlv_suspend_complete(dev_priv); 1140 if (ret) { 1141 drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret); 1142 intel_power_domains_resume(dev_priv); 1143 1144 goto out; 1145 } 1146 1147 pci_disable_device(pdev); 1148 /* 1149 * During hibernation on some platforms the BIOS may try to access 1150 * the device even though it's already in D3 and hang the machine. So 1151 * leave the device in D0 on those platforms and hope the BIOS will 1152 * power down the device properly. The issue was seen on multiple old 1153 * GENs with different BIOS vendors, so having an explicit blacklist 1154 * is inpractical; apply the workaround on everything pre GEN6. The 1155 * platforms where the issue was seen: 1156 * Lenovo Thinkpad X301, X61s, X60, T60, X41 1157 * Fujitsu FSC S7110 1158 * Acer Aspire 1830T 1159 */ 1160 if (!(hibernation && GRAPHICS_VER(dev_priv) < 6)) 1161 pci_set_power_state(pdev, PCI_D3hot); 1162 1163 out: 1164 enable_rpm_wakeref_asserts(rpm); 1165 if (!dev_priv->uncore.user_forcewake_count) 1166 intel_runtime_pm_driver_release(rpm); 1167 1168 return ret; 1169 } 1170 1171 int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, 1172 pm_message_t state) 1173 { 1174 int error; 1175 1176 if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND && 1177 state.event != PM_EVENT_FREEZE)) 1178 return -EINVAL; 1179 1180 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1181 return 0; 1182 1183 error = i915_drm_suspend(&i915->drm); 1184 if (error) 1185 return error; 1186 1187 return i915_drm_suspend_late(&i915->drm, false); 1188 } 1189 1190 static int i915_drm_resume(struct drm_device *dev) 1191 { 1192 struct drm_i915_private *dev_priv = to_i915(dev); 1193 struct intel_gt *gt; 1194 int ret, i; 1195 1196 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1197 1198 ret = i915_pcode_init(dev_priv); 1199 if (ret) 1200 return ret; 1201 1202 sanitize_gpu(dev_priv); 1203 1204 ret = i915_ggtt_enable_hw(dev_priv); 1205 if (ret) 1206 drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); 1207 1208 i915_ggtt_resume(to_gt(dev_priv)->ggtt); 1209 1210 for_each_gt(gt, dev_priv, i) 1211 if (GRAPHICS_VER(gt->i915) >= 8) 1212 setup_private_pat(gt); 1213 1214 /* Must be called after GGTT is resumed. */ 1215 intel_dpt_resume(dev_priv); 1216 1217 intel_dmc_resume(dev_priv); 1218 1219 i915_restore_display(dev_priv); 1220 intel_pps_unlock_regs_wa(dev_priv); 1221 1222 intel_init_pch_refclk(dev_priv); 1223 1224 /* 1225 * Interrupts have to be enabled before any batches are run. If not the 1226 * GPU will hang. i915_gem_init_hw() will initiate batches to 1227 * update/restore the context. 1228 * 1229 * drm_mode_config_reset() needs AUX interrupts. 1230 * 1231 * Modeset enabling in intel_display_driver_init_hw() also needs working 1232 * interrupts. 1233 */ 1234 intel_runtime_pm_enable_interrupts(dev_priv); 1235 1236 if (HAS_DISPLAY(dev_priv)) 1237 drm_mode_config_reset(dev); 1238 1239 i915_gem_resume(dev_priv); 1240 1241 intel_display_driver_init_hw(dev_priv); 1242 1243 intel_clock_gating_init(dev_priv); 1244 intel_hpd_init(dev_priv); 1245 1246 /* MST sideband requires HPD interrupts enabled */ 1247 intel_dp_mst_resume(dev_priv); 1248 intel_display_driver_resume(dev_priv); 1249 1250 intel_hpd_poll_disable(dev_priv); 1251 if (HAS_DISPLAY(dev_priv)) 1252 drm_kms_helper_poll_enable(dev); 1253 1254 intel_opregion_resume(dev_priv); 1255 1256 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); 1257 1258 intel_power_domains_enable(dev_priv); 1259 1260 intel_gvt_resume(dev_priv); 1261 1262 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1263 1264 return 0; 1265 } 1266 1267 static int i915_drm_resume_early(struct drm_device *dev) 1268 { 1269 struct drm_i915_private *dev_priv = to_i915(dev); 1270 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1271 struct intel_gt *gt; 1272 int ret, i; 1273 1274 /* 1275 * We have a resume ordering issue with the snd-hda driver also 1276 * requiring our device to be power up. Due to the lack of a 1277 * parent/child relationship we currently solve this with an early 1278 * resume hook. 1279 * 1280 * FIXME: This should be solved with a special hdmi sink device or 1281 * similar so that power domains can be employed. 1282 */ 1283 1284 /* 1285 * Note that we need to set the power state explicitly, since we 1286 * powered off the device during freeze and the PCI core won't power 1287 * it back up for us during thaw. Powering off the device during 1288 * freeze is not a hard requirement though, and during the 1289 * suspend/resume phases the PCI core makes sure we get here with the 1290 * device powered on. So in case we change our freeze logic and keep 1291 * the device powered we can also remove the following set power state 1292 * call. 1293 */ 1294 ret = pci_set_power_state(pdev, PCI_D0); 1295 if (ret) { 1296 drm_err(&dev_priv->drm, 1297 "failed to set PCI D0 power state (%d)\n", ret); 1298 return ret; 1299 } 1300 1301 /* 1302 * Note that pci_enable_device() first enables any parent bridge 1303 * device and only then sets the power state for this device. The 1304 * bridge enabling is a nop though, since bridge devices are resumed 1305 * first. The order of enabling power and enabling the device is 1306 * imposed by the PCI core as described above, so here we preserve the 1307 * same order for the freeze/thaw phases. 1308 * 1309 * TODO: eventually we should remove pci_disable_device() / 1310 * pci_enable_enable_device() from suspend/resume. Due to how they 1311 * depend on the device enable refcount we can't anyway depend on them 1312 * disabling/enabling the device. 1313 */ 1314 if (pci_enable_device(pdev)) 1315 return -EIO; 1316 1317 pci_set_master(pdev); 1318 1319 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1320 1321 ret = vlv_resume_prepare(dev_priv, false); 1322 if (ret) 1323 drm_err(&dev_priv->drm, 1324 "Resume prepare failed: %d, continuing anyway\n", ret); 1325 1326 for_each_gt(gt, dev_priv, i) 1327 intel_gt_resume_early(gt); 1328 1329 intel_display_power_resume_early(dev_priv); 1330 1331 intel_power_domains_resume(dev_priv); 1332 1333 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1334 1335 return ret; 1336 } 1337 1338 int i915_driver_resume_switcheroo(struct drm_i915_private *i915) 1339 { 1340 int ret; 1341 1342 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1343 return 0; 1344 1345 ret = i915_drm_resume_early(&i915->drm); 1346 if (ret) 1347 return ret; 1348 1349 return i915_drm_resume(&i915->drm); 1350 } 1351 1352 static int i915_pm_prepare(struct device *kdev) 1353 { 1354 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1355 1356 if (!i915) { 1357 dev_err(kdev, "DRM not initialized, aborting suspend.\n"); 1358 return -ENODEV; 1359 } 1360 1361 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1362 return 0; 1363 1364 return i915_drm_prepare(&i915->drm); 1365 } 1366 1367 static int i915_pm_suspend(struct device *kdev) 1368 { 1369 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1370 1371 if (!i915) { 1372 dev_err(kdev, "DRM not initialized, aborting suspend.\n"); 1373 return -ENODEV; 1374 } 1375 1376 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1377 return 0; 1378 1379 return i915_drm_suspend(&i915->drm); 1380 } 1381 1382 static int i915_pm_suspend_late(struct device *kdev) 1383 { 1384 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1385 1386 /* 1387 * We have a suspend ordering issue with the snd-hda driver also 1388 * requiring our device to be power up. Due to the lack of a 1389 * parent/child relationship we currently solve this with an late 1390 * suspend hook. 1391 * 1392 * FIXME: This should be solved with a special hdmi sink device or 1393 * similar so that power domains can be employed. 1394 */ 1395 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1396 return 0; 1397 1398 return i915_drm_suspend_late(&i915->drm, false); 1399 } 1400 1401 static int i915_pm_poweroff_late(struct device *kdev) 1402 { 1403 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1404 1405 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1406 return 0; 1407 1408 return i915_drm_suspend_late(&i915->drm, true); 1409 } 1410 1411 static int i915_pm_resume_early(struct device *kdev) 1412 { 1413 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1414 1415 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1416 return 0; 1417 1418 return i915_drm_resume_early(&i915->drm); 1419 } 1420 1421 static int i915_pm_resume(struct device *kdev) 1422 { 1423 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1424 1425 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1426 return 0; 1427 1428 return i915_drm_resume(&i915->drm); 1429 } 1430 1431 static void i915_pm_complete(struct device *kdev) 1432 { 1433 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1434 1435 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1436 return; 1437 1438 i915_drm_complete(&i915->drm); 1439 } 1440 1441 /* freeze: before creating the hibernation_image */ 1442 static int i915_pm_freeze(struct device *kdev) 1443 { 1444 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1445 int ret; 1446 1447 if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { 1448 ret = i915_drm_suspend(&i915->drm); 1449 if (ret) 1450 return ret; 1451 } 1452 1453 ret = i915_gem_freeze(i915); 1454 if (ret) 1455 return ret; 1456 1457 return 0; 1458 } 1459 1460 static int i915_pm_freeze_late(struct device *kdev) 1461 { 1462 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1463 int ret; 1464 1465 if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { 1466 ret = i915_drm_suspend_late(&i915->drm, true); 1467 if (ret) 1468 return ret; 1469 } 1470 1471 ret = i915_gem_freeze_late(i915); 1472 if (ret) 1473 return ret; 1474 1475 return 0; 1476 } 1477 1478 /* thaw: called after creating the hibernation image, but before turning off. */ 1479 static int i915_pm_thaw_early(struct device *kdev) 1480 { 1481 return i915_pm_resume_early(kdev); 1482 } 1483 1484 static int i915_pm_thaw(struct device *kdev) 1485 { 1486 return i915_pm_resume(kdev); 1487 } 1488 1489 /* restore: called after loading the hibernation image. */ 1490 static int i915_pm_restore_early(struct device *kdev) 1491 { 1492 return i915_pm_resume_early(kdev); 1493 } 1494 1495 static int i915_pm_restore(struct device *kdev) 1496 { 1497 return i915_pm_resume(kdev); 1498 } 1499 1500 static int intel_runtime_suspend(struct device *kdev) 1501 { 1502 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1503 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1504 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1505 struct pci_dev *root_pdev; 1506 struct intel_gt *gt; 1507 int ret, i; 1508 1509 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 1510 return -ENODEV; 1511 1512 drm_dbg(&dev_priv->drm, "Suspending device\n"); 1513 1514 disable_rpm_wakeref_asserts(rpm); 1515 1516 /* 1517 * We are safe here against re-faults, since the fault handler takes 1518 * an RPM reference. 1519 */ 1520 i915_gem_runtime_suspend(dev_priv); 1521 1522 intel_pxp_runtime_suspend(dev_priv->pxp); 1523 1524 for_each_gt(gt, dev_priv, i) 1525 intel_gt_runtime_suspend(gt); 1526 1527 intel_runtime_pm_disable_interrupts(dev_priv); 1528 1529 for_each_gt(gt, dev_priv, i) 1530 intel_uncore_suspend(gt->uncore); 1531 1532 intel_display_power_suspend(dev_priv); 1533 1534 ret = vlv_suspend_complete(dev_priv); 1535 if (ret) { 1536 drm_err(&dev_priv->drm, 1537 "Runtime suspend failed, disabling it (%d)\n", ret); 1538 intel_uncore_runtime_resume(&dev_priv->uncore); 1539 1540 intel_runtime_pm_enable_interrupts(dev_priv); 1541 1542 for_each_gt(gt, dev_priv, i) 1543 intel_gt_runtime_resume(gt); 1544 1545 enable_rpm_wakeref_asserts(rpm); 1546 1547 return ret; 1548 } 1549 1550 enable_rpm_wakeref_asserts(rpm); 1551 intel_runtime_pm_driver_release(rpm); 1552 1553 if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) 1554 drm_err(&dev_priv->drm, 1555 "Unclaimed access detected prior to suspending\n"); 1556 1557 /* 1558 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX 1559 * This should be totally removed when we handle the pci states properly 1560 * on runtime PM. 1561 */ 1562 root_pdev = pcie_find_root_port(pdev); 1563 if (root_pdev) 1564 pci_d3cold_disable(root_pdev); 1565 1566 /* 1567 * FIXME: We really should find a document that references the arguments 1568 * used below! 1569 */ 1570 if (IS_BROADWELL(dev_priv)) { 1571 /* 1572 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 1573 * being detected, and the call we do at intel_runtime_resume() 1574 * won't be able to restore them. Since PCI_D3hot matches the 1575 * actual specification and appears to be working, use it. 1576 */ 1577 intel_opregion_notify_adapter(dev_priv, PCI_D3hot); 1578 } else { 1579 /* 1580 * current versions of firmware which depend on this opregion 1581 * notification have repurposed the D1 definition to mean 1582 * "runtime suspended" vs. what you would normally expect (D3) 1583 * to distinguish it from notifications that might be sent via 1584 * the suspend path. 1585 */ 1586 intel_opregion_notify_adapter(dev_priv, PCI_D1); 1587 } 1588 1589 assert_forcewakes_inactive(&dev_priv->uncore); 1590 1591 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 1592 intel_hpd_poll_enable(dev_priv); 1593 1594 drm_dbg(&dev_priv->drm, "Device suspended\n"); 1595 return 0; 1596 } 1597 1598 static int intel_runtime_resume(struct device *kdev) 1599 { 1600 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1601 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1602 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1603 struct pci_dev *root_pdev; 1604 struct intel_gt *gt; 1605 int ret, i; 1606 1607 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 1608 return -ENODEV; 1609 1610 drm_dbg(&dev_priv->drm, "Resuming device\n"); 1611 1612 drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count)); 1613 disable_rpm_wakeref_asserts(rpm); 1614 1615 intel_opregion_notify_adapter(dev_priv, PCI_D0); 1616 1617 root_pdev = pcie_find_root_port(pdev); 1618 if (root_pdev) 1619 pci_d3cold_enable(root_pdev); 1620 1621 if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) 1622 drm_dbg(&dev_priv->drm, 1623 "Unclaimed access during suspend, bios?\n"); 1624 1625 intel_display_power_resume(dev_priv); 1626 1627 ret = vlv_resume_prepare(dev_priv, true); 1628 1629 for_each_gt(gt, dev_priv, i) 1630 intel_uncore_runtime_resume(gt->uncore); 1631 1632 intel_runtime_pm_enable_interrupts(dev_priv); 1633 1634 /* 1635 * No point of rolling back things in case of an error, as the best 1636 * we can do is to hope that things will still work (and disable RPM). 1637 */ 1638 for_each_gt(gt, dev_priv, i) 1639 intel_gt_runtime_resume(gt); 1640 1641 intel_pxp_runtime_resume(dev_priv->pxp); 1642 1643 /* 1644 * On VLV/CHV display interrupts are part of the display 1645 * power well, so hpd is reinitialized from there. For 1646 * everyone else do it here. 1647 */ 1648 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 1649 intel_hpd_init(dev_priv); 1650 intel_hpd_poll_disable(dev_priv); 1651 } 1652 1653 skl_watermark_ipc_update(dev_priv); 1654 1655 enable_rpm_wakeref_asserts(rpm); 1656 1657 if (ret) 1658 drm_err(&dev_priv->drm, 1659 "Runtime resume failed, disabling it (%d)\n", ret); 1660 else 1661 drm_dbg(&dev_priv->drm, "Device resumed\n"); 1662 1663 return ret; 1664 } 1665 1666 const struct dev_pm_ops i915_pm_ops = { 1667 /* 1668 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 1669 * PMSG_RESUME] 1670 */ 1671 .prepare = i915_pm_prepare, 1672 .suspend = i915_pm_suspend, 1673 .suspend_late = i915_pm_suspend_late, 1674 .resume_early = i915_pm_resume_early, 1675 .resume = i915_pm_resume, 1676 .complete = i915_pm_complete, 1677 1678 /* 1679 * S4 event handlers 1680 * @freeze, @freeze_late : called (1) before creating the 1681 * hibernation image [PMSG_FREEZE] and 1682 * (2) after rebooting, before restoring 1683 * the image [PMSG_QUIESCE] 1684 * @thaw, @thaw_early : called (1) after creating the hibernation 1685 * image, before writing it [PMSG_THAW] 1686 * and (2) after failing to create or 1687 * restore the image [PMSG_RECOVER] 1688 * @poweroff, @poweroff_late: called after writing the hibernation 1689 * image, before rebooting [PMSG_HIBERNATE] 1690 * @restore, @restore_early : called after rebooting and restoring the 1691 * hibernation image [PMSG_RESTORE] 1692 */ 1693 .freeze = i915_pm_freeze, 1694 .freeze_late = i915_pm_freeze_late, 1695 .thaw_early = i915_pm_thaw_early, 1696 .thaw = i915_pm_thaw, 1697 .poweroff = i915_pm_suspend, 1698 .poweroff_late = i915_pm_poweroff_late, 1699 .restore_early = i915_pm_restore_early, 1700 .restore = i915_pm_restore, 1701 1702 /* S0ix (via runtime suspend) event handlers */ 1703 .runtime_suspend = intel_runtime_suspend, 1704 .runtime_resume = intel_runtime_resume, 1705 }; 1706 1707 static const struct file_operations i915_driver_fops = { 1708 .owner = THIS_MODULE, 1709 .open = drm_open, 1710 .release = drm_release_noglobal, 1711 .unlocked_ioctl = drm_ioctl, 1712 .mmap = i915_gem_mmap, 1713 .poll = drm_poll, 1714 .read = drm_read, 1715 .compat_ioctl = i915_ioc32_compat_ioctl, 1716 .llseek = noop_llseek, 1717 #ifdef CONFIG_PROC_FS 1718 .show_fdinfo = drm_show_fdinfo, 1719 #endif 1720 }; 1721 1722 static int 1723 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, 1724 struct drm_file *file) 1725 { 1726 return -ENODEV; 1727 } 1728 1729 static const struct drm_ioctl_desc i915_ioctls[] = { 1730 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1731 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), 1732 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), 1733 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 1734 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 1735 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 1736 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW), 1737 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1738 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 1739 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 1740 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1741 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), 1742 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1743 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1744 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), 1745 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 1746 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1747 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1748 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH), 1749 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW), 1750 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1751 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1752 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW), 1753 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), 1754 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), 1755 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW), 1756 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1757 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1758 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), 1759 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW), 1760 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), 1761 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), 1762 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), 1763 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW), 1764 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), 1765 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), 1766 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), 1767 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), 1768 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 1769 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), 1770 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 1771 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), 1772 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), 1773 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), 1774 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), 1775 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW), 1776 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 1777 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 1778 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 1779 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), 1780 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 1781 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 1782 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 1783 DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), 1784 DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW), 1785 DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW), 1786 DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW), 1787 DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW), 1788 DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), 1789 }; 1790 1791 /* 1792 * Interface history: 1793 * 1794 * 1.1: Original. 1795 * 1.2: Add Power Management 1796 * 1.3: Add vblank support 1797 * 1.4: Fix cmdbuffer path, add heap destroy 1798 * 1.5: Add vblank pipe configuration 1799 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 1800 * - Support vertical blank on secondary display pipe 1801 */ 1802 #define DRIVER_MAJOR 1 1803 #define DRIVER_MINOR 6 1804 #define DRIVER_PATCHLEVEL 0 1805 1806 static const struct drm_driver i915_drm_driver = { 1807 /* Don't use MTRRs here; the Xserver or userspace app should 1808 * deal with them for Intel hardware. 1809 */ 1810 .driver_features = 1811 DRIVER_GEM | 1812 DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ | 1813 DRIVER_SYNCOBJ_TIMELINE, 1814 .release = i915_driver_release, 1815 .open = i915_driver_open, 1816 .lastclose = i915_driver_lastclose, 1817 .postclose = i915_driver_postclose, 1818 .show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo), 1819 1820 .gem_prime_import = i915_gem_prime_import, 1821 1822 .dumb_create = i915_gem_dumb_create, 1823 .dumb_map_offset = i915_gem_dumb_mmap_offset, 1824 1825 .ioctls = i915_ioctls, 1826 .num_ioctls = ARRAY_SIZE(i915_ioctls), 1827 .fops = &i915_driver_fops, 1828 .name = DRIVER_NAME, 1829 .desc = DRIVER_DESC, 1830 .date = DRIVER_DATE, 1831 .major = DRIVER_MAJOR, 1832 .minor = DRIVER_MINOR, 1833 .patchlevel = DRIVER_PATCHLEVEL, 1834 }; 1835