158471f63SJani Nikula /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 258471f63SJani Nikula */ 358471f63SJani Nikula /* 458471f63SJani Nikula * 558471f63SJani Nikula * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 658471f63SJani Nikula * All Rights Reserved. 758471f63SJani Nikula * 858471f63SJani Nikula * Permission is hereby granted, free of charge, to any person obtaining a 958471f63SJani Nikula * copy of this software and associated documentation files (the 1058471f63SJani Nikula * "Software"), to deal in the Software without restriction, including 1158471f63SJani Nikula * without limitation the rights to use, copy, modify, merge, publish, 1258471f63SJani Nikula * distribute, sub license, and/or sell copies of the Software, and to 1358471f63SJani Nikula * permit persons to whom the Software is furnished to do so, subject to 1458471f63SJani Nikula * the following conditions: 1558471f63SJani Nikula * 1658471f63SJani Nikula * The above copyright notice and this permission notice (including the 1758471f63SJani Nikula * next paragraph) shall be included in all copies or substantial portions 1858471f63SJani Nikula * of the Software. 1958471f63SJani Nikula * 2058471f63SJani Nikula * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 2158471f63SJani Nikula * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 2258471f63SJani Nikula * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 2358471f63SJani Nikula * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 2458471f63SJani Nikula * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 2558471f63SJani Nikula * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 2658471f63SJani Nikula * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
2758471f63SJani Nikula * 2858471f63SJani Nikula */ 2958471f63SJani Nikula 3058471f63SJani Nikula #include <linux/acpi.h> 3158471f63SJani Nikula #include <linux/device.h> 3258471f63SJani Nikula #include <linux/module.h> 3358471f63SJani Nikula #include <linux/oom.h> 3458471f63SJani Nikula #include <linux/pci.h> 3558471f63SJani Nikula #include <linux/pm.h> 3658471f63SJani Nikula #include <linux/pm_runtime.h> 3758471f63SJani Nikula #include <linux/pnp.h> 3858471f63SJani Nikula #include <linux/slab.h> 39ff9fbe7cSLucas De Marchi #include <linux/string_helpers.h> 4058471f63SJani Nikula #include <linux/vga_switcheroo.h> 4158471f63SJani Nikula #include <linux/vt.h> 4258471f63SJani Nikula 4358471f63SJani Nikula #include <drm/drm_aperture.h> 4458471f63SJani Nikula #include <drm/drm_atomic_helper.h> 4558471f63SJani Nikula #include <drm/drm_ioctl.h> 4658471f63SJani Nikula #include <drm/drm_managed.h> 4758471f63SJani Nikula #include <drm/drm_probe_helper.h> 4858471f63SJani Nikula 4958471f63SJani Nikula #include "display/intel_acpi.h" 5058471f63SJani Nikula #include "display/intel_bw.h" 5158471f63SJani Nikula #include "display/intel_cdclk.h" 5258471f63SJani Nikula #include "display/intel_display_types.h" 5358471f63SJani Nikula #include "display/intel_dmc.h" 5458471f63SJani Nikula #include "display/intel_dp.h" 5558471f63SJani Nikula #include "display/intel_dpt.h" 5658471f63SJani Nikula #include "display/intel_fbdev.h" 5758471f63SJani Nikula #include "display/intel_hotplug.h" 5858471f63SJani Nikula #include "display/intel_overlay.h" 5958471f63SJani Nikula #include "display/intel_pch_refclk.h" 6058471f63SJani Nikula #include "display/intel_pipe_crc.h" 6158471f63SJani Nikula #include "display/intel_pps.h" 6258471f63SJani Nikula #include "display/intel_sprite.h" 6358471f63SJani Nikula #include "display/intel_vga.h" 6442a0d256SVille Syrjälä #include "display/skl_watermark.h" 6558471f63SJani Nikula 6658471f63SJani Nikula #include "gem/i915_gem_context.h" 67be137d79SJani Nikula #include 
"gem/i915_gem_create.h" 68c8eb426dSJani Nikula #include "gem/i915_gem_dmabuf.h" 6958471f63SJani Nikula #include "gem/i915_gem_ioctls.h" 7058471f63SJani Nikula #include "gem/i915_gem_mman.h" 7158471f63SJani Nikula #include "gem/i915_gem_pm.h" 7258471f63SJani Nikula #include "gt/intel_gt.h" 7358471f63SJani Nikula #include "gt/intel_gt_pm.h" 7458471f63SJani Nikula #include "gt/intel_rc6.h" 7558471f63SJani Nikula 76f67986b0SAlan Previn #include "pxp/intel_pxp.h" 77f67986b0SAlan Previn #include "pxp/intel_pxp_debugfs.h" 7858471f63SJani Nikula #include "pxp/intel_pxp_pm.h" 7958471f63SJani Nikula 80f052febdSJani Nikula #include "soc/intel_dram.h" 81f052febdSJani Nikula 825472b3f2SJani Nikula #include "i915_file_private.h" 8358471f63SJani Nikula #include "i915_debugfs.h" 8458471f63SJani Nikula #include "i915_driver.h" 855f0d4d14STvrtko Ursulin #include "i915_drm_client.h" 8658471f63SJani Nikula #include "i915_drv.h" 872564c35dSJani Nikula #include "i915_getparam.h" 88b3b088e2SDale B Stimson #include "i915_hwmon.h" 8958471f63SJani Nikula #include "i915_ioc32.h" 90198bca93SJani Nikula #include "i915_ioctl.h" 9158471f63SJani Nikula #include "i915_irq.h" 9258471f63SJani Nikula #include "i915_memcpy.h" 9358471f63SJani Nikula #include "i915_perf.h" 9458471f63SJani Nikula #include "i915_query.h" 9558471f63SJani Nikula #include "i915_suspend.h" 9658471f63SJani Nikula #include "i915_switcheroo.h" 9758471f63SJani Nikula #include "i915_sysfs.h" 98a7f46d5bSTvrtko Ursulin #include "i915_utils.h" 9958471f63SJani Nikula #include "i915_vgpu.h" 10058471f63SJani Nikula #include "intel_gvt.h" 10158471f63SJani Nikula #include "intel_memory_region.h" 1027e470f10SJani Nikula #include "intel_pci_config.h" 10358471f63SJani Nikula #include "intel_pcode.h" 10458471f63SJani Nikula #include "intel_pm.h" 10558471f63SJani Nikula #include "intel_region_ttm.h" 10658471f63SJani Nikula #include "vlv_suspend.h" 10758471f63SJani Nikula 1084588d7ebSJani Nikula static const struct drm_driver i915_drm_driver; 
10958471f63SJani Nikula 1106438452dSMatt Roper static void i915_release_bridge_dev(struct drm_device *dev, 1116438452dSMatt Roper void *bridge) 1126438452dSMatt Roper { 1136438452dSMatt Roper pci_dev_put(bridge); 1146438452dSMatt Roper } 1156438452dSMatt Roper 11658471f63SJani Nikula static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) 11758471f63SJani Nikula { 11858471f63SJani Nikula int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus); 11958471f63SJani Nikula 12058471f63SJani Nikula dev_priv->bridge_dev = 12158471f63SJani Nikula pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0)); 12258471f63SJani Nikula if (!dev_priv->bridge_dev) { 12358471f63SJani Nikula drm_err(&dev_priv->drm, "bridge device not found\n"); 12458471f63SJani Nikula return -EIO; 12558471f63SJani Nikula } 1266438452dSMatt Roper 1276438452dSMatt Roper return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev, 1286438452dSMatt Roper dev_priv->bridge_dev); 12958471f63SJani Nikula } 13058471f63SJani Nikula 13158471f63SJani Nikula /* Allocate space for the MCH regs if needed, return nonzero on error */ 13258471f63SJani Nikula static int 13358471f63SJani Nikula intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv) 13458471f63SJani Nikula { 13558471f63SJani Nikula int reg = GRAPHICS_VER(dev_priv) >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; 13658471f63SJani Nikula u32 temp_lo, temp_hi = 0; 13758471f63SJani Nikula u64 mchbar_addr; 13858471f63SJani Nikula int ret; 13958471f63SJani Nikula 14058471f63SJani Nikula if (GRAPHICS_VER(dev_priv) >= 4) 14158471f63SJani Nikula pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 14258471f63SJani Nikula pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); 14358471f63SJani Nikula mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 14458471f63SJani Nikula 14558471f63SJani Nikula /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 14658471f63SJani Nikula #ifdef CONFIG_PNP 14758471f63SJani Nikula if (mchbar_addr && 14858471f63SJani Nikula pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) 14958471f63SJani Nikula return 0; 15058471f63SJani Nikula #endif 15158471f63SJani Nikula 15258471f63SJani Nikula /* Get some space for it */ 15358471f63SJani Nikula dev_priv->mch_res.name = "i915 MCHBAR"; 15458471f63SJani Nikula dev_priv->mch_res.flags = IORESOURCE_MEM; 15558471f63SJani Nikula ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, 15658471f63SJani Nikula &dev_priv->mch_res, 15758471f63SJani Nikula MCHBAR_SIZE, MCHBAR_SIZE, 15858471f63SJani Nikula PCIBIOS_MIN_MEM, 15958471f63SJani Nikula 0, pcibios_align_resource, 16058471f63SJani Nikula dev_priv->bridge_dev); 16158471f63SJani Nikula if (ret) { 16258471f63SJani Nikula drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret); 16358471f63SJani Nikula dev_priv->mch_res.start = 0; 16458471f63SJani Nikula return ret; 16558471f63SJani Nikula } 16658471f63SJani Nikula 16758471f63SJani Nikula if (GRAPHICS_VER(dev_priv) >= 4) 16858471f63SJani Nikula pci_write_config_dword(dev_priv->bridge_dev, reg + 4, 16958471f63SJani Nikula upper_32_bits(dev_priv->mch_res.start)); 17058471f63SJani Nikula 17158471f63SJani Nikula pci_write_config_dword(dev_priv->bridge_dev, reg, 17258471f63SJani Nikula lower_32_bits(dev_priv->mch_res.start)); 17358471f63SJani Nikula return 0; 
17458471f63SJani Nikula } 17558471f63SJani Nikula 17658471f63SJani Nikula /* Setup MCHBAR if possible, return true if we should disable it again */ 17758471f63SJani Nikula static void 17858471f63SJani Nikula intel_setup_mchbar(struct drm_i915_private *dev_priv) 17958471f63SJani Nikula { 18058471f63SJani Nikula int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; 18158471f63SJani Nikula u32 temp; 18258471f63SJani Nikula bool enabled; 18358471f63SJani Nikula 18458471f63SJani Nikula if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 18558471f63SJani Nikula return; 18658471f63SJani Nikula 18758471f63SJani Nikula dev_priv->mchbar_need_disable = false; 18858471f63SJani Nikula 18958471f63SJani Nikula if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { 19058471f63SJani Nikula pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); 19158471f63SJani Nikula enabled = !!(temp & DEVEN_MCHBAR_EN); 19258471f63SJani Nikula } else { 19358471f63SJani Nikula pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 19458471f63SJani Nikula enabled = temp & 1; 19558471f63SJani Nikula } 19658471f63SJani Nikula 19758471f63SJani Nikula /* If it's already enabled, don't have to do anything */ 19858471f63SJani Nikula if (enabled) 19958471f63SJani Nikula return; 20058471f63SJani Nikula 20158471f63SJani Nikula if (intel_alloc_mchbar_resource(dev_priv)) 20258471f63SJani Nikula return; 20358471f63SJani Nikula 20458471f63SJani Nikula dev_priv->mchbar_need_disable = true; 20558471f63SJani Nikula 20658471f63SJani Nikula /* Space is allocated or reserved, so enable it. 
*/ 20758471f63SJani Nikula if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { 20858471f63SJani Nikula pci_write_config_dword(dev_priv->bridge_dev, DEVEN, 20958471f63SJani Nikula temp | DEVEN_MCHBAR_EN); 21058471f63SJani Nikula } else { 21158471f63SJani Nikula pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 21258471f63SJani Nikula pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); 21358471f63SJani Nikula } 21458471f63SJani Nikula } 21558471f63SJani Nikula 21658471f63SJani Nikula static void 21758471f63SJani Nikula intel_teardown_mchbar(struct drm_i915_private *dev_priv) 21858471f63SJani Nikula { 21958471f63SJani Nikula int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; 22058471f63SJani Nikula 22158471f63SJani Nikula if (dev_priv->mchbar_need_disable) { 22258471f63SJani Nikula if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { 22358471f63SJani Nikula u32 deven_val; 22458471f63SJani Nikula 22558471f63SJani Nikula pci_read_config_dword(dev_priv->bridge_dev, DEVEN, 22658471f63SJani Nikula &deven_val); 22758471f63SJani Nikula deven_val &= ~DEVEN_MCHBAR_EN; 22858471f63SJani Nikula pci_write_config_dword(dev_priv->bridge_dev, DEVEN, 22958471f63SJani Nikula deven_val); 23058471f63SJani Nikula } else { 23158471f63SJani Nikula u32 mchbar_val; 23258471f63SJani Nikula 23358471f63SJani Nikula pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, 23458471f63SJani Nikula &mchbar_val); 23558471f63SJani Nikula mchbar_val &= ~1; 23658471f63SJani Nikula pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, 23758471f63SJani Nikula mchbar_val); 23858471f63SJani Nikula } 23958471f63SJani Nikula } 24058471f63SJani Nikula 24158471f63SJani Nikula if (dev_priv->mch_res.start) 24258471f63SJani Nikula release_resource(&dev_priv->mch_res); 24358471f63SJani Nikula } 24458471f63SJani Nikula 24558471f63SJani Nikula static int i915_workqueues_init(struct drm_i915_private *dev_priv) 24658471f63SJani Nikula { 24758471f63SJani Nikula /* 
24858471f63SJani Nikula * The i915 workqueue is primarily used for batched retirement of 24958471f63SJani Nikula * requests (and thus managing bo) once the task has been completed 25058471f63SJani Nikula * by the GPU. i915_retire_requests() is called directly when we 25158471f63SJani Nikula * need high-priority retirement, such as waiting for an explicit 25258471f63SJani Nikula * bo. 25358471f63SJani Nikula * 25458471f63SJani Nikula * It is also used for periodic low-priority events, such as 25558471f63SJani Nikula * idle-timers and recording error state. 25658471f63SJani Nikula * 25758471f63SJani Nikula * All tasks on the workqueue are expected to acquire the dev mutex 25858471f63SJani Nikula * so there is no point in running more than one instance of the 25958471f63SJani Nikula * workqueue at any time. Use an ordered one. 26058471f63SJani Nikula */ 26158471f63SJani Nikula dev_priv->wq = alloc_ordered_workqueue("i915", 0); 26258471f63SJani Nikula if (dev_priv->wq == NULL) 26358471f63SJani Nikula goto out_err; 26458471f63SJani Nikula 2655a4dd6f0SJani Nikula dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); 2665a4dd6f0SJani Nikula if (dev_priv->display.hotplug.dp_wq == NULL) 26758471f63SJani Nikula goto out_free_wq; 26858471f63SJani Nikula 26958471f63SJani Nikula return 0; 27058471f63SJani Nikula 27158471f63SJani Nikula out_free_wq: 27258471f63SJani Nikula destroy_workqueue(dev_priv->wq); 27358471f63SJani Nikula out_err: 27458471f63SJani Nikula drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n"); 27558471f63SJani Nikula 27658471f63SJani Nikula return -ENOMEM; 27758471f63SJani Nikula } 27858471f63SJani Nikula 27958471f63SJani Nikula static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) 28058471f63SJani Nikula { 2815a4dd6f0SJani Nikula destroy_workqueue(dev_priv->display.hotplug.dp_wq); 28258471f63SJani Nikula destroy_workqueue(dev_priv->wq); 28358471f63SJani Nikula } 28458471f63SJani Nikula 28558471f63SJani Nikula /* 
28658471f63SJani Nikula * We don't keep the workarounds for pre-production hardware, so we expect our 28758471f63SJani Nikula * driver to fail on these machines in one way or another. A little warning on 28858471f63SJani Nikula * dmesg may help both the user and the bug triagers. 28958471f63SJani Nikula * 29058471f63SJani Nikula * Our policy for removing pre-production workarounds is to keep the 29158471f63SJani Nikula * current gen workarounds as a guide to the bring-up of the next gen 29258471f63SJani Nikula * (workarounds have a habit of persisting!). Anything older than that 29358471f63SJani Nikula * should be removed along with the complications they introduce. 29458471f63SJani Nikula */ 29558471f63SJani Nikula static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) 29658471f63SJani Nikula { 29758471f63SJani Nikula bool pre = false; 29858471f63SJani Nikula 29958471f63SJani Nikula pre |= IS_HSW_EARLY_SDV(dev_priv); 30058471f63SJani Nikula pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6; 30158471f63SJani Nikula pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA; 30258471f63SJani Nikula pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 30358471f63SJani Nikula pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3; 30458471f63SJani Nikula pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7; 30558471f63SJani Nikula 30658471f63SJani Nikula if (pre) { 30758471f63SJani Nikula drm_err(&dev_priv->drm, "This is a pre-production stepping. 
" 30858471f63SJani Nikula "It may not be fully functional.\n"); 30958471f63SJani Nikula add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); 31058471f63SJani Nikula } 31158471f63SJani Nikula } 31258471f63SJani Nikula 31358471f63SJani Nikula static void sanitize_gpu(struct drm_i915_private *i915) 31458471f63SJani Nikula { 3151c66a12aSMatt Roper if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) { 3161c66a12aSMatt Roper struct intel_gt *gt; 3171c66a12aSMatt Roper unsigned int i; 3181c66a12aSMatt Roper 3191c66a12aSMatt Roper for_each_gt(gt, i915, i) 3201c66a12aSMatt Roper __intel_gt_reset(gt, ALL_ENGINES); 3211c66a12aSMatt Roper } 32258471f63SJani Nikula } 32358471f63SJani Nikula 32458471f63SJani Nikula /** 32558471f63SJani Nikula * i915_driver_early_probe - setup state not requiring device access 32658471f63SJani Nikula * @dev_priv: device private 32758471f63SJani Nikula * 32858471f63SJani Nikula * Initialize everything that is a "SW-only" state, that is state not 32958471f63SJani Nikula * requiring accessing the device or exposing the driver via kernel internal 33058471f63SJani Nikula * or userspace interfaces. Example steps belonging here: lock initialization, 33158471f63SJani Nikula * system memory allocation, setting up device specific attributes and 33258471f63SJani Nikula * function hooks not requiring accessing the device. 
33358471f63SJani Nikula */ 33458471f63SJani Nikula static int i915_driver_early_probe(struct drm_i915_private *dev_priv) 33558471f63SJani Nikula { 33658471f63SJani Nikula int ret = 0; 33758471f63SJani Nikula 33858471f63SJani Nikula if (i915_inject_probe_failure(dev_priv)) 33958471f63SJani Nikula return -ENODEV; 34058471f63SJani Nikula 341c2c70752SMatt Roper intel_device_info_runtime_init_early(dev_priv); 342c2c70752SMatt Roper 34358471f63SJani Nikula intel_step_init(dev_priv); 34458471f63SJani Nikula 345639e30eeSMatt Roper intel_uncore_mmio_debug_init_early(dev_priv); 34658471f63SJani Nikula 34758471f63SJani Nikula spin_lock_init(&dev_priv->irq_lock); 34858471f63SJani Nikula spin_lock_init(&dev_priv->gpu_error.lock); 3492fee35fcSJani Nikula mutex_init(&dev_priv->display.backlight.lock); 35058471f63SJani Nikula 35158471f63SJani Nikula mutex_init(&dev_priv->sb_lock); 35258471f63SJani Nikula cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); 35358471f63SJani Nikula 3544be1c12cSJani Nikula mutex_init(&dev_priv->display.audio.mutex); 355a30a6fe9SJani Nikula mutex_init(&dev_priv->display.wm.wm_mutex); 35612dc5082SJani Nikula mutex_init(&dev_priv->display.pps.mutex); 357eb11eabcSJani Nikula mutex_init(&dev_priv->display.hdcp.comp_mutex); 35889cb0ba4SImre Deak spin_lock_init(&dev_priv->display.dkl.phy_lock); 35958471f63SJani Nikula 36058471f63SJani Nikula i915_memcpy_init_early(dev_priv); 36158471f63SJani Nikula intel_runtime_pm_init_early(&dev_priv->runtime_pm); 36258471f63SJani Nikula 36358471f63SJani Nikula ret = i915_workqueues_init(dev_priv); 36458471f63SJani Nikula if (ret < 0) 36558471f63SJani Nikula return ret; 36658471f63SJani Nikula 36758471f63SJani Nikula ret = vlv_suspend_init(dev_priv); 36858471f63SJani Nikula if (ret < 0) 36958471f63SJani Nikula goto err_workqueues; 37058471f63SJani Nikula 37158471f63SJani Nikula ret = intel_region_ttm_device_init(dev_priv); 37258471f63SJani Nikula if (ret) 37358471f63SJani Nikula goto err_ttm; 
37458471f63SJani Nikula 37503d2c54dSMatt Roper ret = intel_root_gt_init_early(dev_priv); 37603d2c54dSMatt Roper if (ret < 0) 37703d2c54dSMatt Roper goto err_rootgt; 37858471f63SJani Nikula 3795f0d4d14STvrtko Ursulin i915_drm_clients_init(&dev_priv->clients, dev_priv); 3805f0d4d14STvrtko Ursulin 38158471f63SJani Nikula i915_gem_init_early(dev_priv); 38258471f63SJani Nikula 38358471f63SJani Nikula /* This must be called before any calls to HAS_PCH_* */ 38458471f63SJani Nikula intel_detect_pch(dev_priv); 38558471f63SJani Nikula 38658471f63SJani Nikula intel_pm_setup(dev_priv); 38758471f63SJani Nikula ret = intel_power_domains_init(dev_priv); 38858471f63SJani Nikula if (ret < 0) 38958471f63SJani Nikula goto err_gem; 39058471f63SJani Nikula intel_irq_init(dev_priv); 39158471f63SJani Nikula intel_init_display_hooks(dev_priv); 39258471f63SJani Nikula intel_init_clock_gating_hooks(dev_priv); 39358471f63SJani Nikula 39458471f63SJani Nikula intel_detect_preproduction_hw(dev_priv); 39558471f63SJani Nikula 39658471f63SJani Nikula return 0; 39758471f63SJani Nikula 39858471f63SJani Nikula err_gem: 39958471f63SJani Nikula i915_gem_cleanup_early(dev_priv); 400bec68cc9STvrtko Ursulin intel_gt_driver_late_release_all(dev_priv); 4015f0d4d14STvrtko Ursulin i915_drm_clients_fini(&dev_priv->clients); 40203d2c54dSMatt Roper err_rootgt: 40358471f63SJani Nikula intel_region_ttm_device_fini(dev_priv); 40458471f63SJani Nikula err_ttm: 40558471f63SJani Nikula vlv_suspend_cleanup(dev_priv); 40658471f63SJani Nikula err_workqueues: 40758471f63SJani Nikula i915_workqueues_cleanup(dev_priv); 40858471f63SJani Nikula return ret; 40958471f63SJani Nikula } 41058471f63SJani Nikula 41158471f63SJani Nikula /** 41258471f63SJani Nikula * i915_driver_late_release - cleanup the setup done in 41358471f63SJani Nikula * i915_driver_early_probe() 41458471f63SJani Nikula * @dev_priv: device private 41558471f63SJani Nikula */ 41658471f63SJani Nikula static void i915_driver_late_release(struct drm_i915_private 
*dev_priv) 41758471f63SJani Nikula { 41858471f63SJani Nikula intel_irq_fini(dev_priv); 41958471f63SJani Nikula intel_power_domains_cleanup(dev_priv); 42058471f63SJani Nikula i915_gem_cleanup_early(dev_priv); 421bec68cc9STvrtko Ursulin intel_gt_driver_late_release_all(dev_priv); 4225f0d4d14STvrtko Ursulin i915_drm_clients_fini(&dev_priv->clients); 42358471f63SJani Nikula intel_region_ttm_device_fini(dev_priv); 42458471f63SJani Nikula vlv_suspend_cleanup(dev_priv); 42558471f63SJani Nikula i915_workqueues_cleanup(dev_priv); 42658471f63SJani Nikula 42758471f63SJani Nikula cpu_latency_qos_remove_request(&dev_priv->sb_qos); 42858471f63SJani Nikula mutex_destroy(&dev_priv->sb_lock); 42958471f63SJani Nikula 43058471f63SJani Nikula i915_params_free(&dev_priv->params); 43158471f63SJani Nikula } 43258471f63SJani Nikula 43358471f63SJani Nikula /** 43458471f63SJani Nikula * i915_driver_mmio_probe - setup device MMIO 43558471f63SJani Nikula * @dev_priv: device private 43658471f63SJani Nikula * 43758471f63SJani Nikula * Setup minimal device state necessary for MMIO accesses later in the 43858471f63SJani Nikula * initialization sequence. The setup here should avoid any other device-wide 43958471f63SJani Nikula * side effects or exposing the driver via kernel internal or user space 44058471f63SJani Nikula * interfaces. 
44158471f63SJani Nikula */ 44258471f63SJani Nikula static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) 44358471f63SJani Nikula { 444cfb0fa42SMatt Roper struct intel_gt *gt; 445cfb0fa42SMatt Roper int ret, i; 44658471f63SJani Nikula 44758471f63SJani Nikula if (i915_inject_probe_failure(dev_priv)) 44858471f63SJani Nikula return -ENODEV; 44958471f63SJani Nikula 45058471f63SJani Nikula ret = i915_get_bridge_dev(dev_priv); 45158471f63SJani Nikula if (ret < 0) 45258471f63SJani Nikula return ret; 45358471f63SJani Nikula 454cfb0fa42SMatt Roper for_each_gt(gt, dev_priv, i) { 455cfb0fa42SMatt Roper ret = intel_uncore_init_mmio(gt->uncore); 456211b4dbcSDave Airlie if (ret) 457bec68cc9STvrtko Ursulin return ret; 458211b4dbcSDave Airlie 459cfb0fa42SMatt Roper ret = drmm_add_action_or_reset(&dev_priv->drm, 460cfb0fa42SMatt Roper intel_uncore_fini_mmio, 461cfb0fa42SMatt Roper gt->uncore); 462cfb0fa42SMatt Roper if (ret) 463cfb0fa42SMatt Roper return ret; 464cfb0fa42SMatt Roper } 465cfb0fa42SMatt Roper 46658471f63SJani Nikula /* Try to make sure MCHBAR is enabled before poking at it */ 46758471f63SJani Nikula intel_setup_mchbar(dev_priv); 46858471f63SJani Nikula intel_device_info_runtime_init(dev_priv); 46958471f63SJani Nikula 470cfb0fa42SMatt Roper for_each_gt(gt, dev_priv, i) { 471cfb0fa42SMatt Roper ret = intel_gt_init_mmio(gt); 47258471f63SJani Nikula if (ret) 47358471f63SJani Nikula goto err_uncore; 474cfb0fa42SMatt Roper } 47558471f63SJani Nikula 47658471f63SJani Nikula /* As early as possible, scrub existing GPU state before clobbering */ 47758471f63SJani Nikula sanitize_gpu(dev_priv); 47858471f63SJani Nikula 47958471f63SJani Nikula return 0; 48058471f63SJani Nikula 48158471f63SJani Nikula err_uncore: 48258471f63SJani Nikula intel_teardown_mchbar(dev_priv); 48358471f63SJani Nikula 48458471f63SJani Nikula return ret; 48558471f63SJani Nikula } 48658471f63SJani Nikula 48758471f63SJani Nikula /** 48858471f63SJani Nikula * i915_driver_mmio_release - cleanup the 
setup done in i915_driver_mmio_probe() 48958471f63SJani Nikula * @dev_priv: device private 49058471f63SJani Nikula */ 49158471f63SJani Nikula static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) 49258471f63SJani Nikula { 49358471f63SJani Nikula intel_teardown_mchbar(dev_priv); 49458471f63SJani Nikula } 49558471f63SJani Nikula 49658471f63SJani Nikula /** 49758471f63SJani Nikula * i915_set_dma_info - set all relevant PCI dma info as configured for the 49858471f63SJani Nikula * platform 49958471f63SJani Nikula * @i915: valid i915 instance 50058471f63SJani Nikula * 50158471f63SJani Nikula * Set the dma max segment size, device and coherent masks. The dma mask set 50258471f63SJani Nikula * needs to occur before i915_ggtt_probe_hw. 50358471f63SJani Nikula * 50458471f63SJani Nikula * A couple of platforms have special needs. Address them as well. 50558471f63SJani Nikula * 50658471f63SJani Nikula */ 50758471f63SJani Nikula static int i915_set_dma_info(struct drm_i915_private *i915) 50858471f63SJani Nikula { 50958471f63SJani Nikula unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size; 51058471f63SJani Nikula int ret; 51158471f63SJani Nikula 51258471f63SJani Nikula GEM_BUG_ON(!mask_size); 51358471f63SJani Nikula 51458471f63SJani Nikula /* 51558471f63SJani Nikula * We don't have a max segment size, so set it to the max so sg's 51658471f63SJani Nikula * debugging layer doesn't complain 51758471f63SJani Nikula */ 51858471f63SJani Nikula dma_set_max_seg_size(i915->drm.dev, UINT_MAX); 51958471f63SJani Nikula 52058471f63SJani Nikula ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size)); 52158471f63SJani Nikula if (ret) 52258471f63SJani Nikula goto mask_err; 52358471f63SJani Nikula 52458471f63SJani Nikula /* overlay on gen2 is broken and can't address above 1G */ 52558471f63SJani Nikula if (GRAPHICS_VER(i915) == 2) 52658471f63SJani Nikula mask_size = 30; 52758471f63SJani Nikula 52858471f63SJani Nikula /* 52958471f63SJani Nikula * 965GM sometimes 
incorrectly writes to hardware status page (HWS) 53058471f63SJani Nikula * using 32bit addressing, overwriting memory if HWS is located 53158471f63SJani Nikula * above 4GB. 53258471f63SJani Nikula * 53358471f63SJani Nikula * The documentation also mentions an issue with undefined 53458471f63SJani Nikula * behaviour if any general state is accessed within a page above 4GB, 53558471f63SJani Nikula * which also needs to be handled carefully. 53658471f63SJani Nikula */ 53758471f63SJani Nikula if (IS_I965G(i915) || IS_I965GM(i915)) 53858471f63SJani Nikula mask_size = 32; 53958471f63SJani Nikula 54058471f63SJani Nikula ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size)); 54158471f63SJani Nikula if (ret) 54258471f63SJani Nikula goto mask_err; 54358471f63SJani Nikula 54458471f63SJani Nikula return 0; 54558471f63SJani Nikula 54658471f63SJani Nikula mask_err: 54758471f63SJani Nikula drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret); 54858471f63SJani Nikula return ret; 54958471f63SJani Nikula } 55058471f63SJani Nikula 5516a735552SAshutosh Dixit static int i915_pcode_init(struct drm_i915_private *i915) 5526a735552SAshutosh Dixit { 5536a735552SAshutosh Dixit struct intel_gt *gt; 5546a735552SAshutosh Dixit int id, ret; 5556a735552SAshutosh Dixit 5566a735552SAshutosh Dixit for_each_gt(gt, i915, id) { 5576a735552SAshutosh Dixit ret = intel_pcode_init(gt->uncore); 5586a735552SAshutosh Dixit if (ret) { 5596a735552SAshutosh Dixit drm_err(>->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret); 5606a735552SAshutosh Dixit return ret; 5616a735552SAshutosh Dixit } 5626a735552SAshutosh Dixit } 5636a735552SAshutosh Dixit 5646a735552SAshutosh Dixit return 0; 5656a735552SAshutosh Dixit } 5666a735552SAshutosh Dixit 56758471f63SJani Nikula /** 56858471f63SJani Nikula * i915_driver_hw_probe - setup state requiring device access 56958471f63SJani Nikula * @dev_priv: device private 57058471f63SJani Nikula * 57158471f63SJani Nikula * Setup state that 
requires accessing the device, but doesn't require 57258471f63SJani Nikula * exposing the driver via kernel internal or userspace interfaces. 57358471f63SJani Nikula */ 57458471f63SJani Nikula static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) 57558471f63SJani Nikula { 57658471f63SJani Nikula struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 577138c2fcaSAnshuman Gupta struct pci_dev *root_pdev; 57858471f63SJani Nikula int ret; 57958471f63SJani Nikula 58058471f63SJani Nikula if (i915_inject_probe_failure(dev_priv)) 58158471f63SJani Nikula return -ENODEV; 58258471f63SJani Nikula 58358471f63SJani Nikula if (HAS_PPGTT(dev_priv)) { 58458471f63SJani Nikula if (intel_vgpu_active(dev_priv) && 58558471f63SJani Nikula !intel_vgpu_has_full_ppgtt(dev_priv)) { 58658471f63SJani Nikula i915_report_error(dev_priv, 58758471f63SJani Nikula "incompatible vGPU found, support for isolated ppGTT required\n"); 58858471f63SJani Nikula return -ENXIO; 58958471f63SJani Nikula } 59058471f63SJani Nikula } 59158471f63SJani Nikula 59258471f63SJani Nikula if (HAS_EXECLISTS(dev_priv)) { 59358471f63SJani Nikula /* 59458471f63SJani Nikula * Older GVT emulation depends upon intercepting CSB mmio, 59558471f63SJani Nikula * which we no longer use, preferring to use the HWSP cache 59658471f63SJani Nikula * instead. 
59758471f63SJani Nikula */ 59858471f63SJani Nikula if (intel_vgpu_active(dev_priv) && 59958471f63SJani Nikula !intel_vgpu_has_hwsp_emulation(dev_priv)) { 60058471f63SJani Nikula i915_report_error(dev_priv, 60158471f63SJani Nikula "old vGPU host found, support for HWSP emulation required\n"); 60258471f63SJani Nikula return -ENXIO; 60358471f63SJani Nikula } 60458471f63SJani Nikula } 60558471f63SJani Nikula 60658471f63SJani Nikula /* needs to be done before ggtt probe */ 60758471f63SJani Nikula intel_dram_edram_detect(dev_priv); 60858471f63SJani Nikula 60958471f63SJani Nikula ret = i915_set_dma_info(dev_priv); 61058471f63SJani Nikula if (ret) 61158471f63SJani Nikula return ret; 61258471f63SJani Nikula 613*772a5803SUmesh Nerlige Ramappa ret = i915_perf_init(dev_priv); 614*772a5803SUmesh Nerlige Ramappa if (ret) 615*772a5803SUmesh Nerlige Ramappa return ret; 61658471f63SJani Nikula 61758471f63SJani Nikula ret = i915_ggtt_probe_hw(dev_priv); 61858471f63SJani Nikula if (ret) 61958471f63SJani Nikula goto err_perf; 62058471f63SJani Nikula 62158471f63SJani Nikula ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver); 62258471f63SJani Nikula if (ret) 62358471f63SJani Nikula goto err_ggtt; 62458471f63SJani Nikula 62558471f63SJani Nikula ret = i915_ggtt_init_hw(dev_priv); 62658471f63SJani Nikula if (ret) 62758471f63SJani Nikula goto err_ggtt; 62858471f63SJani Nikula 629957565a4SMatthew Auld /* 630957565a4SMatthew Auld * Make sure we probe lmem before we probe stolen-lmem. The BAR size 631957565a4SMatthew Auld * might be different due to bar resizing. 
632957565a4SMatthew Auld */ 633957565a4SMatthew Auld ret = intel_gt_tiles_init(dev_priv); 63458471f63SJani Nikula if (ret) 63558471f63SJani Nikula goto err_ggtt; 63658471f63SJani Nikula 637957565a4SMatthew Auld ret = intel_memory_regions_hw_probe(dev_priv); 63858471f63SJani Nikula if (ret) 639957565a4SMatthew Auld goto err_ggtt; 64058471f63SJani Nikula 64158471f63SJani Nikula ret = i915_ggtt_enable_hw(dev_priv); 64258471f63SJani Nikula if (ret) { 64358471f63SJani Nikula drm_err(&dev_priv->drm, "failed to enable GGTT\n"); 64458471f63SJani Nikula goto err_mem_regions; 64558471f63SJani Nikula } 64658471f63SJani Nikula 64758471f63SJani Nikula pci_set_master(pdev); 64858471f63SJani Nikula 64958471f63SJani Nikula /* On the 945G/GM, the chipset reports the MSI capability on the 65058471f63SJani Nikula * integrated graphics even though the support isn't actually there 65158471f63SJani Nikula * according to the published specs. It doesn't appear to function 65258471f63SJani Nikula * correctly in testing on 945G. 65358471f63SJani Nikula * This may be a side effect of MSI having been made available for PEG 65458471f63SJani Nikula * and the registers being closely associated. 65558471f63SJani Nikula * 65658471f63SJani Nikula * According to chipset errata, on the 965GM, MSI interrupts may 65758471f63SJani Nikula * be lost or delayed, and was defeatured. MSI interrupts seem to 65858471f63SJani Nikula * get lost on g4x as well, and interrupt delivery seems to stay 65958471f63SJani Nikula * properly dead afterwards. So we'll just disable them for all 66058471f63SJani Nikula * pre-gen5 chipsets. 66158471f63SJani Nikula * 66258471f63SJani Nikula * dp aux and gmbus irq on gen4 seems to be able to generate legacy 66358471f63SJani Nikula * interrupts even when in MSI mode. This results in spurious 66458471f63SJani Nikula * interrupt warnings if the legacy irq no. is shared with another 66558471f63SJani Nikula * device. 
The kernel then disables that interrupt source and so 66658471f63SJani Nikula * prevents the other device from working properly. 66758471f63SJani Nikula */ 66858471f63SJani Nikula if (GRAPHICS_VER(dev_priv) >= 5) { 66958471f63SJani Nikula if (pci_enable_msi(pdev) < 0) 67058471f63SJani Nikula drm_dbg(&dev_priv->drm, "can't enable MSI"); 67158471f63SJani Nikula } 67258471f63SJani Nikula 67358471f63SJani Nikula ret = intel_gvt_init(dev_priv); 67458471f63SJani Nikula if (ret) 67558471f63SJani Nikula goto err_msi; 67658471f63SJani Nikula 67758471f63SJani Nikula intel_opregion_setup(dev_priv); 67858471f63SJani Nikula 6796a735552SAshutosh Dixit ret = i915_pcode_init(dev_priv); 68058471f63SJani Nikula if (ret) 68158471f63SJani Nikula goto err_msi; 68258471f63SJani Nikula 68358471f63SJani Nikula /* 68458471f63SJani Nikula * Fill the dram structure to get the system dram info. This will be 68558471f63SJani Nikula * used for memory latency calculation. 68658471f63SJani Nikula */ 68758471f63SJani Nikula intel_dram_detect(dev_priv); 68858471f63SJani Nikula 68958471f63SJani Nikula intel_bw_init_hw(dev_priv); 69058471f63SJani Nikula 691138c2fcaSAnshuman Gupta /* 692138c2fcaSAnshuman Gupta * FIXME: Temporary hammer to avoid freezing the machine on our DGFX 693138c2fcaSAnshuman Gupta * This should be totally removed when we handle the pci states properly 694138c2fcaSAnshuman Gupta * on runtime PM and on s2idle cases. 
	 */
	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_disable(root_pdev);

	return 0;

	/* Error unwind: undo the setup above in reverse order. */
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	/* Re-allow D3cold on the root port; it was disabled during hw probe. */
	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_enable(root_pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(&dev_priv->drm, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		/*
		 * Registration failure is not treated as fatal for the
		 * device; we just stop exposing the interfaces below.
		 */
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_register(gt);

	intel_pxp_debugfs_register(dev_priv->pxp);

	i915_hwmon_register(dev_priv);

	intel_display_driver_register(dev_priv);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	/* Tear down in roughly the reverse order of i915_driver_register(). */
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	intel_pxp_fini(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_unregister(gt);

	i915_hwmon_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

/* Print whether the IOMMU (VT-d) is active for this device. */
void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
	drm_printf(p, "iommu: %s\n",
		   str_enabled_disabled(i915_vtd_active(i915)));
}

static void
i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	/* Dump device/driver details to the log when DRM driver debug is on. */
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");
		struct intel_gt *gt;
		unsigned int i;

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print(INTEL_INFO(dev_priv),
					RUNTIME_INFO(dev_priv), &p);
		i915_print_iommu_status(dev_priv, &p);
		for_each_gt(gt, dev_priv, i)
			intel_gt_info_print(&gt->info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

/*
 * Allocate the drm_i915_private structure for @pdev and seed it with the
 * per-device parameter copy, the write-once device info matched from
 * @ent, and the initial runtime info.  Returns the new device or an
 * ERR_PTR on allocation failure.
 */
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct intel_runtime_info *runtime;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));

	/* Initialize initial runtime info from static const data and pdev. */
	runtime = RUNTIME_INFO(i915);
	memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
	runtime->device_id = pdev->device;

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	/* Probe runs with wakeref asserts disabled; re-enabled before exit. */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = intel_gt_probe_all(i915);
	if (ret < 0)
		goto out_runtime_pm_put;
	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_tiles_cleanup;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	intel_pxp_init(i915);

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	/* Allow i915_driver_release() to run full cleanup from now on. */
	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
	i915_gem_drain_freed_objects(i915);
	i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_tiles_cleanup:
	intel_gt_release_all(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

/*
 * Undo i915_driver_probe(): unregister the device and tear down the
 * GEM/display/HW state, holding a runtime PM wakeref throughout.
 */
void i915_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/*
 * Final release for the drm_device; only does work once probe has
 * progressed far enough to set do_release (see i915_driver_probe()).
 */
static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t wakeref;

	if (!dev_priv->do_release)
		return;

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	intel_runtime_pm_put(rpm, wakeref);

	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

/* Per-file open hook: set up the GEM file-private state. */
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_fbdev_restore_mode(dev);

	if (HAS_DISPLAY(i915))
		vga_switcheroo_process_delayed_switch();
}

/* Per-file close hook: drop GEM context, client and file-private state. */
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_drm_client_put(file_priv->client);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

/* Call each display encoder's ->suspend() hook under the modeset locks. */
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(&dev_priv->drm);
	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(&dev_priv->drm);
}

/* Call each display encoder's ->shutdown() hook under the modeset locks. */
static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(&dev_priv->drm);
	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(&dev_priv->drm);
}

/* pci_driver::shutdown path: quiesce display, interrupts and GEM. */
void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	intel_runtime_pm_disable(&i915->runtime_pm);
	intel_power_domains_disable(i915);

	if (HAS_DISPLAY(i915)) {
		drm_kms_helper_poll_disable(&i915->drm);

		drm_atomic_helper_shutdown(&i915->drm);
	}

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	intel_dmc_ucode_suspend(i915);

	i915_gem_suspend(i915);

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 *
	 * TODO:
	 * - unify the pci_driver::shutdown sequence here with the
	 *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
	 * - unify the driver remove and system/runtime suspend sequences with
	 *   the above unified shutdown/poweroff sequence.
	 */
	intel_power_domains_driver_remove(i915);
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_runtime_pm_driver_release(&i915->runtime_pm);
}

/*
 * True when the target system sleep state is shallower than S3
 * (i.e. s2idle); always false without CONFIG_ACPI_SLEEP.
 */
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

/* pm_ops::prepare: quiesce the GPU before the suspend sequence proper. */
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_pxp_suspend_prepare(i915->pxp);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	return i915_gem_backup_suspend(i915);
}

/* First phase of system suspend: save state and quiesce the device. */
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	/* Must be called before GGTT is suspended. */
	intel_dpt_suspend(dev_priv);
	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);

	i915_save_display(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_dmc_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_gem_drain_freed_objects(dev_priv);

	return 0;
}

/* Map the system sleep target and @hibernate flag to an i915 suspend mode. */
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

/* Late phase of suspend: power down domains and put the PCI device to sleep. */
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;

	disable_rpm_wakeref_asserts(rpm);

	intel_pxp_suspend(dev_priv->pxp);

	i915_gem_suspend_late(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_suspend(gt->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6.
	 * The platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

/*
 * vga_switcheroo suspend entry point: run both suspend phases unless the
 * device is already powered off by the switcheroo.
 */
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
				   pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

/* Main resume path: restore HW state saved by i915_drm_suspend(). */
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_gt *gt;
	int ret, i;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		return ret;

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(to_gt(dev_priv)->ggtt);

	for_each_gt(gt, dev_priv, i)
		if (GRAPHICS_VER(gt->i915) >= 8)
			setup_private_pat(gt);

	/* Must be called after GGTT is resumed. */
	intel_dpt_resume(dev_priv);

	intel_dmc_ucode_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	if (HAS_DISPLAY(dev_priv))
		drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_pxp_resume(dev_priv->pxp);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);
	intel_hpd_init(dev_priv);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);
	intel_display_resume(dev);

	intel_hpd_poll_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_enable(dev);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	intel_gvt_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_gt *gt;
	int ret, i;

	/*
	 * We have a resume
ordering issue with the snd-hda driver also 141558471f63SJani Nikula * requiring our device to be power up. Due to the lack of a 141658471f63SJani Nikula * parent/child relationship we currently solve this with an early 141758471f63SJani Nikula * resume hook. 141858471f63SJani Nikula * 141958471f63SJani Nikula * FIXME: This should be solved with a special hdmi sink device or 142058471f63SJani Nikula * similar so that power domains can be employed. 142158471f63SJani Nikula */ 142258471f63SJani Nikula 142358471f63SJani Nikula /* 142458471f63SJani Nikula * Note that we need to set the power state explicitly, since we 142558471f63SJani Nikula * powered off the device during freeze and the PCI core won't power 142658471f63SJani Nikula * it back up for us during thaw. Powering off the device during 142758471f63SJani Nikula * freeze is not a hard requirement though, and during the 142858471f63SJani Nikula * suspend/resume phases the PCI core makes sure we get here with the 142958471f63SJani Nikula * device powered on. So in case we change our freeze logic and keep 143058471f63SJani Nikula * the device powered we can also remove the following set power state 143158471f63SJani Nikula * call. 143258471f63SJani Nikula */ 143358471f63SJani Nikula ret = pci_set_power_state(pdev, PCI_D0); 143458471f63SJani Nikula if (ret) { 143558471f63SJani Nikula drm_err(&dev_priv->drm, 143658471f63SJani Nikula "failed to set PCI D0 power state (%d)\n", ret); 143758471f63SJani Nikula return ret; 143858471f63SJani Nikula } 143958471f63SJani Nikula 144058471f63SJani Nikula /* 144158471f63SJani Nikula * Note that pci_enable_device() first enables any parent bridge 144258471f63SJani Nikula * device and only then sets the power state for this device. The 144358471f63SJani Nikula * bridge enabling is a nop though, since bridge devices are resumed 144458471f63SJani Nikula * first. 
The order of enabling power and enabling the device is 144558471f63SJani Nikula * imposed by the PCI core as described above, so here we preserve the 144658471f63SJani Nikula * same order for the freeze/thaw phases. 144758471f63SJani Nikula * 144858471f63SJani Nikula * TODO: eventually we should remove pci_disable_device() / 144958471f63SJani Nikula * pci_enable_enable_device() from suspend/resume. Due to how they 145058471f63SJani Nikula * depend on the device enable refcount we can't anyway depend on them 145158471f63SJani Nikula * disabling/enabling the device. 145258471f63SJani Nikula */ 145358471f63SJani Nikula if (pci_enable_device(pdev)) 145458471f63SJani Nikula return -EIO; 145558471f63SJani Nikula 145658471f63SJani Nikula pci_set_master(pdev); 145758471f63SJani Nikula 145858471f63SJani Nikula disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 145958471f63SJani Nikula 146058471f63SJani Nikula ret = vlv_resume_prepare(dev_priv, false); 146158471f63SJani Nikula if (ret) 146258471f63SJani Nikula drm_err(&dev_priv->drm, 146358471f63SJani Nikula "Resume prepare failed: %d, continuing anyway\n", ret); 146458471f63SJani Nikula 14651c66a12aSMatt Roper for_each_gt(gt, dev_priv, i) { 14661c66a12aSMatt Roper intel_uncore_resume_early(gt->uncore); 14671c66a12aSMatt Roper intel_gt_check_and_clear_faults(gt); 14681c66a12aSMatt Roper } 146958471f63SJani Nikula 147058471f63SJani Nikula intel_display_power_resume_early(dev_priv); 147158471f63SJani Nikula 147258471f63SJani Nikula intel_power_domains_resume(dev_priv); 147358471f63SJani Nikula 147458471f63SJani Nikula enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 147558471f63SJani Nikula 147658471f63SJani Nikula return ret; 147758471f63SJani Nikula } 147858471f63SJani Nikula 1479b8d65b8aSJani Nikula int i915_driver_resume_switcheroo(struct drm_i915_private *i915) 148058471f63SJani Nikula { 148158471f63SJani Nikula int ret; 148258471f63SJani Nikula 148358471f63SJani Nikula if (i915->drm.switch_power_state == 
DRM_SWITCH_POWER_OFF) 148458471f63SJani Nikula return 0; 148558471f63SJani Nikula 148658471f63SJani Nikula ret = i915_drm_resume_early(&i915->drm); 148758471f63SJani Nikula if (ret) 148858471f63SJani Nikula return ret; 148958471f63SJani Nikula 149058471f63SJani Nikula return i915_drm_resume(&i915->drm); 149158471f63SJani Nikula } 149258471f63SJani Nikula 149358471f63SJani Nikula static int i915_pm_prepare(struct device *kdev) 149458471f63SJani Nikula { 149558471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 149658471f63SJani Nikula 149758471f63SJani Nikula if (!i915) { 149858471f63SJani Nikula dev_err(kdev, "DRM not initialized, aborting suspend.\n"); 149958471f63SJani Nikula return -ENODEV; 150058471f63SJani Nikula } 150158471f63SJani Nikula 150258471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 150358471f63SJani Nikula return 0; 150458471f63SJani Nikula 150558471f63SJani Nikula return i915_drm_prepare(&i915->drm); 150658471f63SJani Nikula } 150758471f63SJani Nikula 150858471f63SJani Nikula static int i915_pm_suspend(struct device *kdev) 150958471f63SJani Nikula { 151058471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 151158471f63SJani Nikula 151258471f63SJani Nikula if (!i915) { 151358471f63SJani Nikula dev_err(kdev, "DRM not initialized, aborting suspend.\n"); 151458471f63SJani Nikula return -ENODEV; 151558471f63SJani Nikula } 151658471f63SJani Nikula 151758471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 151858471f63SJani Nikula return 0; 151958471f63SJani Nikula 152058471f63SJani Nikula return i915_drm_suspend(&i915->drm); 152158471f63SJani Nikula } 152258471f63SJani Nikula 152358471f63SJani Nikula static int i915_pm_suspend_late(struct device *kdev) 152458471f63SJani Nikula { 152558471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 152658471f63SJani Nikula 152758471f63SJani Nikula /* 152858471f63SJani Nikula * We have a suspend ordering issue 
with the snd-hda driver also 152958471f63SJani Nikula * requiring our device to be power up. Due to the lack of a 153058471f63SJani Nikula * parent/child relationship we currently solve this with an late 153158471f63SJani Nikula * suspend hook. 153258471f63SJani Nikula * 153358471f63SJani Nikula * FIXME: This should be solved with a special hdmi sink device or 153458471f63SJani Nikula * similar so that power domains can be employed. 153558471f63SJani Nikula */ 153658471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 153758471f63SJani Nikula return 0; 153858471f63SJani Nikula 153958471f63SJani Nikula return i915_drm_suspend_late(&i915->drm, false); 154058471f63SJani Nikula } 154158471f63SJani Nikula 154258471f63SJani Nikula static int i915_pm_poweroff_late(struct device *kdev) 154358471f63SJani Nikula { 154458471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 154558471f63SJani Nikula 154658471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 154758471f63SJani Nikula return 0; 154858471f63SJani Nikula 154958471f63SJani Nikula return i915_drm_suspend_late(&i915->drm, true); 155058471f63SJani Nikula } 155158471f63SJani Nikula 155258471f63SJani Nikula static int i915_pm_resume_early(struct device *kdev) 155358471f63SJani Nikula { 155458471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 155558471f63SJani Nikula 155658471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 155758471f63SJani Nikula return 0; 155858471f63SJani Nikula 155958471f63SJani Nikula return i915_drm_resume_early(&i915->drm); 156058471f63SJani Nikula } 156158471f63SJani Nikula 156258471f63SJani Nikula static int i915_pm_resume(struct device *kdev) 156358471f63SJani Nikula { 156458471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 156558471f63SJani Nikula 156658471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 156758471f63SJani Nikula return 0; 
156858471f63SJani Nikula 156958471f63SJani Nikula return i915_drm_resume(&i915->drm); 157058471f63SJani Nikula } 157158471f63SJani Nikula 157258471f63SJani Nikula /* freeze: before creating the hibernation_image */ 157358471f63SJani Nikula static int i915_pm_freeze(struct device *kdev) 157458471f63SJani Nikula { 157558471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 157658471f63SJani Nikula int ret; 157758471f63SJani Nikula 157858471f63SJani Nikula if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { 157958471f63SJani Nikula ret = i915_drm_suspend(&i915->drm); 158058471f63SJani Nikula if (ret) 158158471f63SJani Nikula return ret; 158258471f63SJani Nikula } 158358471f63SJani Nikula 158458471f63SJani Nikula ret = i915_gem_freeze(i915); 158558471f63SJani Nikula if (ret) 158658471f63SJani Nikula return ret; 158758471f63SJani Nikula 158858471f63SJani Nikula return 0; 158958471f63SJani Nikula } 159058471f63SJani Nikula 159158471f63SJani Nikula static int i915_pm_freeze_late(struct device *kdev) 159258471f63SJani Nikula { 159358471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 159458471f63SJani Nikula int ret; 159558471f63SJani Nikula 159658471f63SJani Nikula if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { 159758471f63SJani Nikula ret = i915_drm_suspend_late(&i915->drm, true); 159858471f63SJani Nikula if (ret) 159958471f63SJani Nikula return ret; 160058471f63SJani Nikula } 160158471f63SJani Nikula 160258471f63SJani Nikula ret = i915_gem_freeze_late(i915); 160358471f63SJani Nikula if (ret) 160458471f63SJani Nikula return ret; 160558471f63SJani Nikula 160658471f63SJani Nikula return 0; 160758471f63SJani Nikula } 160858471f63SJani Nikula 160958471f63SJani Nikula /* thaw: called after creating the hibernation image, but before turning off. 
*/ 161058471f63SJani Nikula static int i915_pm_thaw_early(struct device *kdev) 161158471f63SJani Nikula { 161258471f63SJani Nikula return i915_pm_resume_early(kdev); 161358471f63SJani Nikula } 161458471f63SJani Nikula 161558471f63SJani Nikula static int i915_pm_thaw(struct device *kdev) 161658471f63SJani Nikula { 161758471f63SJani Nikula return i915_pm_resume(kdev); 161858471f63SJani Nikula } 161958471f63SJani Nikula 162058471f63SJani Nikula /* restore: called after loading the hibernation image. */ 162158471f63SJani Nikula static int i915_pm_restore_early(struct device *kdev) 162258471f63SJani Nikula { 162358471f63SJani Nikula return i915_pm_resume_early(kdev); 162458471f63SJani Nikula } 162558471f63SJani Nikula 162658471f63SJani Nikula static int i915_pm_restore(struct device *kdev) 162758471f63SJani Nikula { 162858471f63SJani Nikula return i915_pm_resume(kdev); 162958471f63SJani Nikula } 163058471f63SJani Nikula 163158471f63SJani Nikula static int intel_runtime_suspend(struct device *kdev) 163258471f63SJani Nikula { 163358471f63SJani Nikula struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 163458471f63SJani Nikula struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 16351c66a12aSMatt Roper struct intel_gt *gt; 16361c66a12aSMatt Roper int ret, i; 163758471f63SJani Nikula 163858471f63SJani Nikula if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 163958471f63SJani Nikula return -ENODEV; 164058471f63SJani Nikula 1641c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Suspending device\n"); 164258471f63SJani Nikula 164358471f63SJani Nikula disable_rpm_wakeref_asserts(rpm); 164458471f63SJani Nikula 164558471f63SJani Nikula /* 164658471f63SJani Nikula * We are safe here against re-faults, since the fault handler takes 164758471f63SJani Nikula * an RPM reference. 
164858471f63SJani Nikula */ 164958471f63SJani Nikula i915_gem_runtime_suspend(dev_priv); 165058471f63SJani Nikula 1651f67986b0SAlan Previn intel_pxp_runtime_suspend(dev_priv->pxp); 1652f67986b0SAlan Previn 16531c66a12aSMatt Roper for_each_gt(gt, dev_priv, i) 16541c66a12aSMatt Roper intel_gt_runtime_suspend(gt); 165558471f63SJani Nikula 165658471f63SJani Nikula intel_runtime_pm_disable_interrupts(dev_priv); 165758471f63SJani Nikula 16581c66a12aSMatt Roper for_each_gt(gt, dev_priv, i) 16591c66a12aSMatt Roper intel_uncore_suspend(gt->uncore); 166058471f63SJani Nikula 166158471f63SJani Nikula intel_display_power_suspend(dev_priv); 166258471f63SJani Nikula 166358471f63SJani Nikula ret = vlv_suspend_complete(dev_priv); 166458471f63SJani Nikula if (ret) { 166558471f63SJani Nikula drm_err(&dev_priv->drm, 166658471f63SJani Nikula "Runtime suspend failed, disabling it (%d)\n", ret); 166758471f63SJani Nikula intel_uncore_runtime_resume(&dev_priv->uncore); 166858471f63SJani Nikula 166958471f63SJani Nikula intel_runtime_pm_enable_interrupts(dev_priv); 167058471f63SJani Nikula 1671f569ae75STvrtko Ursulin for_each_gt(gt, dev_priv, i) 1672f569ae75STvrtko Ursulin intel_gt_runtime_resume(gt); 167358471f63SJani Nikula 167458471f63SJani Nikula enable_rpm_wakeref_asserts(rpm); 167558471f63SJani Nikula 167658471f63SJani Nikula return ret; 167758471f63SJani Nikula } 167858471f63SJani Nikula 167958471f63SJani Nikula enable_rpm_wakeref_asserts(rpm); 168058471f63SJani Nikula intel_runtime_pm_driver_release(rpm); 168158471f63SJani Nikula 168258471f63SJani Nikula if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) 168358471f63SJani Nikula drm_err(&dev_priv->drm, 168458471f63SJani Nikula "Unclaimed access detected prior to suspending\n"); 168558471f63SJani Nikula 168658471f63SJani Nikula rpm->suspended = true; 168758471f63SJani Nikula 168858471f63SJani Nikula /* 168958471f63SJani Nikula * FIXME: We really should find a document that references the arguments 169058471f63SJani 
Nikula * used below! 169158471f63SJani Nikula */ 169258471f63SJani Nikula if (IS_BROADWELL(dev_priv)) { 169358471f63SJani Nikula /* 169458471f63SJani Nikula * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 169558471f63SJani Nikula * being detected, and the call we do at intel_runtime_resume() 169658471f63SJani Nikula * won't be able to restore them. Since PCI_D3hot matches the 169758471f63SJani Nikula * actual specification and appears to be working, use it. 169858471f63SJani Nikula */ 169958471f63SJani Nikula intel_opregion_notify_adapter(dev_priv, PCI_D3hot); 170058471f63SJani Nikula } else { 170158471f63SJani Nikula /* 170258471f63SJani Nikula * current versions of firmware which depend on this opregion 170358471f63SJani Nikula * notification have repurposed the D1 definition to mean 170458471f63SJani Nikula * "runtime suspended" vs. what you would normally expect (D3) 170558471f63SJani Nikula * to distinguish it from notifications that might be sent via 170658471f63SJani Nikula * the suspend path. 
170758471f63SJani Nikula */ 170858471f63SJani Nikula intel_opregion_notify_adapter(dev_priv, PCI_D1); 170958471f63SJani Nikula } 171058471f63SJani Nikula 171158471f63SJani Nikula assert_forcewakes_inactive(&dev_priv->uncore); 171258471f63SJani Nikula 171358471f63SJani Nikula if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 171458471f63SJani Nikula intel_hpd_poll_enable(dev_priv); 171558471f63SJani Nikula 1716c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Device suspended\n"); 171758471f63SJani Nikula return 0; 171858471f63SJani Nikula } 171958471f63SJani Nikula 172058471f63SJani Nikula static int intel_runtime_resume(struct device *kdev) 172158471f63SJani Nikula { 172258471f63SJani Nikula struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 172358471f63SJani Nikula struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 17241c66a12aSMatt Roper struct intel_gt *gt; 17251c66a12aSMatt Roper int ret, i; 172658471f63SJani Nikula 172758471f63SJani Nikula if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 172858471f63SJani Nikula return -ENODEV; 172958471f63SJani Nikula 1730c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Resuming device\n"); 173158471f63SJani Nikula 173258471f63SJani Nikula drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count)); 173358471f63SJani Nikula disable_rpm_wakeref_asserts(rpm); 173458471f63SJani Nikula 173558471f63SJani Nikula intel_opregion_notify_adapter(dev_priv, PCI_D0); 173658471f63SJani Nikula rpm->suspended = false; 173758471f63SJani Nikula if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) 173858471f63SJani Nikula drm_dbg(&dev_priv->drm, 173958471f63SJani Nikula "Unclaimed access during suspend, bios?\n"); 174058471f63SJani Nikula 174158471f63SJani Nikula intel_display_power_resume(dev_priv); 174258471f63SJani Nikula 174358471f63SJani Nikula ret = vlv_resume_prepare(dev_priv, true); 174458471f63SJani Nikula 17451c66a12aSMatt Roper for_each_gt(gt, dev_priv, i) 17461c66a12aSMatt Roper 
intel_uncore_runtime_resume(gt->uncore); 174758471f63SJani Nikula 174858471f63SJani Nikula intel_runtime_pm_enable_interrupts(dev_priv); 174958471f63SJani Nikula 175058471f63SJani Nikula /* 175158471f63SJani Nikula * No point of rolling back things in case of an error, as the best 175258471f63SJani Nikula * we can do is to hope that things will still work (and disable RPM). 175358471f63SJani Nikula */ 17541c66a12aSMatt Roper for_each_gt(gt, dev_priv, i) 17551c66a12aSMatt Roper intel_gt_runtime_resume(gt); 175658471f63SJani Nikula 1757f67986b0SAlan Previn intel_pxp_runtime_resume(dev_priv->pxp); 1758f67986b0SAlan Previn 175958471f63SJani Nikula /* 176058471f63SJani Nikula * On VLV/CHV display interrupts are part of the display 176158471f63SJani Nikula * power well, so hpd is reinitialized from there. For 176258471f63SJani Nikula * everyone else do it here. 176358471f63SJani Nikula */ 176458471f63SJani Nikula if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 176558471f63SJani Nikula intel_hpd_init(dev_priv); 176658471f63SJani Nikula intel_hpd_poll_disable(dev_priv); 176758471f63SJani Nikula } 176858471f63SJani Nikula 176923fbdb07SJani Nikula skl_watermark_ipc_update(dev_priv); 177058471f63SJani Nikula 177158471f63SJani Nikula enable_rpm_wakeref_asserts(rpm); 177258471f63SJani Nikula 177358471f63SJani Nikula if (ret) 177458471f63SJani Nikula drm_err(&dev_priv->drm, 177558471f63SJani Nikula "Runtime resume failed, disabling it (%d)\n", ret); 177658471f63SJani Nikula else 1777c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Device resumed\n"); 177858471f63SJani Nikula 177958471f63SJani Nikula return ret; 178058471f63SJani Nikula } 178158471f63SJani Nikula 178258471f63SJani Nikula const struct dev_pm_ops i915_pm_ops = { 178358471f63SJani Nikula /* 178458471f63SJani Nikula * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 178558471f63SJani Nikula * PMSG_RESUME] 178658471f63SJani Nikula */ 178758471f63SJani Nikula .prepare = i915_pm_prepare, 
178858471f63SJani Nikula .suspend = i915_pm_suspend, 178958471f63SJani Nikula .suspend_late = i915_pm_suspend_late, 179058471f63SJani Nikula .resume_early = i915_pm_resume_early, 179158471f63SJani Nikula .resume = i915_pm_resume, 179258471f63SJani Nikula 179358471f63SJani Nikula /* 179458471f63SJani Nikula * S4 event handlers 179558471f63SJani Nikula * @freeze, @freeze_late : called (1) before creating the 179658471f63SJani Nikula * hibernation image [PMSG_FREEZE] and 179758471f63SJani Nikula * (2) after rebooting, before restoring 179858471f63SJani Nikula * the image [PMSG_QUIESCE] 179958471f63SJani Nikula * @thaw, @thaw_early : called (1) after creating the hibernation 180058471f63SJani Nikula * image, before writing it [PMSG_THAW] 180158471f63SJani Nikula * and (2) after failing to create or 180258471f63SJani Nikula * restore the image [PMSG_RECOVER] 180358471f63SJani Nikula * @poweroff, @poweroff_late: called after writing the hibernation 180458471f63SJani Nikula * image, before rebooting [PMSG_HIBERNATE] 180558471f63SJani Nikula * @restore, @restore_early : called after rebooting and restoring the 180658471f63SJani Nikula * hibernation image [PMSG_RESTORE] 180758471f63SJani Nikula */ 180858471f63SJani Nikula .freeze = i915_pm_freeze, 180958471f63SJani Nikula .freeze_late = i915_pm_freeze_late, 181058471f63SJani Nikula .thaw_early = i915_pm_thaw_early, 181158471f63SJani Nikula .thaw = i915_pm_thaw, 181258471f63SJani Nikula .poweroff = i915_pm_suspend, 181358471f63SJani Nikula .poweroff_late = i915_pm_poweroff_late, 181458471f63SJani Nikula .restore_early = i915_pm_restore_early, 181558471f63SJani Nikula .restore = i915_pm_restore, 181658471f63SJani Nikula 181758471f63SJani Nikula /* S0ix (via runtime suspend) event handlers */ 181858471f63SJani Nikula .runtime_suspend = intel_runtime_suspend, 181958471f63SJani Nikula .runtime_resume = intel_runtime_resume, 182058471f63SJani Nikula }; 182158471f63SJani Nikula 182258471f63SJani Nikula static const struct 
file_operations i915_driver_fops = { 182358471f63SJani Nikula .owner = THIS_MODULE, 182458471f63SJani Nikula .open = drm_open, 182558471f63SJani Nikula .release = drm_release_noglobal, 182658471f63SJani Nikula .unlocked_ioctl = drm_ioctl, 182758471f63SJani Nikula .mmap = i915_gem_mmap, 182858471f63SJani Nikula .poll = drm_poll, 182958471f63SJani Nikula .read = drm_read, 183058471f63SJani Nikula .compat_ioctl = i915_ioc32_compat_ioctl, 183158471f63SJani Nikula .llseek = noop_llseek, 1832055634e4STvrtko Ursulin #ifdef CONFIG_PROC_FS 1833055634e4STvrtko Ursulin .show_fdinfo = i915_drm_client_fdinfo, 1834055634e4STvrtko Ursulin #endif 183558471f63SJani Nikula }; 183658471f63SJani Nikula 183758471f63SJani Nikula static int 183858471f63SJani Nikula i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, 183958471f63SJani Nikula struct drm_file *file) 184058471f63SJani Nikula { 184158471f63SJani Nikula return -ENODEV; 184258471f63SJani Nikula } 184358471f63SJani Nikula 184458471f63SJani Nikula static const struct drm_ioctl_desc i915_ioctls[] = { 184558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 184658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), 184758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), 184858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 184958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 185058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 185158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW), 185258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 185358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 185458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 185558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 185658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), 185758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 185858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 185958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), 186058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 186158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 186258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 186358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH), 186458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW), 186558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 186658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 186758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW), 186858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), 186958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), 187058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW), 187158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 187258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 187358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), 187458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW), 
187558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), 187658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), 187758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), 187858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW), 187958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), 188058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), 188158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), 188258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), 188358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 188458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), 188558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 188658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), 188758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), 188858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), 188958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), 189058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW), 189158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 189258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 189358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_REG_READ, 
i915_reg_read_ioctl, DRM_RENDER_ALLOW), 189458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), 189558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 189658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 189758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 189858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), 189958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW), 190058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW), 190158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW), 190258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW), 190358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), 190458471f63SJani Nikula }; 190558471f63SJani Nikula 190624524e3fSJani Nikula /* 190724524e3fSJani Nikula * Interface history: 190824524e3fSJani Nikula * 190924524e3fSJani Nikula * 1.1: Original. 
191024524e3fSJani Nikula * 1.2: Add Power Management 191124524e3fSJani Nikula * 1.3: Add vblank support 191224524e3fSJani Nikula * 1.4: Fix cmdbuffer path, add heap destroy 191324524e3fSJani Nikula * 1.5: Add vblank pipe configuration 191424524e3fSJani Nikula * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 191524524e3fSJani Nikula * - Support vertical blank on secondary display pipe 191624524e3fSJani Nikula */ 191724524e3fSJani Nikula #define DRIVER_MAJOR 1 191824524e3fSJani Nikula #define DRIVER_MINOR 6 191924524e3fSJani Nikula #define DRIVER_PATCHLEVEL 0 192024524e3fSJani Nikula 19214588d7ebSJani Nikula static const struct drm_driver i915_drm_driver = { 192258471f63SJani Nikula /* Don't use MTRRs here; the Xserver or userspace app should 192358471f63SJani Nikula * deal with them for Intel hardware. 192458471f63SJani Nikula */ 192558471f63SJani Nikula .driver_features = 192658471f63SJani Nikula DRIVER_GEM | 192758471f63SJani Nikula DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ | 192858471f63SJani Nikula DRIVER_SYNCOBJ_TIMELINE, 192958471f63SJani Nikula .release = i915_driver_release, 193058471f63SJani Nikula .open = i915_driver_open, 193158471f63SJani Nikula .lastclose = i915_driver_lastclose, 193258471f63SJani Nikula .postclose = i915_driver_postclose, 193358471f63SJani Nikula 193458471f63SJani Nikula .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 193558471f63SJani Nikula .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 193658471f63SJani Nikula .gem_prime_import = i915_gem_prime_import, 193758471f63SJani Nikula 193858471f63SJani Nikula .dumb_create = i915_gem_dumb_create, 193958471f63SJani Nikula .dumb_map_offset = i915_gem_dumb_mmap_offset, 194058471f63SJani Nikula 194158471f63SJani Nikula .ioctls = i915_ioctls, 194258471f63SJani Nikula .num_ioctls = ARRAY_SIZE(i915_ioctls), 194358471f63SJani Nikula .fops = &i915_driver_fops, 194458471f63SJani Nikula .name = DRIVER_NAME, 194558471f63SJani Nikula .desc = DRIVER_DESC, 
194658471f63SJani Nikula .date = DRIVER_DATE, 194758471f63SJani Nikula .major = DRIVER_MAJOR, 194858471f63SJani Nikula .minor = DRIVER_MINOR, 194958471f63SJani Nikula .patchlevel = DRIVER_PATCHLEVEL, 195058471f63SJani Nikula }; 1951