/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "pxp/intel_pxp_pm.h"

#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_getparam.h"
#include "i915_ioc32.h"
#include "i915_ioctl.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_region_ttm.h"
#include "vlv_suspend.h"

/* Intel Rapid Start Technology ACPI device name */
static const char irst_name[] = "INT3392";

static const struct drm_driver i915_drm_driver;

static void i915_release_bridge_dev(struct drm_device *dev,
				    void *bridge)
{
	pci_dev_put(bridge);
}

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		drm_err(&dev_priv->drm, "bridge device not found\n");
		return -EIO;
	}

	return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
					dev_priv->bridge_dev);
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; note whether we should disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ?
			 MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
	pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
	pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
	pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
	pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			"It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(to_gt(i915), ALL_ENGINES);
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);
	intel_step_init(dev_priv);

	intel_uncore_mmio_debug_init_early(dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->audio.mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	ret = intel_region_ttm_device_init(dev_priv);
	if (ret)
		goto err_ttm;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_root_gt_init_early(dev_priv);

	i915_drm_clients_init(&dev_priv->clients, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	i915_drm_clients_fini(&dev_priv->clients);
	intel_region_ttm_device_fini(dev_priv);
err_ttm:
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			      i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	i915_drm_clients_fini(&dev_priv->clients);
	intel_region_ttm_device_fini(dev_priv);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	ret = i915_get_bridge_dev(dev_priv);
	if (ret < 0)
		return ret;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret)
		return ret;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	ret = intel_gt_init_mmio(to_gt(dev_priv));
	if (ret)
		goto err_uncore;

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks. The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs. Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(i915->drm.dev, UINT_MAX);

	ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (GRAPHICS_VER(i915) == 2)
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

static int i915_pcode_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int id, ret;

	for_each_gt(gt, i915, id) {
		ret = intel_pcode_init(gt->uncore);
		if (ret) {
			drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
			return ret;
		}
	}

	return 0;
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = intel_gt_assign_ggtt(to_gt(dev_priv));
	if (ret)
		goto err_perf;

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_gt_tiles_init(dev_priv);
	if (ret)
		goto err_mem_regions;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (GRAPHICS_VER(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		goto err_msi;

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	intel_gt_driver_register(to_gt(dev_priv));

	intel_display_driver_register(dev_priv);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	intel_gt_driver_unregister(to_gt(dev_priv));

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
	drm_printf(p, "iommu: %s\n",
		   str_enabled_disabled(i915_vtd_active(i915)));
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
		i915_print_iommu_status(dev_priv, &p);
		intel_gt_info_print(&to_gt(dev_priv)->info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
		i915->drm.driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = intel_gt_probe_all(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_tiles_cleanup;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
	i915_gem_drain_freed_objects(i915);
	i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_tiles_cleanup:
	intel_gt_release_all(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
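
/*
 * Device-level release callback: tears down whatever i915_driver_remove()
 * leaves behind, but only if probe completed far enough to set
 * i915->do_release.
 */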
static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	if (!dev_priv->do_release)
		return;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_fbdev_restore_mode(dev);

	if (HAS_DISPLAY(i915))
		vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_drm_client_put(file_priv->client);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(dev);
}

void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	intel_runtime_pm_disable(&i915->runtime_pm);
	intel_power_domains_disable(i915);

	i915_gem_suspend(i915);

	if (HAS_DISPLAY(i915)) {
		drm_kms_helper_poll_disable(&i915->drm);

		drm_atomic_helper_shutdown(&i915->drm);
	}

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	intel_dmc_ucode_suspend(i915);

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 *
	 * TODO:
	 * - unify the pci_driver::shutdown sequence here with the
	 *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
	 * - unify the driver remove and system/runtime suspend sequences with
	 *   the above unified shutdown/poweroff sequence.
	 */
	intel_power_domains_driver_remove(i915);
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_runtime_pm_driver_release(&i915->runtime_pm);
}

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	return i915_gem_backup_suspend(i915);
}

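/*
 * System suspend is staged: i915_drm_prepare() quiesces GEM first,
 * i915_drm_suspend() then shuts down the display and saves register state,
 * and i915_drm_suspend_late() finally powers the device down towards D3.
 */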
*/ 114658471f63SJani Nikula intel_power_domains_disable(dev_priv); 114758471f63SJani Nikula if (HAS_DISPLAY(dev_priv)) 114858471f63SJani Nikula drm_kms_helper_poll_disable(dev); 114958471f63SJani Nikula 115058471f63SJani Nikula pci_save_state(pdev); 115158471f63SJani Nikula 115258471f63SJani Nikula intel_display_suspend(dev); 115358471f63SJani Nikula 115458471f63SJani Nikula intel_dp_mst_suspend(dev_priv); 115558471f63SJani Nikula 115658471f63SJani Nikula intel_runtime_pm_disable_interrupts(dev_priv); 115758471f63SJani Nikula intel_hpd_cancel_work(dev_priv); 115858471f63SJani Nikula 115958471f63SJani Nikula intel_suspend_encoders(dev_priv); 116058471f63SJani Nikula 116158471f63SJani Nikula intel_suspend_hw(dev_priv); 116258471f63SJani Nikula 116358471f63SJani Nikula /* Must be called before GGTT is suspended. */ 116458471f63SJani Nikula intel_dpt_suspend(dev_priv); 1165647bfd26STvrtko Ursulin i915_ggtt_suspend(to_gt(dev_priv)->ggtt); 116658471f63SJani Nikula 116758471f63SJani Nikula i915_save_display(dev_priv); 116858471f63SJani Nikula 116958471f63SJani Nikula opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 117058471f63SJani Nikula intel_opregion_suspend(dev_priv, opregion_target_state); 117158471f63SJani Nikula 117258471f63SJani Nikula intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 117358471f63SJani Nikula 117458471f63SJani Nikula dev_priv->suspend_count++; 117558471f63SJani Nikula 117658471f63SJani Nikula intel_dmc_ucode_suspend(dev_priv); 117758471f63SJani Nikula 117858471f63SJani Nikula enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 117958471f63SJani Nikula 118058471f63SJani Nikula return 0; 118158471f63SJani Nikula } 118258471f63SJani Nikula 118358471f63SJani Nikula static enum i915_drm_suspend_mode 118458471f63SJani Nikula get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) 118558471f63SJani Nikula { 118658471f63SJani Nikula if (hibernate) 118758471f63SJani Nikula return I915_DRM_SUSPEND_HIBERNATE; 118858471f63SJani Nikula 118958471f63SJani Nikula if (suspend_to_idle(dev_priv)) 119058471f63SJani Nikula return I915_DRM_SUSPEND_IDLE; 119158471f63SJani Nikula 119258471f63SJani Nikula return I915_DRM_SUSPEND_MEM; 119358471f63SJani Nikula } 119458471f63SJani Nikula 119558471f63SJani Nikula static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) 119658471f63SJani Nikula { 119758471f63SJani Nikula struct drm_i915_private *dev_priv = to_i915(dev); 119858471f63SJani Nikula struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 119958471f63SJani Nikula struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 120058471f63SJani Nikula int ret; 120158471f63SJani Nikula 120258471f63SJani Nikula disable_rpm_wakeref_asserts(rpm); 120358471f63SJani Nikula 120458471f63SJani Nikula i915_gem_suspend_late(dev_priv); 120558471f63SJani Nikula 120658471f63SJani Nikula intel_uncore_suspend(&dev_priv->uncore); 120758471f63SJani Nikula 120858471f63SJani Nikula intel_power_domains_suspend(dev_priv, 120958471f63SJani Nikula get_suspend_mode(dev_priv, hibernation)); 121058471f63SJani Nikula 121158471f63SJani Nikula intel_display_power_suspend_late(dev_priv); 121258471f63SJani Nikula 121358471f63SJani Nikula ret = vlv_suspend_complete(dev_priv); 121458471f63SJani Nikula if (ret) { 121558471f63SJani Nikula drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret); 121658471f63SJani Nikula intel_power_domains_resume(dev_priv); 121758471f63SJani Nikula 121858471f63SJani Nikula goto out; 121958471f63SJani Nikula } 122058471f63SJani Nikula 
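	/*
	 * For reference, the suspend mode passed to
	 * intel_power_domains_suspend() above is derived by
	 * get_suspend_mode() and resolves roughly as follows (summary
	 * only; the helpers themselves are authoritative):
	 *
	 *   hibernate                  -> I915_DRM_SUSPEND_HIBERNATE
	 *   s2idle (ACPI target < S3)  -> I915_DRM_SUSPEND_IDLE
	 *   otherwise                  -> I915_DRM_SUSPEND_MEM
	 */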
122158471f63SJani Nikula /*
122258471f63SJani Nikula * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
122358471f63SJani Nikula * This should be totally removed when we handle the pci states properly
122458471f63SJani Nikula * on runtime PM and on s2idle cases.
122558471f63SJani Nikula */
122658471f63SJani Nikula if (suspend_to_idle(dev_priv))
122758471f63SJani Nikula pci_d3cold_disable(pdev);
122858471f63SJani Nikula
122958471f63SJani Nikula pci_disable_device(pdev);
123058471f63SJani Nikula /*
123158471f63SJani Nikula * During hibernation on some platforms the BIOS may try to access
123258471f63SJani Nikula * the device even though it's already in D3 and hang the machine. So
123358471f63SJani Nikula * leave the device in D0 on those platforms and hope the BIOS will
123458471f63SJani Nikula * power down the device properly. The issue was seen on multiple old
123558471f63SJani Nikula * GENs with different BIOS vendors, so having an explicit blacklist
123658471f63SJani Nikula * is impractical; apply the workaround on everything pre GEN6. The
123758471f63SJani Nikula * platforms where the issue was seen:
123858471f63SJani Nikula * Lenovo Thinkpad X301, X61s, X60, T60, X41
123958471f63SJani Nikula * Fujitsu FSC S7110
124058471f63SJani Nikula * Acer Aspire 1830T
124158471f63SJani Nikula */
124258471f63SJani Nikula if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
124358471f63SJani Nikula pci_set_power_state(pdev, PCI_D3hot);
124458471f63SJani Nikula
124558471f63SJani Nikula out:
124658471f63SJani Nikula enable_rpm_wakeref_asserts(rpm);
124758471f63SJani Nikula if (!dev_priv->uncore.user_forcewake_count)
124858471f63SJani Nikula intel_runtime_pm_driver_release(rpm);
124958471f63SJani Nikula
125058471f63SJani Nikula return ret;
125158471f63SJani Nikula }
125258471f63SJani Nikula
1253b8d65b8aSJani Nikula int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
1254b8d65b8aSJani Nikula pm_message_t state)
125558471f63SJani Nikula {
125658471f63SJani Nikula int error;
125758471f63SJani Nikula
125858471f63SJani Nikula if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
125958471f63SJani Nikula state.event != PM_EVENT_FREEZE))
126058471f63SJani Nikula return -EINVAL;
126158471f63SJani Nikula
126258471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
126358471f63SJani Nikula return 0;
126458471f63SJani Nikula
126558471f63SJani Nikula error = i915_drm_suspend(&i915->drm);
126658471f63SJani Nikula if (error)
126758471f63SJani Nikula return error;
126858471f63SJani Nikula
126958471f63SJani Nikula return i915_drm_suspend_late(&i915->drm, false);
127058471f63SJani Nikula }
127158471f63SJani Nikula
127258471f63SJani Nikula static int i915_drm_resume(struct drm_device *dev)
127358471f63SJani Nikula {
127458471f63SJani Nikula struct drm_i915_private *dev_priv = to_i915(dev);
127558471f63SJani Nikula int ret;
127658471f63SJani Nikula
127758471f63SJani Nikula disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
127858471f63SJani Nikula
12796a735552SAshutosh Dixit ret = i915_pcode_init(dev_priv);
128058471f63SJani Nikula if (ret)
128158471f63SJani Nikula return ret;
128258471f63SJani Nikula
128358471f63SJani Nikula sanitize_gpu(dev_priv);
128458471f63SJani Nikula
128558471f63SJani Nikula ret = i915_ggtt_enable_hw(dev_priv);
128658471f63SJani Nikula if (ret)
128758471f63SJani Nikula drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
128858471f63SJani Nikula
1289647bfd26STvrtko Ursulin i915_ggtt_resume(to_gt(dev_priv)->ggtt);
129058471f63SJani Nikula /* Must be called after GGTT is resumed. */
129158471f63SJani Nikula intel_dpt_resume(dev_priv);
129258471f63SJani Nikula
129358471f63SJani Nikula intel_dmc_ucode_resume(dev_priv);
129458471f63SJani Nikula
129558471f63SJani Nikula i915_restore_display(dev_priv);
129658471f63SJani Nikula intel_pps_unlock_regs_wa(dev_priv);
129758471f63SJani Nikula
129858471f63SJani Nikula intel_init_pch_refclk(dev_priv);
129958471f63SJani Nikula
130058471f63SJani Nikula /*
130158471f63SJani Nikula * Interrupts have to be enabled before any batches are run. If not, the
130258471f63SJani Nikula * GPU will hang. i915_gem_init_hw() will initiate batches to
130358471f63SJani Nikula * update/restore the context.
130458471f63SJani Nikula *
130558471f63SJani Nikula * drm_mode_config_reset() needs AUX interrupts.
130658471f63SJani Nikula *
130758471f63SJani Nikula * Modeset enabling in intel_modeset_init_hw() also needs working
130858471f63SJani Nikula * interrupts.
130958471f63SJani Nikula */
131058471f63SJani Nikula intel_runtime_pm_enable_interrupts(dev_priv);
131158471f63SJani Nikula
131258471f63SJani Nikula if (HAS_DISPLAY(dev_priv))
131358471f63SJani Nikula drm_mode_config_reset(dev);
131458471f63SJani Nikula
131558471f63SJani Nikula i915_gem_resume(dev_priv);
131658471f63SJani Nikula
131758471f63SJani Nikula intel_modeset_init_hw(dev_priv);
131858471f63SJani Nikula intel_init_clock_gating(dev_priv);
131958471f63SJani Nikula intel_hpd_init(dev_priv);
132058471f63SJani Nikula
132158471f63SJani Nikula /* MST sideband requires HPD interrupts enabled */
132258471f63SJani Nikula intel_dp_mst_resume(dev_priv);
132358471f63SJani Nikula intel_display_resume(dev);
132458471f63SJani Nikula
132558471f63SJani Nikula intel_hpd_poll_disable(dev_priv);
132658471f63SJani Nikula if (HAS_DISPLAY(dev_priv))
132758471f63SJani Nikula drm_kms_helper_poll_enable(dev);
132858471f63SJani Nikula
132958471f63SJani Nikula intel_opregion_resume(dev_priv);
133058471f63SJani Nikula
133158471f63SJani Nikula intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
133258471f63SJani Nikula
133358471f63SJani Nikula intel_power_domains_enable(dev_priv);
133458471f63SJani Nikula
133558471f63SJani Nikula intel_gvt_resume(dev_priv);
133658471f63SJani Nikula
133758471f63SJani Nikula enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
133858471f63SJani Nikula
133958471f63SJani Nikula return 0;
134058471f63SJani Nikula }
134158471f63SJani Nikula
134258471f63SJani Nikula static int i915_drm_resume_early(struct drm_device *dev)
134358471f63SJani Nikula {
134458471f63SJani Nikula struct drm_i915_private *dev_priv = to_i915(dev);
134558471f63SJani Nikula struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
134658471f63SJani Nikula int ret;
134758471f63SJani Nikula
134858471f63SJani Nikula /*
134958471f63SJani Nikula * We have a resume ordering issue with the snd-hda driver also
135058471f63SJani Nikula * requiring our device to be powered up. Due to the lack of a
135158471f63SJani Nikula * parent/child relationship we currently solve this with an early
135258471f63SJani Nikula * resume hook.
135358471f63SJani Nikula *
135458471f63SJani Nikula * FIXME: This should be solved with a special hdmi sink device or
135558471f63SJani Nikula * similar so that power domains can be employed.
135658471f63SJani Nikula */
135758471f63SJani Nikula
135858471f63SJani Nikula /*
135958471f63SJani Nikula * Note that we need to set the power state explicitly, since we
136058471f63SJani Nikula * powered off the device during freeze and the PCI core won't power
136158471f63SJani Nikula * it back up for us during thaw. Powering off the device during
136258471f63SJani Nikula * freeze is not a hard requirement though, and during the
136358471f63SJani Nikula * suspend/resume phases the PCI core makes sure we get here with the
136458471f63SJani Nikula * device powered on. So in case we change our freeze logic and keep
136558471f63SJani Nikula * the device powered we can also remove the following set power state
136658471f63SJani Nikula * call.
136758471f63SJani Nikula */
136858471f63SJani Nikula ret = pci_set_power_state(pdev, PCI_D0);
136958471f63SJani Nikula if (ret) {
137058471f63SJani Nikula drm_err(&dev_priv->drm,
137158471f63SJani Nikula "failed to set PCI D0 power state (%d)\n", ret);
137258471f63SJani Nikula return ret;
137358471f63SJani Nikula }
137458471f63SJani Nikula
137558471f63SJani Nikula /*
137658471f63SJani Nikula * Note that pci_enable_device() first enables any parent bridge
137758471f63SJani Nikula * device and only then sets the power state for this device. The
137858471f63SJani Nikula * bridge enabling is a nop though, since bridge devices are resumed
137958471f63SJani Nikula * first. The order of enabling power and enabling the device is
138058471f63SJani Nikula * imposed by the PCI core as described above, so here we preserve the
138158471f63SJani Nikula * same order for the freeze/thaw phases.
138258471f63SJani Nikula *
138358471f63SJani Nikula * TODO: eventually we should remove pci_disable_device() /
138458471f63SJani Nikula * pci_enable_device() from suspend/resume. Due to how they
138558471f63SJani Nikula * depend on the device enable refcount we can't anyway depend on them
138658471f63SJani Nikula * disabling/enabling the device.
138758471f63SJani Nikula */ 138858471f63SJani Nikula if (pci_enable_device(pdev)) 138958471f63SJani Nikula return -EIO; 139058471f63SJani Nikula 139158471f63SJani Nikula pci_set_master(pdev); 139258471f63SJani Nikula 139358471f63SJani Nikula pci_d3cold_enable(pdev); 139458471f63SJani Nikula 139558471f63SJani Nikula disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 139658471f63SJani Nikula 139758471f63SJani Nikula ret = vlv_resume_prepare(dev_priv, false); 139858471f63SJani Nikula if (ret) 139958471f63SJani Nikula drm_err(&dev_priv->drm, 140058471f63SJani Nikula "Resume prepare failed: %d, continuing anyway\n", ret); 140158471f63SJani Nikula 140258471f63SJani Nikula intel_uncore_resume_early(&dev_priv->uncore); 140358471f63SJani Nikula 14044817c37dSDave Airlie intel_gt_check_and_clear_faults(to_gt(dev_priv)); 140558471f63SJani Nikula 140658471f63SJani Nikula intel_display_power_resume_early(dev_priv); 140758471f63SJani Nikula 140858471f63SJani Nikula intel_power_domains_resume(dev_priv); 140958471f63SJani Nikula 141058471f63SJani Nikula enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 141158471f63SJani Nikula 141258471f63SJani Nikula return ret; 141358471f63SJani Nikula } 141458471f63SJani Nikula 1415b8d65b8aSJani Nikula int i915_driver_resume_switcheroo(struct drm_i915_private *i915) 141658471f63SJani Nikula { 141758471f63SJani Nikula int ret; 141858471f63SJani Nikula 141958471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 142058471f63SJani Nikula return 0; 142158471f63SJani Nikula 142258471f63SJani Nikula ret = i915_drm_resume_early(&i915->drm); 142358471f63SJani Nikula if (ret) 142458471f63SJani Nikula return ret; 142558471f63SJani Nikula 142658471f63SJani Nikula return i915_drm_resume(&i915->drm); 142758471f63SJani Nikula } 142858471f63SJani Nikula 142958471f63SJani Nikula static int i915_pm_prepare(struct device *kdev) 143058471f63SJani Nikula { 143158471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 143258471f63SJani Nikula 143358471f63SJani Nikula if (!i915) { 143458471f63SJani Nikula dev_err(kdev, "DRM not initialized, aborting suspend.\n"); 143558471f63SJani Nikula return -ENODEV; 143658471f63SJani Nikula } 143758471f63SJani Nikula 143858471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 143958471f63SJani Nikula return 0; 144058471f63SJani Nikula 144158471f63SJani Nikula return i915_drm_prepare(&i915->drm); 144258471f63SJani Nikula } 144358471f63SJani Nikula 144458471f63SJani Nikula static int i915_pm_suspend(struct device *kdev) 144558471f63SJani Nikula { 144658471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 144758471f63SJani Nikula 144858471f63SJani Nikula if (!i915) { 144958471f63SJani Nikula dev_err(kdev, "DRM not initialized, aborting suspend.\n"); 145058471f63SJani Nikula return -ENODEV; 145158471f63SJani Nikula } 145258471f63SJani Nikula 14532ef6efa7SThomas Hellström i915_ggtt_mark_pte_lost(i915, false); 14542ef6efa7SThomas Hellström 145558471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 145658471f63SJani Nikula return 0; 145758471f63SJani Nikula 145858471f63SJani Nikula return i915_drm_suspend(&i915->drm); 145958471f63SJani Nikula } 146058471f63SJani Nikula 146158471f63SJani Nikula static int i915_pm_suspend_late(struct device *kdev) 146258471f63SJani Nikula { 146358471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 146458471f63SJani Nikula 146558471f63SJani Nikula /* 146658471f63SJani Nikula * We have a suspend ordering issue 
with the snd-hda driver also
146758471f63SJani Nikula * requiring our device to be powered up. Due to the lack of a
146858471f63SJani Nikula * parent/child relationship we currently solve this with a late
146958471f63SJani Nikula * suspend hook.
147058471f63SJani Nikula *
147158471f63SJani Nikula * FIXME: This should be solved with a special hdmi sink device or
147258471f63SJani Nikula * similar so that power domains can be employed.
147358471f63SJani Nikula */
147458471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
147558471f63SJani Nikula return 0;
147658471f63SJani Nikula
147758471f63SJani Nikula return i915_drm_suspend_late(&i915->drm, false);
147858471f63SJani Nikula }
147958471f63SJani Nikula
148058471f63SJani Nikula static int i915_pm_poweroff_late(struct device *kdev)
148158471f63SJani Nikula {
148258471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev);
148358471f63SJani Nikula
148458471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
148558471f63SJani Nikula return 0;
148658471f63SJani Nikula
148758471f63SJani Nikula return i915_drm_suspend_late(&i915->drm, true);
148858471f63SJani Nikula }
148958471f63SJani Nikula
149058471f63SJani Nikula static int i915_pm_resume_early(struct device *kdev)
149158471f63SJani Nikula {
149258471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev);
149358471f63SJani Nikula
149458471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
149558471f63SJani Nikula return 0;
149658471f63SJani Nikula
149758471f63SJani Nikula return i915_drm_resume_early(&i915->drm);
149858471f63SJani Nikula }
149958471f63SJani Nikula
150058471f63SJani Nikula static int i915_pm_resume(struct device *kdev)
150158471f63SJani Nikula {
150258471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev);
150358471f63SJani Nikula
150458471f63SJani Nikula if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
150558471f63SJani Nikula return 0;
150658471f63SJani Nikula
15072ef6efa7SThomas Hellström /*
15082ef6efa7SThomas Hellström * If IRST is enabled, or if we can't detect whether it's enabled,
15092ef6efa7SThomas Hellström * then we must assume we lost the GGTT page table entries, since
15102ef6efa7SThomas Hellström * they are not retained if IRST decided to enter S4.
15112ef6efa7SThomas Hellström */ 15122ef6efa7SThomas Hellström if (!IS_ENABLED(CONFIG_ACPI) || acpi_dev_present(irst_name, NULL, -1)) 15132ef6efa7SThomas Hellström i915_ggtt_mark_pte_lost(i915, true); 15142ef6efa7SThomas Hellström 151558471f63SJani Nikula return i915_drm_resume(&i915->drm); 151658471f63SJani Nikula } 151758471f63SJani Nikula 151858471f63SJani Nikula /* freeze: before creating the hibernation_image */ 151958471f63SJani Nikula static int i915_pm_freeze(struct device *kdev) 152058471f63SJani Nikula { 152158471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 152258471f63SJani Nikula int ret; 152358471f63SJani Nikula 152458471f63SJani Nikula if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { 152558471f63SJani Nikula ret = i915_drm_suspend(&i915->drm); 152658471f63SJani Nikula if (ret) 152758471f63SJani Nikula return ret; 152858471f63SJani Nikula } 152958471f63SJani Nikula 153058471f63SJani Nikula ret = i915_gem_freeze(i915); 153158471f63SJani Nikula if (ret) 153258471f63SJani Nikula return ret; 153358471f63SJani Nikula 153458471f63SJani Nikula return 0; 153558471f63SJani Nikula } 153658471f63SJani Nikula 153758471f63SJani Nikula static int i915_pm_freeze_late(struct device *kdev) 153858471f63SJani Nikula { 153958471f63SJani Nikula struct drm_i915_private *i915 = kdev_to_i915(kdev); 154058471f63SJani Nikula int ret; 154158471f63SJani Nikula 154258471f63SJani Nikula if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { 154358471f63SJani Nikula ret = i915_drm_suspend_late(&i915->drm, true); 154458471f63SJani Nikula if (ret) 154558471f63SJani Nikula return ret; 154658471f63SJani Nikula } 154758471f63SJani Nikula 154858471f63SJani Nikula ret = i915_gem_freeze_late(i915); 154958471f63SJani Nikula if (ret) 155058471f63SJani Nikula return ret; 155158471f63SJani Nikula 155258471f63SJani Nikula return 0; 155358471f63SJani Nikula } 155458471f63SJani Nikula 155558471f63SJani Nikula /* thaw: called after creating the hibernation image, but before turning off. */ 155658471f63SJani Nikula static int i915_pm_thaw_early(struct device *kdev) 155758471f63SJani Nikula { 155858471f63SJani Nikula return i915_pm_resume_early(kdev); 155958471f63SJani Nikula } 156058471f63SJani Nikula 156158471f63SJani Nikula static int i915_pm_thaw(struct device *kdev) 156258471f63SJani Nikula { 156358471f63SJani Nikula return i915_pm_resume(kdev); 156458471f63SJani Nikula } 156558471f63SJani Nikula 156658471f63SJani Nikula /* restore: called after loading the hibernation image. 
*/ 156758471f63SJani Nikula static int i915_pm_restore_early(struct device *kdev) 156858471f63SJani Nikula { 156958471f63SJani Nikula return i915_pm_resume_early(kdev); 157058471f63SJani Nikula } 157158471f63SJani Nikula 157258471f63SJani Nikula static int i915_pm_restore(struct device *kdev) 157358471f63SJani Nikula { 15742ef6efa7SThomas Hellström struct drm_i915_private *i915 = kdev_to_i915(kdev); 15752ef6efa7SThomas Hellström 15762ef6efa7SThomas Hellström i915_ggtt_mark_pte_lost(i915, true); 157758471f63SJani Nikula return i915_pm_resume(kdev); 157858471f63SJani Nikula } 157958471f63SJani Nikula 158058471f63SJani Nikula static int intel_runtime_suspend(struct device *kdev) 158158471f63SJani Nikula { 158258471f63SJani Nikula struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 158358471f63SJani Nikula struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 158458471f63SJani Nikula struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 158558471f63SJani Nikula int ret; 158658471f63SJani Nikula 158758471f63SJani Nikula if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 158858471f63SJani Nikula return -ENODEV; 158958471f63SJani Nikula 1590c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Suspending device\n"); 159158471f63SJani Nikula 159258471f63SJani Nikula disable_rpm_wakeref_asserts(rpm); 159358471f63SJani Nikula 159458471f63SJani Nikula /* 159558471f63SJani Nikula * We are safe here against re-faults, since the fault handler takes 159658471f63SJani Nikula * an RPM reference. 159758471f63SJani Nikula */ 159858471f63SJani Nikula i915_gem_runtime_suspend(dev_priv); 159958471f63SJani Nikula 16004817c37dSDave Airlie intel_gt_runtime_suspend(to_gt(dev_priv)); 160158471f63SJani Nikula 160258471f63SJani Nikula intel_runtime_pm_disable_interrupts(dev_priv); 160358471f63SJani Nikula 160458471f63SJani Nikula intel_uncore_suspend(&dev_priv->uncore); 160558471f63SJani Nikula 160658471f63SJani Nikula intel_display_power_suspend(dev_priv); 160758471f63SJani Nikula 160858471f63SJani Nikula ret = vlv_suspend_complete(dev_priv); 160958471f63SJani Nikula if (ret) { 161058471f63SJani Nikula drm_err(&dev_priv->drm, 161158471f63SJani Nikula "Runtime suspend failed, disabling it (%d)\n", ret); 161258471f63SJani Nikula intel_uncore_runtime_resume(&dev_priv->uncore); 161358471f63SJani Nikula 161458471f63SJani Nikula intel_runtime_pm_enable_interrupts(dev_priv); 161558471f63SJani Nikula 16164817c37dSDave Airlie intel_gt_runtime_resume(to_gt(dev_priv)); 161758471f63SJani Nikula 161858471f63SJani Nikula enable_rpm_wakeref_asserts(rpm); 161958471f63SJani Nikula 162058471f63SJani Nikula return ret; 162158471f63SJani Nikula } 162258471f63SJani Nikula 162358471f63SJani Nikula enable_rpm_wakeref_asserts(rpm); 162458471f63SJani Nikula intel_runtime_pm_driver_release(rpm); 162558471f63SJani Nikula 162658471f63SJani Nikula if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) 162758471f63SJani Nikula drm_err(&dev_priv->drm, 162858471f63SJani Nikula "Unclaimed access detected prior to suspending\n"); 162958471f63SJani Nikula 163058471f63SJani Nikula /* 163158471f63SJani Nikula * FIXME: Temporary hammer to avoid freezing the machine on our DGFX 163258471f63SJani Nikula * This should be totally removed when we handle the pci states properly 163358471f63SJani Nikula * on runtime PM and on s2idle cases. 
163458471f63SJani Nikula */ 163558471f63SJani Nikula pci_d3cold_disable(pdev); 163658471f63SJani Nikula rpm->suspended = true; 163758471f63SJani Nikula 163858471f63SJani Nikula /* 163958471f63SJani Nikula * FIXME: We really should find a document that references the arguments 164058471f63SJani Nikula * used below! 164158471f63SJani Nikula */ 164258471f63SJani Nikula if (IS_BROADWELL(dev_priv)) { 164358471f63SJani Nikula /* 164458471f63SJani Nikula * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 164558471f63SJani Nikula * being detected, and the call we do at intel_runtime_resume() 164658471f63SJani Nikula * won't be able to restore them. Since PCI_D3hot matches the 164758471f63SJani Nikula * actual specification and appears to be working, use it. 164858471f63SJani Nikula */ 164958471f63SJani Nikula intel_opregion_notify_adapter(dev_priv, PCI_D3hot); 165058471f63SJani Nikula } else { 165158471f63SJani Nikula /* 165258471f63SJani Nikula * current versions of firmware which depend on this opregion 165358471f63SJani Nikula * notification have repurposed the D1 definition to mean 165458471f63SJani Nikula * "runtime suspended" vs. what you would normally expect (D3) 165558471f63SJani Nikula * to distinguish it from notifications that might be sent via 165658471f63SJani Nikula * the suspend path. 165758471f63SJani Nikula */ 165858471f63SJani Nikula intel_opregion_notify_adapter(dev_priv, PCI_D1); 165958471f63SJani Nikula } 166058471f63SJani Nikula 166158471f63SJani Nikula assert_forcewakes_inactive(&dev_priv->uncore); 166258471f63SJani Nikula 166358471f63SJani Nikula if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 166458471f63SJani Nikula intel_hpd_poll_enable(dev_priv); 166558471f63SJani Nikula 1666c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Device suspended\n"); 166758471f63SJani Nikula return 0; 166858471f63SJani Nikula } 166958471f63SJani Nikula 167058471f63SJani Nikula static int intel_runtime_resume(struct device *kdev) 167158471f63SJani Nikula { 167258471f63SJani Nikula struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 167358471f63SJani Nikula struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 167458471f63SJani Nikula struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 167558471f63SJani Nikula int ret; 167658471f63SJani Nikula 167758471f63SJani Nikula if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 167858471f63SJani Nikula return -ENODEV; 167958471f63SJani Nikula 1680c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Resuming device\n"); 168158471f63SJani Nikula 168258471f63SJani Nikula drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count)); 168358471f63SJani Nikula disable_rpm_wakeref_asserts(rpm); 168458471f63SJani Nikula 168558471f63SJani Nikula intel_opregion_notify_adapter(dev_priv, PCI_D0); 168658471f63SJani Nikula rpm->suspended = false; 168758471f63SJani Nikula pci_d3cold_enable(pdev); 168858471f63SJani Nikula if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) 168958471f63SJani Nikula drm_dbg(&dev_priv->drm, 169058471f63SJani Nikula "Unclaimed access during suspend, bios?\n"); 169158471f63SJani Nikula 169258471f63SJani Nikula intel_display_power_resume(dev_priv); 169358471f63SJani Nikula 169458471f63SJani Nikula ret = vlv_resume_prepare(dev_priv, true); 169558471f63SJani Nikula 169658471f63SJani Nikula intel_uncore_runtime_resume(&dev_priv->uncore); 169758471f63SJani Nikula 169858471f63SJani Nikula intel_runtime_pm_enable_interrupts(dev_priv); 169958471f63SJani Nikula 170058471f63SJani Nikula /* 170158471f63SJani 
Nikula * No point in rolling back things in case of an error, as the best
170258471f63SJani Nikula * we can do is to hope that things will still work (and disable RPM).
170358471f63SJani Nikula */
17044817c37dSDave Airlie intel_gt_runtime_resume(to_gt(dev_priv));
170558471f63SJani Nikula
170658471f63SJani Nikula /*
170758471f63SJani Nikula * On VLV/CHV display interrupts are part of the display
170858471f63SJani Nikula * power well, so hpd is reinitialized from there. For
170958471f63SJani Nikula * everyone else do it here.
171058471f63SJani Nikula */
171158471f63SJani Nikula if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
171258471f63SJani Nikula intel_hpd_init(dev_priv);
171358471f63SJani Nikula intel_hpd_poll_disable(dev_priv);
171458471f63SJani Nikula }
171558471f63SJani Nikula
171658471f63SJani Nikula intel_enable_ipc(dev_priv);
171758471f63SJani Nikula
171858471f63SJani Nikula enable_rpm_wakeref_asserts(rpm);
171958471f63SJani Nikula
172058471f63SJani Nikula if (ret)
172158471f63SJani Nikula drm_err(&dev_priv->drm,
172258471f63SJani Nikula "Runtime resume failed, disabling it (%d)\n", ret);
172358471f63SJani Nikula else
1724c3e57159SAnshuman Gupta drm_dbg(&dev_priv->drm, "Device resumed\n");
172558471f63SJani Nikula
172658471f63SJani Nikula return ret;
172758471f63SJani Nikula }
172858471f63SJani Nikula
172958471f63SJani Nikula const struct dev_pm_ops i915_pm_ops = {
173058471f63SJani Nikula /*
173158471f63SJani Nikula * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
173258471f63SJani Nikula * PMSG_RESUME]
173358471f63SJani Nikula */
173458471f63SJani Nikula .prepare = i915_pm_prepare,
173558471f63SJani Nikula .suspend = i915_pm_suspend,
173658471f63SJani Nikula .suspend_late = i915_pm_suspend_late,
173758471f63SJani Nikula .resume_early = i915_pm_resume_early,
173858471f63SJani Nikula .resume = i915_pm_resume,
173958471f63SJani Nikula
174058471f63SJani Nikula /*
174158471f63SJani Nikula * S4 event handlers
174258471f63SJani Nikula * @freeze, @freeze_late : called (1) before creating the
174358471f63SJani Nikula * hibernation image [PMSG_FREEZE] and
174458471f63SJani Nikula * (2) after rebooting, before restoring
174558471f63SJani Nikula * the image [PMSG_QUIESCE]
174658471f63SJani Nikula * @thaw, @thaw_early : called (1) after creating the hibernation
174758471f63SJani Nikula * image, before writing it [PMSG_THAW]
174858471f63SJani Nikula * and (2) after failing to create or
174958471f63SJani Nikula * restore the image [PMSG_RECOVER]
175058471f63SJani Nikula * @poweroff, @poweroff_late: called after writing the hibernation
175158471f63SJani Nikula * image, before rebooting [PMSG_HIBERNATE]
175258471f63SJani Nikula * @restore, @restore_early : called after rebooting and restoring the
175358471f63SJani Nikula * hibernation image [PMSG_RESTORE]
175458471f63SJani Nikula */
175558471f63SJani Nikula .freeze = i915_pm_freeze,
175658471f63SJani Nikula .freeze_late = i915_pm_freeze_late,
175758471f63SJani Nikula .thaw_early = i915_pm_thaw_early,
175858471f63SJani Nikula .thaw = i915_pm_thaw,
175958471f63SJani Nikula .poweroff = i915_pm_suspend,
176058471f63SJani Nikula .poweroff_late = i915_pm_poweroff_late,
176158471f63SJani Nikula .restore_early = i915_pm_restore_early,
176258471f63SJani Nikula .restore = i915_pm_restore,
176358471f63SJani Nikula
176458471f63SJani Nikula /* S0ix (via runtime suspend) event handlers */
176558471f63SJani Nikula .runtime_suspend = intel_runtime_suspend,
176658471f63SJani Nikula .runtime_resume = intel_runtime_resume,
176758471f63SJani Nikula }; 176858471f63SJani Nikula 176958471f63SJani Nikula static const struct file_operations i915_driver_fops = { 177058471f63SJani Nikula .owner = THIS_MODULE, 177158471f63SJani Nikula .open = drm_open, 177258471f63SJani Nikula .release = drm_release_noglobal, 177358471f63SJani Nikula .unlocked_ioctl = drm_ioctl, 177458471f63SJani Nikula .mmap = i915_gem_mmap, 177558471f63SJani Nikula .poll = drm_poll, 177658471f63SJani Nikula .read = drm_read, 177758471f63SJani Nikula .compat_ioctl = i915_ioc32_compat_ioctl, 177858471f63SJani Nikula .llseek = noop_llseek, 1779055634e4STvrtko Ursulin #ifdef CONFIG_PROC_FS 1780055634e4STvrtko Ursulin .show_fdinfo = i915_drm_client_fdinfo, 1781055634e4STvrtko Ursulin #endif 178258471f63SJani Nikula }; 178358471f63SJani Nikula 178458471f63SJani Nikula static int 178558471f63SJani Nikula i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, 178658471f63SJani Nikula struct drm_file *file) 178758471f63SJani Nikula { 178858471f63SJani Nikula return -ENODEV; 178958471f63SJani Nikula } 179058471f63SJani Nikula 179158471f63SJani Nikula static const struct drm_ioctl_desc i915_ioctls[] = { 179258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 179358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), 179458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), 179558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 179658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 179758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 179858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW), 179958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 180058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 180158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 180258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 180358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), 180458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 180558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 180658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), 180758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 180858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 180958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 181058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH), 181158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW), 181258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 181358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 181458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW), 181558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), 181658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), 181758471f63SJani 
Nikula DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW), 181858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 181958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 182058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), 182158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW), 182258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), 182358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), 182458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), 182558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW), 182658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), 182758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), 182858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), 182958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), 183058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 183158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), 183258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 183358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), 183458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), 183558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), 183658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), 183758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW), 183858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 183958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 184058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 184158471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), 184258471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 184358471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 184458471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 184558471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), 184658471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW), 184758471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW), 184858471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW), 184958471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW), 
185058471f63SJani Nikula DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), 185158471f63SJani Nikula }; 185258471f63SJani Nikula 185324524e3fSJani Nikula /* 185424524e3fSJani Nikula * Interface history: 185524524e3fSJani Nikula * 185624524e3fSJani Nikula * 1.1: Original. 185724524e3fSJani Nikula * 1.2: Add Power Management 185824524e3fSJani Nikula * 1.3: Add vblank support 185924524e3fSJani Nikula * 1.4: Fix cmdbuffer path, add heap destroy 186024524e3fSJani Nikula * 1.5: Add vblank pipe configuration 186124524e3fSJani Nikula * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 186224524e3fSJani Nikula * - Support vertical blank on secondary display pipe 186324524e3fSJani Nikula */ 186424524e3fSJani Nikula #define DRIVER_MAJOR 1 186524524e3fSJani Nikula #define DRIVER_MINOR 6 186624524e3fSJani Nikula #define DRIVER_PATCHLEVEL 0 186724524e3fSJani Nikula 18684588d7ebSJani Nikula static const struct drm_driver i915_drm_driver = { 186958471f63SJani Nikula /* Don't use MTRRs here; the Xserver or userspace app should 187058471f63SJani Nikula * deal with them for Intel hardware. 187158471f63SJani Nikula */ 187258471f63SJani Nikula .driver_features = 187358471f63SJani Nikula DRIVER_GEM | 187458471f63SJani Nikula DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ | 187558471f63SJani Nikula DRIVER_SYNCOBJ_TIMELINE, 187658471f63SJani Nikula .release = i915_driver_release, 187758471f63SJani Nikula .open = i915_driver_open, 187858471f63SJani Nikula .lastclose = i915_driver_lastclose, 187958471f63SJani Nikula .postclose = i915_driver_postclose, 188058471f63SJani Nikula 188158471f63SJani Nikula .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 188258471f63SJani Nikula .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 188358471f63SJani Nikula .gem_prime_import = i915_gem_prime_import, 188458471f63SJani Nikula 188558471f63SJani Nikula .dumb_create = i915_gem_dumb_create, 188658471f63SJani Nikula .dumb_map_offset = i915_gem_dumb_mmap_offset, 188758471f63SJani Nikula 188858471f63SJani Nikula .ioctls = i915_ioctls, 188958471f63SJani Nikula .num_ioctls = ARRAY_SIZE(i915_ioctls), 189058471f63SJani Nikula .fops = &i915_driver_fops, 189158471f63SJani Nikula .name = DRIVER_NAME, 189258471f63SJani Nikula .desc = DRIVER_DESC, 189358471f63SJani Nikula .date = DRIVER_DATE, 189458471f63SJani Nikula .major = DRIVER_MAJOR, 189558471f63SJani Nikula .minor = DRIVER_MINOR, 189658471f63SJani Nikula .patchlevel = DRIVER_PATCHLEVEL, 189758471f63SJani Nikula }; 1898
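/*
 * For context: i915_pm_ops above is consumed by the PCI driver
 * registration, which lives in i915_pci.c rather than in this file. A
 * minimal sketch of that wiring, approximated from the upstream layout
 * rather than copied verbatim, looks like:
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		.id_table = pciidlist,
 *		.probe = i915_pci_probe,
 *		.remove = i915_pci_remove,
 *		.shutdown = i915_pci_shutdown,
 *		.driver.pm = &i915_pm_ops,
 *	};
 *
 * i915_pci_shutdown() then calls i915_driver_shutdown() defined above,
 * and .driver.pm routes system and runtime PM events to the i915_pm_*()
 * and intel_runtime_*() callbacks in this file.
 */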