// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/ttm/ttm_placement.h>

#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_irq.h"
#include "xe_pcode.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM shall be guided by simplicity.
 * Use the simplest hook options whenever possible.
 * Let's not reinvent the runtime_pm references and hooks.
 * There shall be a clear separation of display and GT underneath this
 * component.
 *
 * What's next:
 *
 * For now s2idle and s3 only work on integrated devices. The next step
 * is to iterate over all VRAM BOs, backing them up into system memory
 * before allowing system suspend.
 *
 * Also runtime_pm needs to be here from the beginning.
 *
 * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
 * and no wait boost. Frequency optimizations should come in a later stage.
 */
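/*
 * Illustrative only: the runtime_pm reference pattern this component expects
 * from internal callers, where touch_hw() is a hypothetical stand-in for
 * real hardware access and not a function of this driver:
 *
 *	xe_pm_runtime_get(xe);
 *	touch_hw(xe);
 *	xe_pm_runtime_put(xe);
 *
 * The put is the autosuspend variant, so the device stays powered for the
 * autosuspend delay configured in xe_pm_runtime_init() in case another
 * access follows shortly.
 */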
/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racey... */
	err = xe_bo_evict_all(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	xe_irq_suspend(xe);

	return 0;
}

/**
 * xe_pm_resume - Helper for System resume, i.e. S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id) {
		err = xe_pcode_init(gt);
		if (err)
			return err;
	}

	/*
	 * This only restores pinned memory, which is the memory required for
	 * the GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		return err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	err = xe_bo_restore_user(xe);
	if (err)
		return err;

	return 0;
}

static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev)
{
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability and the _PR3 power resource */
	if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev))
		return false;

	return true;
}
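/*
 * Sketch of the intended split (an assumption; the policy side is not in
 * this file): d3cold_capable is the static platform gate set once at init,
 * while d3cold_allowed is the dynamic per-suspend decision, so any policy
 * should reduce to something like:
 *
 *	xe->d3cold_allowed = xe->d3cold_capable && policy_permits_d3cold(xe);
 *
 * where policy_permits_d3cold() is a hypothetical predicate.
 */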
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	/* Drop the initial reference so the device may autosuspend. */
	pm_runtime_put_autosuspend(dev);
}

void xe_pm_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe_pm_runtime_init(xe);
	xe->d3cold_capable = xe_pm_pci_d3cold_capable(pdev);
}

void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/* Wake the device up and keep runtime PM disabled from here on. */
	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	if (xe->d3cold_allowed) {
		if (xe_device_mem_access_ongoing(xe))
			return -EBUSY;

		/* D3Cold loses VRAM content, so everything must be evicted. */
		err = xe_bo_evict_all(xe);
		if (err)
			return err;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	xe_irq_suspend(xe);

	return 0;
}

int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	if (xe->d3cold_allowed) {
		for_each_gt(gt, xe, id) {
			err = xe_pcode_init(gt);
			if (err)
				return err;
		}

		/*
		 * This only restores pinned memory, which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			return err;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold_allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			return err;
	}

	return 0;
}
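/*
 * Illustrative only: xe_pm_runtime_suspend()/xe_pm_runtime_resume() are
 * helpers for the driver's runtime PM callbacks; the actual wiring lives in
 * the PCI driver code. Assumed shape, with hypothetical names:
 *
 *	static int xe_pci_runtime_suspend(struct device *dev)
 *	{
 *		struct xe_device *xe = pci_get_drvdata(to_pci_dev(dev));
 *
 *		return xe_pm_runtime_suspend(xe);
 *	}
 */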
int xe_pm_runtime_get(struct xe_device *xe)
{
	return pm_runtime_get_sync(xe->drm.dev);
}

int xe_pm_runtime_put(struct xe_device *xe)
{
	pm_runtime_mark_last_busy(xe->drm.dev);
	return pm_runtime_put_autosuspend(xe->drm.dev);
}

/* Return true if a resume operation happened and the usage count was increased */
bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe)
{
	/* In case we are suspended we need to wake up immediately */
	if (pm_runtime_suspended(xe->drm.dev))
		return !pm_runtime_resume_and_get(xe->drm.dev);

	return false;
}

int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	WARN_ON(pm_runtime_suspended(xe->drm.dev));
	return pm_runtime_get_if_active(xe->drm.dev, true);
}

void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent PCI bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}
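/*
 * Illustrative only (assumed caller pattern, not from this file): a path
 * that must not wake a sleeping device, e.g. opportunistic flushing, takes
 * the conditional reference and bails out when the device is not active:
 *
 *	if (xe_pm_runtime_get_if_active(xe) <= 0)
 *		return;
 *	...access the hardware...
 *	xe_pm_runtime_put(xe);
 */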