// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>
#include <linux/string_helpers.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_display.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM shall be guided by simplicity:
 * Use the simplest hook options whenever possible.
 * Let's not reinvent the runtime_pm references and hooks.
 * Keep a clear separation of display and GT underneath this component.
 *
 * What's next:
 *
 * For now s2idle and s3 only work on integrated devices. The next step
 * is to iterate through all VRAM BOs, backing them up into system memory
 * before allowing system suspend.
 *
 * Also, runtime_pm needs to be here from the beginning.
 *
 * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
 * and no wait boost. Frequency optimizations should come at a later stage.
 */

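/*
 * Illustrative usage sketch (not a definitive contract): code that needs the
 * device awake while it touches the hardware is expected to hold a runtime PM
 * reference around the access, e.g.:
 *
 *	xe_pm_runtime_get(xe);
 *	... MMIO access, job submission, etc. ...
 *	xe_pm_runtime_put(xe);
 *
 * The helpers below provide the system suspend/resume and runtime
 * suspend/resume entry points on top of the core runtime_pm machinery.
 */
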
/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racey... */
	err = xe_bo_evict_all(xe);
	if (err)
		return err;

	xe_display_pm_suspend(xe);

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe);
			return err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	return 0;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	for_each_gt(gt, xe, id) {
		err = xe_pcode_init(gt);
		if (err)
			return err;
	}

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for
	 * the GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		return err;

	xe_irq_resume(xe);

	xe_display_pm_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	err = xe_bo_restore_user(xe);
	if (err)
		return err;

	return 0;
}

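/*
 * D3Cold is only reported as supported when the root port is PME-capable
 * from D3cold and exposes an ACPI _PR3 power resource; otherwise runtime
 * D3Cold is never allowed for this device.
 */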
static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev)
{
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability and _PR3 power resource */
	if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev))
		return false;

	return true;
}

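/*
 * One-time runtime PM setup: enable autosuspend with a 1 second delay, mark
 * the device active, allow runtime PM and drop a usage reference so that the
 * device may runtime suspend once it goes idle.
 */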
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then enforce
	 * this option on integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

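/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * Detects D3Cold capability, sets up the d3cold bookkeeping (sysfs entry and
 * default VRAM threshold) when capable, and enables runtime PM. Skipped
 * entirely without GuC, since suspend/resume is only allowed with GuC for now.
 */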
void xe_pm_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return;

	drmm_mutex_init(&xe->drm, &xe->d3cold.lock);

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(pdev);

	if (xe->d3cold.capable) {
		xe_device_sysfs_init(xe);
		xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
	}

	xe_pm_runtime_init(xe);
}

void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

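/**
 * xe_pm_runtime_suspend - Helper for the runtime suspend path
 * @xe: xe device instance
 *
 * Evicts VRAM when d3cold is allowed, then suspends all GTs and interrupt
 * handling.
 *
 * Return: 0 on success, -EBUSY if a memory access is still ongoing while
 * d3cold is allowed, or another negative error code on failure.
 */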
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
		return -EBUSY;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_device_mem_access_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_device_mem_access_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_device_mem_access_get()    |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress. With the
	 * annotation here and in xe_device_mem_access_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

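/**
 * xe_pm_runtime_resume - Helper for the runtime resume path
 * @xe: xe device instance
 *
 * If d3cold was allowed and the card actually lost power, pcode is
 * re-initialized and pinned kernel memory is restored before interrupts and
 * the GTs are resumed; user memory is restored afterwards.
 *
 * Return: 0 on success, negative error code on failure.
 */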
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	/*
	 * It is possible that xe has allowed d3cold, but other pcie devices
	 * in the gfx card SoC blocked it, in which case the card has not
	 * really lost power. Detecting the primary GT's power state is
	 * sufficient.
	 */
	gt = xe_device_get_gt(xe, 0);
	xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		for_each_gt(gt, xe, id) {
			err = xe_pcode_init(gt);
			if (err)
				goto out;
		}

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

int xe_pm_runtime_get(struct xe_device *xe)
{
	return pm_runtime_get_sync(xe->drm.dev);
}

int xe_pm_runtime_put(struct xe_device *xe)
{
	pm_runtime_mark_last_busy(xe->drm.dev);
	return pm_runtime_put(xe->drm.dev);
}

int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev, true);
}

void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/*
 * Set the VRAM usage threshold (in MiB) below which d3cold is allowed.
 * Thresholds larger than the total amount of VRAM are rejected.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

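/**
 * xe_pm_d3cold_allowed_toggle - Re-evaluate whether D3Cold should be allowed
 * @xe: xe device instance
 *
 * D3Cold is only allowed when the platform is capable of it and the current
 * VRAM usage is below the configured vram_threshold, since everything left
 * in VRAM would otherwise have to be evicted to system memory on every
 * runtime suspend.
 */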
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}