// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM shall be guided by simplicity.
 * Use the simplest hook options whenever possible.
 * Let's not reinvent the runtime_pm references and hooks.
 * Shall have a clear separation of display and gt underneath this component.
 *
 * What's next:
 *
 * For now s2idle and s3 are only working on integrated devices. The next step
 * is to iterate through all VRAM BOs, backing them up into system memory
 * before allowing the system suspend.
 *
 * Also runtime_pm needs to be here from the beginning.
 *
 * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
 * and no wait boost. Frequency optimizations should come at a later stage.
 */

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	xe_irq_suspend(xe);

	return 0;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	for_each_gt(gt, xe, id) {
		err = xe_pcode_init(gt);
		if (err)
			return err;
	}

	/*
	 * This only restores pinned memory, which is the memory required for
	 * the GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		return err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	err = xe_bo_restore_user(xe);
	if (err)
		return err;

	return 0;
}

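/*
 * Illustrative sketch only (not part of this file): the system suspend/resume
 * helpers above are expected to be driven from the PCI driver's dev_pm_ops.
 * The callback names below are hypothetical, and pdev_to_xe_device() is
 * assumed to be the usual pci_dev-to-xe_device helper; the real wiring lives
 * in the PCI glue code (xe_pci.c) and may differ:
 *
 *	static int xe_pci_suspend_example(struct device *dev)
 *	{
 *		return xe_pm_suspend(pdev_to_xe_device(to_pci_dev(dev)));
 *	}
 *
 *	static int xe_pci_resume_example(struct device *dev)
 *	{
 *		return xe_pm_resume(pdev_to_xe_device(to_pci_dev(dev)));
 *	}
 */
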
static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev)
{
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability and _PR3 power resource */
	if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev))
		return false;

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then enforce
	 * this option for integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

void xe_pm_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return;

	drmm_mutex_init(&xe->drm, &xe->d3cold.lock);

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(pdev);

	if (xe->d3cold.capable) {
		xe_device_sysfs_init(xe);
		xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
	}

	xe_pm_runtime_init(xe);
}

void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

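/*
 * Illustrative sketch only: xe_pm_read_callback_task() is meant to let code
 * that would normally take a runtime PM or memory-access reference detect
 * that it is already running from inside one of our own runtime PM callbacks
 * and skip the reference, avoiding recursion into the rpm core. A hypothetical
 * caller (assumed, not quoted from the actual xe_device.c code) would look
 * roughly like:
 *
 *	if (xe_pm_read_callback_task(xe) != current)
 *		xe_pm_runtime_get(xe);
 */
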
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
		return -EBUSY;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_device_mem_access_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_device_mem_access_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               | lock(A)
	 * xe_device_mem_access_get()    |
	 *
	 * This will clearly deadlock since the rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0, which prevents CPU1 from making forward progress. With the
	 * annotation here and in xe_device_mem_access_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	/*
	 * It is possible that xe has allowed d3cold while some other PCIe
	 * device in the gfx card SoC blocked it, in which case the card has
	 * not really lost power. Detecting loss of primary GT power is
	 * sufficient.
	 */
	gt = xe_device_get_gt(xe, 0);
	xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		for_each_gt(gt, xe, id) {
			err = xe_pcode_init(gt);
			if (err)
				goto out;
		}

		/*
		 * This only restores pinned memory, which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

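/*
 * Illustrative sketch only: the reference helpers below follow the usual
 * runtime PM pattern, where hardware access is bracketed by a get/put pair
 * (the work done in between is hypothetical):
 *
 *	xe_pm_runtime_get(xe);
 *	... touch the hardware ...
 *	xe_pm_runtime_put(xe);
 *
 * xe_pm_runtime_get_if_active() is the non-blocking variant for paths that
 * only want a reference when the device is already awake.
 */
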
int xe_pm_runtime_get(struct xe_device *xe)
{
	return pm_runtime_get_sync(xe->drm.dev);
}

int xe_pm_runtime_put(struct xe_device *xe)
{
	pm_runtime_mark_last_busy(xe->drm.dev);
	return pm_runtime_put(xe->drm.dev);
}

int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev, true);
}

void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

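/*
 * A worked example of the policy implemented below (the number is
 * illustrative, not taken from the driver): with a VRAM threshold of 300 MB,
 * D3Cold is only allowed while less than 300 MB of VRAM is in use, so the
 * backup and restore of VRAM to system memory across a runtime suspend/resume
 * cycle stays reasonably cheap. Above the threshold the device is kept out of
 * D3Cold, where VRAM contents would otherwise be lost.
 */
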
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}