// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM shall be guided by simplicity.
 * Use the simplest hook options whenever possible.
 * Let's not reinvent the runtime_pm references and hooks.
 * There shall be a clear separation of display and gt underneath this component.
 *
 * What's next:
 *
 * For now s2idle and s3 only work on integrated devices. The next step
 * is to iterate through all VRAM BOs, backing them up into system memory
 * before allowing system suspend.
 *
 * Also runtime_pm needs to be here from the beginning.
 *
 * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
 * and no wait boost. Frequency optimizations should come at a later stage.
 */

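/*
 * Illustrative caller pattern (sketch only, not a call site in this file):
 * any path that touches the hardware is expected to hold a runtime_pm
 * reference for the duration of the access, e.g.:
 *
 *	xe_pm_runtime_get(xe);
 *	...access registers / VRAM...
 *	xe_pm_runtime_put(xe);
 */
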
/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	xe_irq_suspend(xe);

	return 0;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id) {
		err = xe_pcode_init(gt);
		if (err)
			return err;
	}

	/*
	 * This only restores pinned memory, which is the memory required for
	 * the GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		return err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	err = xe_bo_restore_user(xe);
	if (err)
		return err;

	return 0;
}

static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev)
{
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability and _PR3 power resource */
	if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev))
		return false;

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 *       this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

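/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * Sets up the d3cold bookkeeping (lock, capability check against the PCIe
 * root port, default VRAM threshold), registers the device sysfs entries and
 * then enables runtime PM.
 */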
void xe_pm_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	drmm_mutex_init(&xe->drm, &xe->d3cold.lock);

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(pdev);
	xe_device_sysfs_init(xe);
	xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);

	xe_pm_runtime_init(xe);
}

void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

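/**
 * xe_pm_runtime_suspend - Helper for the runtime_pm suspend path
 * @xe: xe device instance
 *
 * When d3cold is allowed, every BO is evicted from VRAM before the GTs are
 * suspended; otherwise only the GTs and interrupts are quiesced. Returns
 * -EBUSY if d3cold is allowed while memory access is still ongoing.
 *
 * Return: 0 on success, negative error code otherwise.
 */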
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
		return -EBUSY;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_device_mem_access_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_device_mem_access_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_device_mem_access_get()    |
	 *
	 * This will clearly deadlock since the rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0, which prevents CPU1 from making forward progress. With the
	 * annotation here and in xe_device_mem_access_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

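/**
 * xe_pm_runtime_resume - Helper for the runtime_pm resume path
 * @xe: xe device instance
 *
 * Power loss is detected by checking whether the primary GT's GuC is back in
 * reset. If d3cold was allowed and the card really lost power, pcode is
 * re-initialized and pinned kernel memory is restored before the GTs resume;
 * user memory is restored afterwards.
 *
 * Return: 0 on success, negative error code otherwise.
 */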
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	/*
	 * It is possible that xe has allowed d3cold but other PCIe devices in
	 * the gfx card SoC have blocked d3cold, so the card has not really
	 * lost power. Detecting power loss on the primary GT is sufficient.
	 */
	gt = xe_device_get_gt(xe, 0);
	xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		for_each_gt(gt, xe, id) {
			err = xe_pcode_init(gt);
			if (err)
				goto out;
		}

		/*
		 * This only restores pinned memory, which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

int xe_pm_runtime_get(struct xe_device *xe)
{
	return pm_runtime_get_sync(xe->drm.dev);
}

int xe_pm_runtime_put(struct xe_device *xe)
{
	pm_runtime_mark_last_busy(xe->drm.dev);
	return pm_runtime_put(xe->drm.dev);
}

int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev, true);
}

void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM.\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

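/**
 * xe_pm_set_vram_threshold - Set the VRAM usage threshold for allowing d3cold
 * @xe: xe device instance
 * @threshold: VRAM usage in MB below which d3cold is allowed
 *
 * Return: 0 on success, -EINVAL if the threshold exceeds the total VRAM size.
 */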
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}