// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to
 * ram), S4 (suspend to disk). The main functions here are `xe_pm_suspend` and
 * `xe_pm_resume`. They are the entry points for suspending to and resuming
 * from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low
 * power state D3, controlled by the PCI subsystem and ACPI with help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power is on for keeping
 * memory alive and a quicker, lower latency resume, or D3Cold where Vcc power
 * is off for better power savings.
 * The Vcc control of the PCI hierarchy can only be handled at the PCI root
 * port level, while the device driver can be behind multiple bridges/switches
 * and paired with other devices. For this reason, the PCI subsystem cannot
 * perform the transition towards D3Cold. The lowest runtime PM state possible
 * from the PCI subsystem is D3hot. Then, if all the paired devices in the same
 * root port are in D3hot, ACPI will assist and run its own methods (_PR3 and
 * _OFF) to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend, based on runtime conditions such as VRAM usage for a quick and low
 * latency resume, for instance.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * could be put at D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component
 * provides `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that
 * the PCI subsystem will call before transitioning to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */
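
/*
 * Example: a minimal sketch of the outer-level protection described above,
 * assuming a hypothetical xe_foo_ioctl() entry point (the name and the
 * xe_foo_exec() helper are illustrative only, not part of this file). Note
 * that xe_pm_runtime_get_ioctl() bumps the usage counter even on failure,
 * so the put must happen on all paths:
 *
 *	static int xe_foo_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret >= 0)
 *			ret = xe_foo_exec(xe, data);	// 0 or negative
 *		xe_pm_runtime_put(xe);			// put even on failure
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */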

#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_lockdep_map = {
	.name = "xe_pm_runtime_lockdep_map"
};
#endif

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racey... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err;

	xe_display_pm_suspend(xe, false);

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe, false);
			goto err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	xe_display_pm_resume(xe, false);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 * this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}
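
/*
 * Example: a rough sketch of where the init/fini calls above sit in the
 * driver bring-up. The xe_probe_stage_hw() name is hypothetical and the
 * real call sites are spread across the probe path elsewhere in the
 * driver; the point is only the ordering:
 *
 *	static int xe_probe_sketch(struct xe_device *xe)
 *	{
 *		int err;
 *
 *		err = xe_pm_init_early(xe);	// locks/lists, before HW init
 *		if (err)
 *			return err;
 *
 *		err = xe_probe_stage_hw(xe);	// device, GuC, display, ...
 *		if (err)
 *			return err;
 *
 *		return xe_pm_init(xe);		// last: enables runtime PM
 *	}
 */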

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}
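
/*
 * Example: how the pm_callback_task handshake breaks recursion. If code
 * running inside the runtime suspend/resume callbacks (e.g. display code)
 * calls back into xe_pm_runtime_get(), the task check turns that inner get
 * into a plain usage-counter bump instead of a blocking resume. A sketch
 * with a hypothetical helper name:
 *
 *	void xe_foo_called_from_resume(struct xe_device *xe)
 *	{
 *		xe_pm_runtime_get(xe);	// callback_task == current:
 *					// no pm_runtime_resume(), no deadlock
 *		...
 *		xe_pm_runtime_put(xe);	// noidle put on this same path
 *	}
 */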

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as a non-reliable assertion, to ensure that we are not in
 * the sleep state while trying to access some memory for instance.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_pm_runtime_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	lock_map_acquire(&xe_pm_runtime_lockdep_map);

	/*
	 * Take the lock for the entire list operation, as xe_ttm_bo_destroy
	 * and xe_bo_move_notify also check and delete the BO entry from the
	 * userfault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
		xe_display_pm_suspend(xe, true);
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);
out:
	if (err)
		xe_display_pm_resume(xe, true);
	lock_map_release(&xe_pm_runtime_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	lock_map_acquire(&xe_pm_runtime_lockdep_map);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold.allowed) {
		xe_display_pm_resume(xe, true);
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}
out:
	lock_map_release(&xe_pm_runtime_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}
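
/*
 * Example: a simplified sketch of how the suspend/resume helpers in this
 * file are typically wired into dev_pm_ops by the PCI glue. All the
 * *_sketch names are illustrative; the real glue lives elsewhere in the
 * driver and handles additional details around these calls:
 *
 *	static int xe_pci_runtime_suspend_sketch(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		return xe_pm_runtime_suspend(xe);
 *	}
 *
 *	static const struct dev_pm_ops xe_pm_ops_sketch = {
 *		SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend_sketch,
 *					xe_pci_resume_sketch)
 *		SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend_sketch,
 *				   xe_pci_runtime_resume_sketch,
 *				   xe_pci_runtime_idle_sketch)
 *	};
 */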

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds). Lockdep then only needs to see the
 * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void pm_runtime_lockdep_prime(void)
{
	lock_map_acquire(&xe_pm_runtime_lockdep_map);
	lock_map_release(&xe_pm_runtime_lockdep_map);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	pm_runtime_lockdep_prime();
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}
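
/*
 * Example: the plain get/put pair around work that runs outside an IOCTL,
 * e.g. a workqueue item. xe_foo_worker(), xe_foo_process() and the
 * foo.work member are hypothetical names for illustration:
 *
 *	static void xe_foo_worker(struct work_struct *work)
 *	{
 *		struct xe_device *xe = container_of(work, struct xe_device,
 *						    foo.work);
 *
 *		xe_pm_runtime_get(xe);	// may resume synchronously
 *		xe_foo_process(xe);
 *		xe_pm_runtime_put(xe);	// marks last busy; autosuspend later
 *	}
 */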

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	pm_runtime_lockdep_prime();
	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should be put back after this function regardless, since it
 * will always bump the usage counter.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
		pm_runtime_get_noresume(xe->drm.dev);
}
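
/*
 * Example: an inner helper that is only ever reached below an outer
 * xe_pm_runtime_get(), as described above (xe_foo_inner() and
 * xe_foo_touch_hw() are hypothetical names); it must still balance the
 * usage counter it bumps:
 *
 *	static void xe_foo_inner(struct xe_device *xe)
 *	{
 *		xe_pm_runtime_get_noresume(xe);	// warns if not already awake
 *		xe_foo_touch_hw(xe);
 *		xe_pm_runtime_put(xe);
 *	}
 */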

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	pm_runtime_lockdep_prime();
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbound PCIe parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}
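
/*
 * Example: a condensed sketch of how a sysfs store callback might feed
 * xe_pm_set_vram_threshold(). The attribute plumbing is simplified here;
 * the actual attribute is registered via xe_device_sysfs_init():
 *
 *	static ssize_t vram_d3cold_threshold_store(struct device *dev,
 *						   struct device_attribute *attr,
 *						   const char *buf, size_t count)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *		u32 threshold;
 *		int ret;
 *
 *		ret = kstrtou32(buf, 0, &threshold);
 *		if (ret)
 *			return ret;
 *
 *		ret = xe_pm_set_vram_threshold(xe, threshold);
 *
 *		return ret ?: count;
 *	}
 */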

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
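
/*
 * Example: per the kernel-doc above, this toggle is meant to run from the
 * runtime_pm idle callback, right before the PM core decides whether to
 * suspend. A sketch of that glue with an illustrative callback name (the
 * real callback lives in the PCI glue, where the resulting d3cold.allowed
 * state also steers pci_d3cold_disable()/pci_d3cold_enable()):
 *
 *	static int xe_pci_runtime_idle_sketch(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		xe_pm_d3cold_allowed_toggle(xe);
 *
 *		return 0;
 *	}
 */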