// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to RAM),
 * S4 (disk). The main functions here are `xe_pm_suspend` and `xe_pm_resume`. They
 * are the entry points for suspending to and resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
 * state D3, controlled by the PCI subsystem and ACPI with the help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power is on to keep memory
 * alive and allow a quicker, low latency resume, or D3Cold, where Vcc power is
 * off for better power savings.
 * The Vcc of the PCI hierarchy can only be controlled at the PCI root port
 * level, while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot perform
 * the transition towards D3Cold. The lowest runtime PM possible from the PCI
 * subsystem is D3hot. Then, if all these paired devices in the same root port
 * are in D3hot, ACPI will assist here and run its own methods (_PR3 and _OFF)
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend. That decision is based on runtime conditions such as VRAM usage,
 * for instance to guarantee a quick and low latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * could be put in D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component provides
 * `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
 * subsystem will call before transitioning to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) or GT frequency
 * management (RPS).
 */
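
/*
 * Illustrative sketch (hypothetical caller, not code from this driver): an
 * outer-level entry point such as an ioctl handler is expected to hold a
 * runtime PM reference around any work that may touch the hardware, roughly:
 *
 *	long xe_foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 *	{
 *		struct xe_device *xe = ...;	(look up the device from @file)
 *		long ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret >= 0)
 *			ret = ...;		(do the actual ioctl work)
 *		xe_pm_runtime_put(xe);
 *
 *		return ret;
 *	}
 *
 * xe_foo_ioctl() is a placeholder name; xe_pm_runtime_get_ioctl() and
 * xe_pm_runtime_put() are the helpers defined later in this file. The
 * reference is dropped unconditionally because pm_runtime_get_sync(), which
 * backs the get here, raises the usage counter even when it fails.
 */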

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		return err;

	xe_display_pm_suspend(xe);

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe);
			return err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	return 0;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	for_each_gt(gt, xe, id) {
		err = xe_pcode_init(gt);
		if (err)
			return err;
	}

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		return err;

	xe_irq_resume(xe);

	xe_display_pm_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	err = xe_bo_restore_user(xe);
	if (err)
		return err;

	return 0;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then apply
	 *       this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

void xe_pm_init_early(struct xe_device *xe)
{
	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
	drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 */
void xe_pm_init(struct xe_device *xe)
{
	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return;

	drmm_mutex_init(&xe->drm, &xe->d3cold.lock);

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		xe_device_sysfs_init(xe);
		xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
	}

	xe_pm_runtime_init(xe);
}

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can only be used as an unreliable assertion, for instance to ensure that
 * we are not in the sleep state while trying to access some memory.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
		return -EBUSY;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_device_mem_access_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_device_mem_access_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_device_mem_access_get()    |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress.  With the
	 * annotation here and in xe_device_mem_access_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	/*
	 * Take the lock for the entire list operation, as xe_ttm_bo_destroy
	 * and xe_bo_move_notify also check and delete the bo entry from the
	 * user fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	lock_map_acquire(&xe_device_mem_access_lockdep_map);

	/*
	 * It is possible that xe has allowed d3cold but other PCIe devices in
	 * the gfx card SoC have blocked d3cold, in which case the card has not
	 * really lost power. Checking the power state of the primary GT is
	 * sufficient.
	 */
	gt = xe_device_get_gt(xe, 0);
	xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		for_each_gt(gt, xe, id) {
			err = xe_pcode_init(gt);
			if (err)
				goto out;
		}

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}
out:
	lock_map_release(&xe_device_mem_access_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get(struct xe_device *xe)
{
	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_put(struct xe_device *xe)
{
	pm_runtime_mark_last_busy(xe->drm.dev);
	return pm_runtime_put(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev, true);
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbounded PCIe parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM.\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during the runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
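
/*
 * Worked example of the threshold logic above (the number is illustrative,
 * not a statement about DEFAULT_VRAM_THRESHOLD): after
 * xe_pm_set_vram_threshold(xe, 300), xe_pm_d3cold_allowed_toggle() sets
 * d3cold.allowed = true in the runtime idle path only while total VRAM usage
 * across XE_PL_VRAM0/XE_PL_VRAM1 stays below 300 MiB. Once usage reaches or
 * exceeds 300 MiB, d3cold stays blocked, so the next runtime suspend stops at
 * D3hot and VRAM contents survive without the eviction done for d3cold.
 */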