// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to
 * RAM) and S4 (suspend to disk). The main functions here are `xe_pm_suspend`
 * and `xe_pm_resume`. They are the main entry points for suspending to and
 * resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low
 * power state D3, controlled by the PCI subsystem and ACPI with help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep
 * memory alive and allow a quicker, low latency resume, or D3cold, where Vcc
 * power is off for better power savings.
 * The Vcc of a PCI hierarchy can only be controlled at the PCI root port
 * level, while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot perform
 * the transition towards D3cold. The lowest runtime PM possible from the PCI
 * subsystem is D3hot. Then, if all the paired devices under the same root port
 * are in D3hot, ACPI will assist and run its own methods (_PR3 and _OFF) to
 * perform the transition from D3hot to D3cold. Xe may disallow this transition
 * by calling pci_d3cold_disable(root_pdev) before going to runtime suspend,
 * based on runtime conditions such as VRAM usage, for instance to guarantee a
 * quick, low latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * can be put in D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component
 * provides `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that
 * the PCI subsystem will call before the transition to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, these get and put functions need to be called from the
 * higher/outer levels whenever possible.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
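 *
 * As a minimal, illustrative sketch (error handling kept to the essentials),
 * an outer-level caller such as an IOCTL handler would bracket its hardware
 * access with the helpers defined below::
 *
 *	ret = xe_pm_runtime_get_ioctl(xe);
 *	if (ret < 0)
 *		return ret;
 *	... access registers / VRAM, submit work, etc. ...
 *	xe_pm_runtime_put(xe);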
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */

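/*
 * Dummy lockdep maps wrapping the runtime PM callbacks (see the comment above
 * xe_rpm_might_enter_cb() below): the d3cold map is used when runtime resume
 * may need to take dma_resv locks to restore evicted buffers, the nod3cold map
 * when resume is safe to perform from reclaim context.
 */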
#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context,
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable && !xe->info.has_sriov;
}

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe, false);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe, false);
			goto err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe, false);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

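/*
 * Configure autosuspend (1 second delay), mark the device active and allowed
 * for runtime PM, and drop a usage reference so the device can runtime
 * suspend once it goes idle.
 */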
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 *       this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

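/**
 * xe_pm_init_early - Early Power Management initialization
 * @xe: xe device instance
 *
 * Sets up the VRAM userfault list and the locks used later by runtime PM and
 * the D3Cold handling.
 *
 * Returns 0 for success, negative error code otherwise.
 */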
int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

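/*
 * Record (or clear) the task currently running a runtime PM callback, so that
 * nested get/put calls issued from that same task can short-circuit instead of
 * recursing into the PM core.
 */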
static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

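/**
 * xe_pm_read_callback_task - Read the task currently running a PM callback
 * @xe: xe device instance
 *
 * Pairs with xe_pm_write_callback_task(). Callers compare the result against
 * current to detect that they are already inside the suspend/resume callback.
 *
 * Returns: the task running the callback, or NULL if none is in flight.
 */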
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can only be used as an unreliable assertion, for instance to ensure that
 * we are not in a sleep state while trying to access some memory.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_pm_runtime_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 from making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	/*
	 * Hold the lock for the entire list operation as xe_ttm_bo_destroy and
	 * xe_bo_move_notify also check for and delete bo entries from the user
	 * fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);
out:
	if (err)
		xe_display_pm_resume(xe, true);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds). Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if the device is awake, a previous reference had already been
 * taken, and a new reference was now taken; false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING;
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should always be put back after this function, since it will
 * bump the usage counter regardless.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM.\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}

/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}