xref: /linux/drivers/gpu/drm/xe/xe_pm.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
1*dd08ebf6SMatthew Brost // SPDX-License-Identifier: MIT
2*dd08ebf6SMatthew Brost /*
3*dd08ebf6SMatthew Brost  * Copyright © 2022 Intel Corporation
4*dd08ebf6SMatthew Brost  */
5*dd08ebf6SMatthew Brost 
6*dd08ebf6SMatthew Brost #include <linux/pm_runtime.h>
7*dd08ebf6SMatthew Brost 
8*dd08ebf6SMatthew Brost #include <drm/ttm/ttm_placement.h>
9*dd08ebf6SMatthew Brost 
10*dd08ebf6SMatthew Brost #include "xe_bo.h"
11*dd08ebf6SMatthew Brost #include "xe_bo_evict.h"
12*dd08ebf6SMatthew Brost #include "xe_device.h"
13*dd08ebf6SMatthew Brost #include "xe_pm.h"
14*dd08ebf6SMatthew Brost #include "xe_gt.h"
15*dd08ebf6SMatthew Brost #include "xe_ggtt.h"
16*dd08ebf6SMatthew Brost #include "xe_irq.h"
17*dd08ebf6SMatthew Brost #include "xe_pcode.h"
18*dd08ebf6SMatthew Brost 
19*dd08ebf6SMatthew Brost /**
20*dd08ebf6SMatthew Brost  * DOC: Xe Power Management
21*dd08ebf6SMatthew Brost  *
 * Xe PM shall be guided by simplicity.
23*dd08ebf6SMatthew Brost  * Use the simplest hook options whenever possible.
24*dd08ebf6SMatthew Brost  * Let's not reinvent the runtime_pm references and hooks.
25*dd08ebf6SMatthew Brost  * Shall have a clear separation of display and gt underneath this component.
26*dd08ebf6SMatthew Brost  *
27*dd08ebf6SMatthew Brost  * What's next:
28*dd08ebf6SMatthew Brost  *
 * For now s2idle and s3 are only working on integrated devices. The next step
 * is to iterate through all VRAM BOs, backing them up into system memory
 * before allowing system suspend.
32*dd08ebf6SMatthew Brost  *
33*dd08ebf6SMatthew Brost  * Also runtime_pm needs to be here from the beginning.
34*dd08ebf6SMatthew Brost  *
35*dd08ebf6SMatthew Brost  * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
36*dd08ebf6SMatthew Brost  * and no wait boost. Frequency optimizations should come on a next stage.
37*dd08ebf6SMatthew Brost  */
38*dd08ebf6SMatthew Brost 
/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Ordering matters here: every GT is prepared first, then all BOs are
 * evicted, then the GTs themselves are suspended, and interrupts go
 * down last.
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	/* Quiesce every GT before memory is evicted underneath it. */
	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racey... */
	err = xe_bo_evict_all(xe);
	if (err)
		return err;

	/*
	 * NOTE(review): on a mid-loop failure, already-suspended GTs are
	 * left suspended and eviction is not undone — confirm the caller
	 * handles a partially suspended device.
	 */
	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	/* Interrupts go down only after all GTs have stopped. */
	xe_irq_suspend(xe);

	return 0;
}
69*dd08ebf6SMatthew Brost 
/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Reverse of xe_pm_suspend(): pcode is re-initialized per GT, pinned
 * kernel memory is restored, interrupts come back, the GTs resume, and
 * only then is user memory restored.
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	/* Re-establish pcode communication on each GT first. */
	for_each_gt(gt, xe, id) {
		err = xe_pcode_init(gt);
		if (err)
			return err;
	}

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		return err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	/*
	 * User BOs are restored last — presumably their migration needs the
	 * GTs already up; NOTE(review): confirm against xe_bo_restore_user().
	 */
	err = xe_bo_restore_user(xe);
	if (err)
		return err;

	return 0;
}
107*dd08ebf6SMatthew Brost 
108*dd08ebf6SMatthew Brost void xe_pm_runtime_init(struct xe_device *xe)
109*dd08ebf6SMatthew Brost {
110*dd08ebf6SMatthew Brost 	struct device *dev = xe->drm.dev;
111*dd08ebf6SMatthew Brost 
112*dd08ebf6SMatthew Brost 	pm_runtime_use_autosuspend(dev);
113*dd08ebf6SMatthew Brost 	pm_runtime_set_autosuspend_delay(dev, 1000);
114*dd08ebf6SMatthew Brost 	pm_runtime_set_active(dev);
115*dd08ebf6SMatthew Brost 	pm_runtime_allow(dev);
116*dd08ebf6SMatthew Brost 	pm_runtime_mark_last_busy(dev);
117*dd08ebf6SMatthew Brost 	pm_runtime_put_autosuspend(dev);
118*dd08ebf6SMatthew Brost }
119*dd08ebf6SMatthew Brost 
/*
 * Runtime-suspend callback. When D3cold is allowed, VRAM contents are
 * presumably lost in that state, so every BO must first be evicted to
 * system memory; otherwise only the GTs and interrupts are suspended.
 * Returns 0 on success, negative errno on failure.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	if (xe->d3cold_allowed) {
		/*
		 * Refuse to power down while a memory access is in flight.
		 * NOTE(review): this check and the eviction below are not
		 * atomic — an access could begin in between; confirm the
		 * runtime-PM usage counting closes that window.
		 */
		if (xe_device_mem_access_ongoing(xe))
			return -EBUSY;

		err = xe_bo_evict_all(xe);
		if (err)
			return err;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	/* Interrupts last, after all GTs are quiesced. */
	xe_irq_suspend(xe);

	return 0;
}
145*dd08ebf6SMatthew Brost 
/*
 * Runtime-resume callback, mirroring xe_pm_runtime_suspend(): the
 * pcode re-init and BO restore steps are only needed when the device
 * may have been in D3cold. Returns 0 on success, negative errno on
 * failure.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	if (xe->d3cold_allowed) {
		/* Coming out of D3cold: pcode must be re-initialized. */
		for_each_gt(gt, xe, id) {
			err = xe_pcode_init(gt);
			if (err)
				return err;
		}

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			return err;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	/* User BOs come back only after the GTs are running again. */
	if (xe->d3cold_allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			return err;
	}

	return 0;
}
181*dd08ebf6SMatthew Brost 
182*dd08ebf6SMatthew Brost int xe_pm_runtime_get(struct xe_device *xe)
183*dd08ebf6SMatthew Brost {
184*dd08ebf6SMatthew Brost 	return pm_runtime_get_sync(xe->drm.dev);
185*dd08ebf6SMatthew Brost }
186*dd08ebf6SMatthew Brost 
187*dd08ebf6SMatthew Brost int xe_pm_runtime_put(struct xe_device *xe)
188*dd08ebf6SMatthew Brost {
189*dd08ebf6SMatthew Brost 	pm_runtime_mark_last_busy(xe->drm.dev);
190*dd08ebf6SMatthew Brost 	return pm_runtime_put_autosuspend(xe->drm.dev);
191*dd08ebf6SMatthew Brost }
192*dd08ebf6SMatthew Brost 
193*dd08ebf6SMatthew Brost /* Return true if resume operation happened and usage count was increased */
194*dd08ebf6SMatthew Brost bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe)
195*dd08ebf6SMatthew Brost {
196*dd08ebf6SMatthew Brost 	/* In case we are suspended we need to immediately wake up */
197*dd08ebf6SMatthew Brost 	if (pm_runtime_suspended(xe->drm.dev))
198*dd08ebf6SMatthew Brost 		return !pm_runtime_resume_and_get(xe->drm.dev);
199*dd08ebf6SMatthew Brost 
200*dd08ebf6SMatthew Brost 	return false;
201*dd08ebf6SMatthew Brost }
202*dd08ebf6SMatthew Brost 
203*dd08ebf6SMatthew Brost int xe_pm_runtime_get_if_active(struct xe_device *xe)
204*dd08ebf6SMatthew Brost {
205*dd08ebf6SMatthew Brost 	WARN_ON(pm_runtime_suspended(xe->drm.dev));
206*dd08ebf6SMatthew Brost 	return pm_runtime_get_if_active(xe->drm.dev, true);
207*dd08ebf6SMatthew Brost }
208