// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/ttm/ttm_placement.h>

#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_irq.h"
#include "xe_pcode.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM shall be guided by simplicity.
 * Use the simplest hook options whenever possible.
 * Let's not reinvent the runtime_pm references and hooks.
 * There shall be a clear separation between display and GT underneath this
 * component.
 *
 * What's next:
 *
 * For now s2idle and s3 only work on integrated devices. The next step
 * is to iterate through all VRAM BOs, backing them up into system memory
 * before allowing system suspend.
 *
 * Also, runtime_pm needs to be here from the beginning.
 *
 * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
 * and no wait boost. Frequency optimizations should come at a later stage.
 */

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	xe_irq_suspend(xe);

	return 0;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id) {
		err = xe_pcode_init(gt);
		if (err)
			return err;
	}

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		return err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	err = xe_bo_restore_user(xe);
	if (err)
		return err;

	return 0;
}

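/**
 * xe_pm_runtime_init - Enable runtime PM for this device
 * @xe: xe device instance
 *
 * Set up autosuspend with a 1000 ms delay, mark the device active and
 * allowed to runtime suspend, then schedule an autosuspend.
 */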
void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

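/**
 * xe_pm_runtime_fini - Disable runtime PM before device removal
 * @xe: xe device instance
 *
 * Wake the device up synchronously and forbid further runtime suspend.
 */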
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

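/**
 * xe_pm_runtime_suspend - Prepare the device for a runtime suspend
 * @xe: xe device instance
 *
 * When d3cold is allowed, evict all BOs to system memory first, bailing
 * out with -EBUSY if a memory access is still ongoing. Then suspend all
 * GTs and the IRQs.
 *
 * Return: 0 on success, negative error code otherwise.
 */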
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	if (xe->d3cold_allowed) {
		if (xe_device_mem_access_ongoing(xe))
			return -EBUSY;

		err = xe_bo_evict_all(xe);
		if (err)
			return err;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	xe_irq_suspend(xe);

	return 0;
}

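/**
 * xe_pm_runtime_resume - Bring the device back from a runtime suspend
 * @xe: xe device instance
 *
 * When coming back from d3cold, re-initialize the pcode and restore the
 * pinned kernel memory before resuming the GTs, then restore user memory
 * afterwards.
 *
 * Return: 0 on success, negative error code otherwise.
 */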
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	if (xe->d3cold_allowed) {
		for_each_gt(gt, xe, id) {
			err = xe_pcode_init(gt);
			if (err)
				return err;
		}

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			return err;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold_allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			return err;
	}

	return 0;
}

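/**
 * xe_pm_runtime_get - Take a runtime PM reference, resuming the device if needed
 * @xe: xe device instance
 *
 * Return: the pm_runtime_get_sync() result: 0 or 1 on success, negative
 * error code otherwise.
 */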
int xe_pm_runtime_get(struct xe_device *xe)
{
	return pm_runtime_get_sync(xe->drm.dev);
}

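/**
 * xe_pm_runtime_put - Release a runtime PM reference
 * @xe: xe device instance
 *
 * Mark the device busy and schedule an autosuspend once the last
 * reference is dropped.
 *
 * Return: the pm_runtime_put_autosuspend() result.
 */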
int xe_pm_runtime_put(struct xe_device *xe)
{
	pm_runtime_mark_last_busy(xe->drm.dev);
	return pm_runtime_put_autosuspend(xe->drm.dev);
}

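/*
 * Usage sketch (hypothetical caller, illustration only): code paths that
 * touch the hardware are expected to hold a runtime PM reference for the
 * duration of the access, e.g.:
 *
 *	err = xe_pm_runtime_get(xe);
 *	if (err < 0)
 *		return err;
 *	... access the hardware ...
 *	xe_pm_runtime_put(xe);
 */
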
/**
 * xe_pm_runtime_resume_if_suspended - Resume and grab a reference if suspended
 * @xe: xe device instance
 *
 * Return: true if a resume operation happened and the usage count was
 * increased, false otherwise.
 */
bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe)
{
	/* In case we are suspended we need to immediately wake up */
	if (pm_runtime_suspended(xe->drm.dev))
		return !pm_runtime_resume_and_get(xe->drm.dev);

	return false;
}

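/**
 * xe_pm_runtime_get_if_active - Take a runtime PM reference only if the device is active
 * @xe: xe device instance
 *
 * Return: the pm_runtime_get_if_active() result: 1 if the usage count was
 * increased, 0 if the device was suspended, negative error code otherwise.
 */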
int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	WARN_ON(pm_runtime_suspended(xe->drm.dev));
	return pm_runtime_get_if_active(xe->drm.dev, true);
}
217