xref: /linux/drivers/gpu/drm/panthor/panthor_devfreq.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2019 Collabora ltd. */
3 
4 #include <linux/clk.h>
5 #include <linux/devfreq.h>
6 #include <linux/devfreq_cooling.h>
7 #include <linux/platform_device.h>
8 #include <linux/pm_opp.h>
9 
10 #include <drm/drm_managed.h>
11 #include <drm/drm_print.h>
12 
13 #include "panthor_devfreq.h"
14 #include "panthor_device.h"
15 
/**
 * struct panthor_devfreq - Device frequency management
 *
 * Tracks how much time the GPU spent busy vs idle between two devfreq
 * polls, so the simple_ondemand governor can estimate utilization.
 */
struct panthor_devfreq {
	/** @devfreq: devfreq device. */
	struct devfreq *devfreq;

	/** @gov_data: Governor data. */
	struct devfreq_simple_ondemand_data gov_data;

	/** @busy_time: Busy time. */
	ktime_t busy_time;

	/** @idle_time: Idle time. */
	ktime_t idle_time;

	/** @time_last_update: Last update time. */
	ktime_t time_last_update;

	/** @last_busy_state: True if the GPU was busy last time we updated the state. */
	bool last_busy_state;

	/**
	 * @lock: Lock used to protect busy_time, idle_time, time_last_update and
	 * last_busy_state.
	 *
	 * These fields can be accessed concurrently by panthor_devfreq_get_dev_status()
	 * and panthor_devfreq_record_{busy,idle}().
	 */
	spinlock_t lock;
};
47 
48 static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
49 {
50 	ktime_t now, last;
51 
52 	now = ktime_get();
53 	last = pdevfreq->time_last_update;
54 
55 	if (pdevfreq->last_busy_state)
56 		pdevfreq->busy_time += ktime_sub(now, last);
57 	else
58 		pdevfreq->idle_time += ktime_sub(now, last);
59 
60 	pdevfreq->time_last_update = now;
61 }
62 
63 static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
64 				  u32 flags)
65 {
66 	struct panthor_device *ptdev = dev_get_drvdata(dev);
67 	struct dev_pm_opp *opp;
68 	int err;
69 
70 	opp = devfreq_recommended_opp(dev, freq, flags);
71 	if (IS_ERR(opp))
72 		return PTR_ERR(opp);
73 	dev_pm_opp_put(opp);
74 
75 	err = dev_pm_opp_set_rate(dev, *freq);
76 	if (!err)
77 		ptdev->current_frequency = *freq;
78 
79 	return err;
80 }
81 
82 static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
83 {
84 	pdevfreq->busy_time = 0;
85 	pdevfreq->idle_time = 0;
86 	pdevfreq->time_last_update = ktime_get();
87 }
88 
89 static int panthor_devfreq_get_dev_status(struct device *dev,
90 					  struct devfreq_dev_status *status)
91 {
92 	struct panthor_device *ptdev = dev_get_drvdata(dev);
93 	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
94 	unsigned long irqflags;
95 
96 	status->current_frequency = clk_get_rate(ptdev->clks.core);
97 
98 	spin_lock_irqsave(&pdevfreq->lock, irqflags);
99 
100 	panthor_devfreq_update_utilization(pdevfreq);
101 
102 	status->total_time = ktime_to_ns(ktime_add(pdevfreq->busy_time,
103 						   pdevfreq->idle_time));
104 
105 	status->busy_time = ktime_to_ns(pdevfreq->busy_time);
106 
107 	panthor_devfreq_reset(pdevfreq);
108 
109 	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
110 
111 	drm_dbg(&ptdev->base, "busy %lu total %lu %lu %% freq %lu MHz\n",
112 		status->busy_time, status->total_time,
113 		status->busy_time / (status->total_time / 100),
114 		status->current_frequency / 1000 / 1000);
115 
116 	return 0;
117 }
118 
/* Shared devfreq profile. Not const because @initial_freq is filled in at
 * init time with the current clock rate.
 */
static struct devfreq_dev_profile panthor_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50, /* ~3 frames */
	.target = panthor_devfreq_target,
	.get_dev_status = panthor_devfreq_get_dev_status,
};
125 
/**
 * panthor_devfreq_init() - Create and register a devfreq device for the GPU
 * @ptdev: Device.
 *
 * Sets up the OPP table and regulators, registers a devfreq device using
 * the simple_ondemand governor, and exposes it as a thermal cooling device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_devfreq_init(struct panthor_device *ptdev)
{
	/* There's actually 2 regulators (mali and sram), but the OPP core only
	 * supports one.
	 *
	 * We assume the sram regulator is coupled with the mali one and let
	 * the coupling logic deal with voltage updates.
	 */
	static const char * const reg_names[] = { "mali", NULL };
	struct thermal_cooling_device *cooling;
	struct device *dev = ptdev->base.dev;
	struct panthor_devfreq *pdevfreq;
	struct dev_pm_opp *opp;
	unsigned long cur_freq;
	unsigned long freq = ULONG_MAX;
	int ret;

	pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
	if (!pdevfreq)
		return -ENOMEM;

	ptdev->devfreq = pdevfreq;

	ret = devm_pm_opp_set_regulators(dev, reg_names);
	if (ret && ret != -ENODEV) {
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
		return ret;
	}

	ret = devm_pm_opp_of_add_table(dev);
	if (ret)
		return ret;

	spin_lock_init(&pdevfreq->lock);

	panthor_devfreq_reset(pdevfreq);

	cur_freq = clk_get_rate(ptdev->clks.core);

	/* Regulator coupling only takes care of synchronizing/balancing voltage
	 * updates, but the coupled regulator needs to be enabled manually.
	 *
	 * We use devm_regulator_get_enable_optional() and keep the sram supply
	 * enabled until the device is removed, just like we do for the mali
	 * supply, which is enabled when dev_pm_opp_set_opp(dev, opp) is called,
	 * and disabled when the opp_table is torn down, using the devm action.
	 *
	 * If we really care about disabling regulators on suspend, we should:
	 * - use devm_regulator_get_optional() here
	 * - call dev_pm_opp_set_opp(dev, NULL) before leaving this function
	 *   (this disables the regulator passed to the OPP layer)
	 * - call dev_pm_opp_set_opp(dev, NULL) and
	 *   regulator_disable(ptdev->regulators.sram) in
	 *   panthor_devfreq_suspend()
	 * - call dev_pm_opp_set_opp(dev, default_opp) and
	 *   regulator_enable(ptdev->regulators.sram) in
	 *   panthor_devfreq_resume()
	 *
	 * But without knowing if it's beneficial or not (in term of power
	 * consumption), or how much it slows down the suspend/resume steps,
	 * let's just keep regulators enabled for the device lifetime.
	 */
	ret = devm_regulator_get_enable_optional(dev, "sram");
	if (ret && ret != -ENODEV) {
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "Couldn't retrieve/enable sram supply\n");
		return ret;
	}

	/* Validate the boot clock rate against the OPP table. */
	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	panthor_devfreq_profile.initial_freq = cur_freq;
	ptdev->current_frequency = cur_freq;

	/*
	 * Set the recommend OPP this will enable and configure the regulator
	 * if any and will avoid a switch off by regulator_late_cleanup()
	 */
	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
	if (ret) {
		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
		return ret;
	}

	/* Find the fastest defined rate  */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	ptdev->fast_rate = freq;

	dev_pm_opp_put(opp);

	/*
	 * Setup default thresholds for the simple_ondemand governor.
	 * The values are chosen based on experiments.
	 */
	pdevfreq->gov_data.upthreshold = 45;
	pdevfreq->gov_data.downdifferential = 5;

	pdevfreq->devfreq = devm_devfreq_add_device(dev, &panthor_devfreq_profile,
						    DEVFREQ_GOV_SIMPLE_ONDEMAND,
						    &pdevfreq->gov_data);
	if (IS_ERR(pdevfreq->devfreq)) {
		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
		ret = PTR_ERR(pdevfreq->devfreq);
		/* Keep ->devfreq NULL so the other entry points become no-ops. */
		pdevfreq->devfreq = NULL;
		return ret;
	}

	/* Cooling-device failure is not fatal: devfreq works without thermal. */
	cooling = devfreq_cooling_em_register(pdevfreq->devfreq, NULL);
	if (IS_ERR(cooling))
		DRM_DEV_INFO(dev, "Failed to register cooling device\n");

	return 0;
}
245 
246 void panthor_devfreq_resume(struct panthor_device *ptdev)
247 {
248 	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
249 
250 	if (!pdevfreq->devfreq)
251 		return;
252 
253 	panthor_devfreq_reset(pdevfreq);
254 
255 	drm_WARN_ON(&ptdev->base, devfreq_resume_device(pdevfreq->devfreq));
256 }
257 
258 void panthor_devfreq_suspend(struct panthor_device *ptdev)
259 {
260 	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
261 
262 	if (!pdevfreq->devfreq)
263 		return;
264 
265 	drm_WARN_ON(&ptdev->base, devfreq_suspend_device(pdevfreq->devfreq));
266 }
267 
268 void panthor_devfreq_record_busy(struct panthor_device *ptdev)
269 {
270 	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
271 	unsigned long irqflags;
272 
273 	if (!pdevfreq->devfreq)
274 		return;
275 
276 	spin_lock_irqsave(&pdevfreq->lock, irqflags);
277 
278 	panthor_devfreq_update_utilization(pdevfreq);
279 	pdevfreq->last_busy_state = true;
280 
281 	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
282 }
283 
284 void panthor_devfreq_record_idle(struct panthor_device *ptdev)
285 {
286 	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
287 	unsigned long irqflags;
288 
289 	if (!pdevfreq->devfreq)
290 		return;
291 
292 	spin_lock_irqsave(&pdevfreq->lock, irqflags);
293 
294 	panthor_devfreq_update_utilization(pdevfreq);
295 	pdevfreq->last_busy_state = false;
296 
297 	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
298 }
299