// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/devfreq/governor_passive.c
 *
 * Copyright (C) 2016 Samsung Electronics
 * Author: Chanwoo Choi <cw00.choi@samsung.com>
 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/units.h>
#include "governor.h"

/**
 * struct devfreq_cpu_data - Hold the per-cpu data
 * @node:	list node
 * @dev:	reference to cpu device.
 * @first_cpu:	the first cpu of the policy's related_cpus mask.
 * @opp_table:	reference to cpu opp table.
 * @cur_freq:	the current frequency of the cpu.
 * @min_freq:	the min frequency of the cpu.
 * @max_freq:	the max frequency of the cpu.
 *
 * This structure stores the required cpu_data for each cpufreq policy.
 * It is populated automatically by the governor.
 */
struct devfreq_cpu_data {
	struct list_head node;

	struct device *dev;
	unsigned int first_cpu;

	struct opp_table *opp_table;
	unsigned int cur_freq;
	unsigned int min_freq;
	unsigned int max_freq;
};

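/*
 * Look up the devfreq_cpu_data entry tracking @policy, matched by the first
 * cpu of the policy's related_cpus mask. Returns NULL if the policy is not
 * tracked yet.
 */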
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
		    struct cpufreq_policy *policy)
{
	struct devfreq_cpu_data *parent_cpu_data;

	if (!p_data || !policy)
		return NULL;

	list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
		if (parent_cpu_data->first_cpu == cpumask_first(policy->related_cpus))
			return parent_cpu_data;

	return NULL;
}

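/*
 * Drop every tracked per-policy entry: release the OPP table reference and
 * free the entry itself.
 */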
static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
{
	struct devfreq_cpu_data *parent_cpu_data, *tmp;

	list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
		list_del(&parent_cpu_data->node);

		if (parent_cpu_data->opp_table)
			dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);

		kfree(parent_cpu_data);
	}
}

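/*
 * Translate the parent frequency *freq into a frequency of this device via
 * the required-opps link between the parent OPP table and the device OPP
 * table. Returns 0 when no translation is possible, so the caller can fall
 * back to interpolation.
 */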
static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
						struct opp_table *p_opp_table,
						struct opp_table *opp_table,
						unsigned long *freq)
{
	struct dev_pm_opp *opp = NULL, *p_opp = NULL;
	unsigned long target_freq;

	if (!p_dev || !p_opp_table || !opp_table || !freq)
		return 0;

	p_opp = devfreq_recommended_opp(p_dev, freq, 0);
	if (IS_ERR(p_opp))
		return 0;

	opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
	dev_pm_opp_put(p_opp);

	if (IS_ERR(opp))
		return 0;

	target_freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return target_freq;
}

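/*
 * Derive the device frequency from the cpufreq parent: for each online CPU
 * policy, translate the CPU's current frequency through required-opps or,
 * if that is not possible, interpolate linearly between the device's min
 * and max frequencies. The highest result across all policies wins.
 */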
static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
					unsigned long *target_freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
	unsigned long dev_min, dev_max;
	unsigned long freq = 0;
	int ret = 0;

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EINVAL;
			continue;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (!parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Get target freq via required opps */
		cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
		freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
					parent_cpu_data->opp_table,
					devfreq->opp_table, &cpu_cur);
		if (freq) {
			*target_freq = max(freq, *target_freq);
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Use interpolation if required opps are not available */
		devfreq_get_freq_range(devfreq, &dev_min, &dev_max);

		cpu_min = parent_cpu_data->min_freq;
		cpu_max = parent_cpu_data->max_freq;
		cpu_cur = parent_cpu_data->cur_freq;

		cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
		freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);

		*target_freq = max(freq, *target_freq);
		cpufreq_cpu_put(policy);
	}

	return ret;
}

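/*
 * Derive the device frequency from a devfreq parent: translate the parent's
 * new frequency through required-opps or, failing that, reuse the parent's
 * index in this device's frequency table (clamped to the last entry).
 */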
static int get_target_freq_with_devfreq(struct devfreq *devfreq,
					unsigned long *freq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
	unsigned long child_freq = ULONG_MAX;
	int i, count;

	/* Get target freq via required opps */
	child_freq = get_target_freq_by_required_opp(parent_devfreq->dev.parent,
						parent_devfreq->opp_table,
						devfreq->opp_table, freq);
	if (child_freq)
		goto out;

	/* Use interpolation if required opps are not available */
	for (i = 0; i < parent_devfreq->max_state; i++)
		if (parent_devfreq->freq_table[i] == *freq)
			break;

	if (i == parent_devfreq->max_state)
		return -EINVAL;

	if (i < devfreq->max_state) {
		child_freq = devfreq->freq_table[i];
	} else {
		count = devfreq->max_state;
		child_freq = devfreq->freq_table[count - 1];
	}

out:
	*freq = child_freq;

	return 0;
}

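/*
 * Governor ->get_target_freq() callback: prefer a driver-supplied
 * get_target_freq() if one is set, otherwise derive the frequency from the
 * devfreq or cpufreq parent according to the configured parent type.
 */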
static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
					   unsigned long *freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (!p_data)
		return -EINVAL;

	/*
	 * If the devfreq device using the passive governor provides its own
	 * method to determine the next frequency, use the get_target_freq()
	 * callback of struct devfreq_passive_data.
	 */
	if (p_data->get_target_freq)
		return p_data->get_target_freq(devfreq, freq);

	switch (p_data->parent_type) {
	case DEVFREQ_PARENT_DEV:
		ret = get_target_freq_with_devfreq(devfreq, freq);
		break;
	case CPUFREQ_PARENT_DEV:
		ret = get_target_freq_with_cpufreq(devfreq, freq);
		break;
	default:
		ret = -EINVAL;
		dev_err(&devfreq->dev, "Invalid parent type\n");
		break;
	}

	return ret;
}

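/*
 * cpufreq transition notifier: on POSTCHANGE, record the CPU's new
 * frequency and retarget the passive devfreq device. The cached frequency
 * is restored if the update fails.
 */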
static int cpufreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *p_data =
			container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)p_data->this;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_freqs *freqs = ptr;
	unsigned int cur_freq;
	int ret;

	if (event != CPUFREQ_POSTCHANGE || !freqs)
		return 0;

	parent_cpu_data = get_parent_cpu_data(p_data, freqs->policy);
	if (!parent_cpu_data || parent_cpu_data->cur_freq == freqs->new)
		return 0;

	cur_freq = parent_cpu_data->cur_freq;
	parent_cpu_data->cur_freq = freqs->new;

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, freqs->new);
	mutex_unlock(&devfreq->lock);
	if (ret) {
		parent_cpu_data->cur_freq = cur_freq;
		dev_err(&devfreq->dev, "failed to update the frequency.\n");
		return ret;
	}

	return 0;
}

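/*
 * Unregister the cpufreq transition notifier and free all per-policy data.
 */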
static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (p_data->nb.notifier_call) {
		ret = cpufreq_unregister_notifier(&p_data->nb,
					CPUFREQ_TRANSITION_NOTIFIER);
		if (ret < 0)
			return ret;
	}

	delete_parent_cpu_data(p_data);

	return 0;
}

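/*
 * Register the cpufreq transition notifier and build one devfreq_cpu_data
 * entry per cpufreq policy, recording the policy's first cpu, OPP table and
 * current/min/max frequencies. Probing is deferred if a policy is not ready
 * yet.
 */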
static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct device *dev = devfreq->dev.parent;
	struct opp_table *opp_table = NULL;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	struct device *cpu_dev;
	unsigned int cpu;
	int ret;

	p_data->cpu_data_list
		= (struct list_head)LIST_HEAD_INIT(p_data->cpu_data_list);

	p_data->nb.notifier_call = cpufreq_passive_notifier_call;
	ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		dev_err(dev, "failed to register cpufreq notifier\n");
		p_data->nb.notifier_call = NULL;
		goto err;
	}

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EPROBE_DEFER;
			goto err;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		parent_cpu_data = kzalloc(sizeof(*parent_cpu_data),
						GFP_KERNEL);
		if (!parent_cpu_data) {
			ret = -ENOMEM;
			goto err_put_policy;
		}

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			dev_err(dev, "failed to get cpu device\n");
			ret = -ENODEV;
			goto err_free_cpu_data;
		}

		opp_table = dev_pm_opp_get_opp_table(cpu_dev);
		if (IS_ERR(opp_table)) {
			dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
			ret = PTR_ERR(opp_table);
			goto err_free_cpu_data;
		}

		parent_cpu_data->dev = cpu_dev;
		parent_cpu_data->opp_table = opp_table;
		parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
		parent_cpu_data->cur_freq = policy->cur;
		parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
		parent_cpu_data->max_freq = policy->cpuinfo.max_freq;

		list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
		cpufreq_cpu_put(policy);
	}

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, 0L);
	mutex_unlock(&devfreq->lock);
	if (ret)
		dev_err(dev, "failed to update the frequency\n");

	return ret;

err_free_cpu_data:
	kfree(parent_cpu_data);
err_put_policy:
	cpufreq_cpu_put(policy);
err:
	return ret;
}

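/*
 * devfreq transition notifier: follow the parent device's frequency by
 * updating the child before the parent lowers its frequency (PRECHANGE)
 * and after the parent raises it (POSTCHANGE).
 */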
static int devfreq_passive_notifier_call(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct devfreq_passive_data *data
			= container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)data->this;
	struct devfreq *parent = (struct devfreq *)data->parent;
	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
	unsigned long freq = freqs->new;
	int ret = 0;

	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
	switch (event) {
	case DEVFREQ_PRECHANGE:
		if (parent->previous_freq > freq)
			ret = devfreq_update_target(devfreq, freq);

		break;
	case DEVFREQ_POSTCHANGE:
		if (parent->previous_freq < freq)
			ret = devfreq_update_target(devfreq, freq);
		break;
	}
	mutex_unlock(&devfreq->lock);

	if (ret < 0)
		dev_warn(&devfreq->dev,
			"failed to update devfreq using passive governor\n");

	return NOTIFY_DONE;
}

static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

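/*
 * Register the transition notifier on the parent devfreq device; defer
 * probing until the parent has been registered.
 */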
static int devfreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	if (!parent)
		return -EPROBE_DEFER;

	nb->notifier_call = devfreq_passive_notifier_call;
	return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

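/*
 * Governor event handler: register the notifier that matches the parent
 * type on DEVFREQ_GOV_START and unregister it on DEVFREQ_GOV_STOP.
 */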
static int devfreq_passive_event_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret = 0;

	if (!p_data)
		return -EINVAL;

	p_data->this = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			ret = devfreq_passive_register_notifier(devfreq);
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			ret = cpufreq_passive_register_notifier(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			WARN_ON(devfreq_passive_unregister_notifier(devfreq));
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
		break;
	default:
		break;
	}

	return ret;
}

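/*
 * Illustrative usage sketch (not part of this driver): a consumer device
 * driver would typically select this governor at probe time by passing
 * DEVFREQ_GOV_PASSIVE together with a struct devfreq_passive_data that
 * names the parent. The identifiers "parent_devfreq" and "example_profile"
 * below are hypothetical placeholders, not symbols defined in this file:
 *
 *	struct devfreq_passive_data *passive_data;
 *
 *	passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
 *	if (!passive_data)
 *		return -ENOMEM;
 *	passive_data->parent = parent_devfreq;
 *	passive_data->parent_type = DEVFREQ_PARENT_DEV;
 *
 *	devfreq = devm_devfreq_add_device(dev, &example_profile,
 *					  DEVFREQ_GOV_PASSIVE, passive_data);
 */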
static struct devfreq_governor devfreq_passive = {
	.name = DEVFREQ_GOV_PASSIVE,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
	.get_target_freq = devfreq_passive_get_target_freq,
	.event_handler = devfreq_passive_event_handler,
};

static int __init devfreq_passive_init(void)
{
	return devfreq_add_governor(&devfreq_passive);
}
subsys_initcall(devfreq_passive_init);

static void __exit devfreq_passive_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&devfreq_passive);
	if (ret)
		pr_err("%s: failed to remove governor %d\n", __func__, ret);
}
module_exit(devfreq_passive_exit);

MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("DEVFREQ Passive governor");
MODULE_LICENSE("GPL v2");