xref: /linux/drivers/devfreq/governor_passive.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/devfreq/governor_passive.c
 *
 * Copyright (C) 2016 Samsung Electronics
 * Author: Chanwoo Choi <cw00.choi@samsung.com>
 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/units.h>
#include "governor.h"

/**
 * struct devfreq_cpu_data - Hold the per-cpu data
 * @node:	list node
 * @dev:	reference to cpu device.
 * @first_cpu:	the first cpu of the policy's related_cpus mask.
 * @opp_table:	reference to cpu opp table.
 * @cur_freq:	the current frequency of the cpu.
 * @min_freq:	the min frequency of the cpu.
 * @max_freq:	the max frequency of the cpu.
 *
 * This structure stores the required cpu_data of a cpu.
 * This is auto-populated by the governor.
 */
struct devfreq_cpu_data {
	struct list_head node;

	struct device *dev;
	unsigned int first_cpu;

	struct opp_table *opp_table;
	unsigned int cur_freq;
	unsigned int min_freq;
	unsigned int max_freq;
};

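/*
 * Look up the cached devfreq_cpu_data for a cpufreq policy.  Entries are
 * keyed by the first CPU of policy->related_cpus, so every CPU that shares
 * a policy maps to the same entry.
 */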
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
		    struct cpufreq_policy *policy)
{
	struct devfreq_cpu_data *parent_cpu_data;

	if (!p_data || !policy)
		return NULL;

	list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
		if (parent_cpu_data->first_cpu == cpumask_first(policy->related_cpus))
			return parent_cpu_data;

	return NULL;
}

static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
{
	struct devfreq_cpu_data *parent_cpu_data, *tmp;

	list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
		list_del(&parent_cpu_data->node);

		if (parent_cpu_data->opp_table)
			dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);

		kfree(parent_cpu_data);
	}
}

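/*
 * Translate a parent frequency into a frequency of this devfreq device by
 * following the "required-opps" links between the two OPP tables.  Note
 * that devfreq_recommended_opp() may round *freq to a valid parent OPP.
 * Returns the translated frequency in Hz, or 0 on failure so that callers
 * can fall back to interpolation.
 */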
static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
						struct opp_table *p_opp_table,
						struct opp_table *opp_table,
						unsigned long *freq)
{
	struct dev_pm_opp *opp = NULL, *p_opp = NULL;
	unsigned long target_freq;

	if (!p_dev || !p_opp_table || !opp_table || !freq)
		return 0;

	p_opp = devfreq_recommended_opp(p_dev, freq, 0);
	if (IS_ERR(p_opp))
		return 0;

	opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
	dev_pm_opp_put(p_opp);

	if (IS_ERR(opp))
		return 0;

	target_freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return target_freq;
}

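/*
 * Derive the device's target frequency from the current cpufreq state.
 * For each online CPU's policy, first try the required-opps translation
 * from the cached CPU frequency; if that is not available, linearly scale
 * the CPU's position within [min_freq, max_freq] onto the device's own
 * frequency range.  The final target is the maximum across all policies.
 */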
static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
					unsigned long *target_freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
	unsigned long dev_min, dev_max;
	unsigned long freq = 0;
	int ret = 0;

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EINVAL;
			continue;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (!parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Get target freq via required opps */
		cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
		freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
					parent_cpu_data->opp_table,
					devfreq->opp_table, &cpu_cur);
		if (freq) {
			*target_freq = max(freq, *target_freq);
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Use interpolation if required opps is not available */
		devfreq_get_freq_range(devfreq, &dev_min, &dev_max);

		cpu_min = parent_cpu_data->min_freq;
		cpu_max = parent_cpu_data->max_freq;
		cpu_cur = parent_cpu_data->cur_freq;

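		/*
		 * Linear interpolation: map the CPU's position within its
		 * [cpu_min, cpu_max] range onto [dev_min, dev_max].  For
		 * example (illustrative numbers only), a CPU at 1.5 GHz in a
		 * 1.0-2.0 GHz range sits at 50%, so the device is asked for
		 * the frequency half-way between dev_min and dev_max.
		 */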
		cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
		freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);

		*target_freq = max(freq, *target_freq);
		cpufreq_cpu_put(policy);
	}

	return ret;
}

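/*
 * Derive the device's target frequency from the parent devfreq device.
 * Try the required-opps translation first; otherwise locate the parent's
 * frequency in its freq_table and reuse the same index in this device's
 * table, clamping to the last entry when this device has fewer states.
 */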
static int get_target_freq_with_devfreq(struct devfreq *devfreq,
					unsigned long *freq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
	unsigned long child_freq = ULONG_MAX;
	int i, count;

	/* Get target freq via required opps */
	child_freq = get_target_freq_by_required_opp(parent_devfreq->dev.parent,
						parent_devfreq->opp_table,
						devfreq->opp_table, freq);
	if (child_freq)
		goto out;

	/* Use interpolation if required opps is not available */
	for (i = 0; i < parent_devfreq->max_state; i++)
		if (parent_devfreq->freq_table[i] == *freq)
			break;

	if (i == parent_devfreq->max_state)
		return -EINVAL;

	if (i < devfreq->max_state) {
		child_freq = devfreq->freq_table[i];
	} else {
		count = devfreq->max_state;
		child_freq = devfreq->freq_table[count - 1];
	}

out:
	*freq = child_freq;

	return 0;
}

static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
					   unsigned long *freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (!p_data)
		return -EINVAL;

	/*
	 * If the devfreq device using the passive governor provides its own
	 * method to determine the next frequency, use the get_target_freq()
	 * callback of struct devfreq_passive_data.
	 */
	if (p_data->get_target_freq)
		return p_data->get_target_freq(devfreq, freq);

	switch (p_data->parent_type) {
	case DEVFREQ_PARENT_DEV:
		ret = get_target_freq_with_devfreq(devfreq, freq);
		break;
	case CPUFREQ_PARENT_DEV:
		ret = get_target_freq_with_cpufreq(devfreq, freq);
		break;
	default:
		ret = -EINVAL;
		dev_err(&devfreq->dev, "Invalid parent type\n");
		break;
	}

	return ret;
}

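/*
 * CPUFREQ_TRANSITION_NOTIFIER callback: on CPUFREQ_POSTCHANGE, cache the
 * policy's new frequency and re-evaluate this device's target under
 * devfreq->lock.  The cached value is restored if the update fails.
 */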
static int cpufreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *p_data =
			container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)p_data->this;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_freqs *freqs = ptr;
	unsigned int cur_freq;
	int ret;

	if (event != CPUFREQ_POSTCHANGE || !freqs)
		return 0;

	parent_cpu_data = get_parent_cpu_data(p_data, freqs->policy);
	if (!parent_cpu_data || parent_cpu_data->cur_freq == freqs->new)
		return 0;

	cur_freq = parent_cpu_data->cur_freq;
	parent_cpu_data->cur_freq = freqs->new;

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, freqs->new);
	mutex_unlock(&devfreq->lock);
	if (ret) {
		parent_cpu_data->cur_freq = cur_freq;
		dev_err(&devfreq->dev, "failed to update the frequency.\n");
		return ret;
	}

	return 0;
}

static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (p_data->nb.notifier_call) {
		ret = cpufreq_unregister_notifier(&p_data->nb,
					CPUFREQ_TRANSITION_NOTIFIER);
		if (ret < 0)
			return ret;
	}

	delete_parent_cpu_data(p_data);

	return 0;
}

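/*
 * Register the cpufreq transition notifier and build one devfreq_cpu_data
 * entry per cpufreq policy (keyed by the first CPU of related_cpus) for
 * every possible CPU, recording its OPP table and current/min/max
 * frequencies.  Returns -EPROBE_DEFER when a policy is not available yet,
 * and triggers an initial devfreq_update_target() once the list is built.
 */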
static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct device *dev = devfreq->dev.parent;
	struct opp_table *opp_table = NULL;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	struct device *cpu_dev;
	unsigned int cpu;
	int ret;

	p_data->cpu_data_list
		= (struct list_head)LIST_HEAD_INIT(p_data->cpu_data_list);

	p_data->nb.notifier_call = cpufreq_passive_notifier_call;
	ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		dev_err(dev, "failed to register cpufreq notifier\n");
		p_data->nb.notifier_call = NULL;
		goto err;
	}

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EPROBE_DEFER;
			goto err;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		parent_cpu_data = kzalloc(sizeof(*parent_cpu_data), GFP_KERNEL);
		if (!parent_cpu_data) {
			ret = -ENOMEM;
			goto err_put_policy;
		}

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			dev_err(dev, "failed to get cpu device\n");
			ret = -ENODEV;
			goto err_free_cpu_data;
		}

		opp_table = dev_pm_opp_get_opp_table(cpu_dev);
		if (IS_ERR(opp_table)) {
			dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
			ret = PTR_ERR(opp_table);
			goto err_free_cpu_data;
		}

		parent_cpu_data->dev = cpu_dev;
		parent_cpu_data->opp_table = opp_table;
		parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
		parent_cpu_data->cur_freq = policy->cur;
		parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
		parent_cpu_data->max_freq = policy->cpuinfo.max_freq;

		list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
		cpufreq_cpu_put(policy);
	}

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, 0L);
	mutex_unlock(&devfreq->lock);
	if (ret)
		dev_err(dev, "failed to update the frequency\n");

	return ret;

err_free_cpu_data:
	kfree(parent_cpu_data);
err_put_policy:
	cpufreq_cpu_put(policy);
err:

	return ret;
}

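/*
 * DEVFREQ_TRANSITION_NOTIFIER callback from the parent devfreq device.
 * The child is updated on DEVFREQ_PRECHANGE when the parent frequency is
 * about to decrease, and on DEVFREQ_POSTCHANGE when it has just increased,
 * i.e. downward transitions are followed before and upward transitions
 * after the parent actually changes frequency.
 */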
static int devfreq_passive_notifier_call(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct devfreq_passive_data *data
			= container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)data->this;
	struct devfreq *parent = (struct devfreq *)data->parent;
	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
	unsigned long freq = freqs->new;
	int ret = 0;

	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
	switch (event) {
	case DEVFREQ_PRECHANGE:
		if (parent->previous_freq > freq)
			ret = devfreq_update_target(devfreq, freq);

		break;
	case DEVFREQ_POSTCHANGE:
		if (parent->previous_freq < freq)
			ret = devfreq_update_target(devfreq, freq);
		break;
	}
	mutex_unlock(&devfreq->lock);

	if (ret < 0)
		dev_warn(&devfreq->dev,
			"failed to update devfreq using passive governor\n");

	return NOTIFY_DONE;
}

static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

static int devfreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	if (!parent)
		return -EPROBE_DEFER;

	nb->notifier_call = devfreq_passive_notifier_call;
	return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

static int devfreq_passive_event_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret = 0;

	if (!p_data)
		return -EINVAL;

	p_data->this = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			ret = devfreq_passive_register_notifier(devfreq);
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			ret = cpufreq_passive_register_notifier(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			WARN_ON(devfreq_passive_unregister_notifier(devfreq));
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
		break;
	default:
		break;
	}

	return ret;
}

static struct devfreq_governor devfreq_passive = {
	.name = DEVFREQ_GOV_PASSIVE,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
	.get_target_freq = devfreq_passive_get_target_freq,
	.event_handler = devfreq_passive_event_handler,
};

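/*
 * Example usage (illustrative sketch, not part of this file): a driver
 * whose device must follow a parent devfreq device registers with this
 * governor by passing a struct devfreq_passive_data as governor data.
 * "profile", "parent_devfreq" and "dev" below are placeholders supplied
 * by the caller.
 *
 *	static struct devfreq_passive_data passive_data = {
 *		.parent_type = DEVFREQ_PARENT_DEV,
 *	};
 *
 *	passive_data.parent = parent_devfreq;
 *	devfreq = devm_devfreq_add_device(dev, &profile,
 *					  DEVFREQ_GOV_PASSIVE, &passive_data);
 *
 * For a device that should follow cpufreq instead, set .parent_type to
 * CPUFREQ_PARENT_DEV; .parent is not used in that case.
 */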
static int __init devfreq_passive_init(void)
{
	return devfreq_add_governor(&devfreq_passive);
}
subsys_initcall(devfreq_passive_init);

static void __exit devfreq_passive_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&devfreq_passive);
	if (ret)
		pr_err("%s: failed to remove governor %d\n", __func__, ret);
}
module_exit(devfreq_passive_exit);

MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("DEVFREQ Passive governor");
MODULE_LICENSE("GPL v2");