// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Linaro Limited
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * A devfreq device, combined with its energy model and the measured
 * load, gives an estimate of the power consumption and a way to limit
 * it through a frequency cap.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/devfreq.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>

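/*
 * A devfreq device wrapped into a DTPM node. The frequency QoS
 * request is used to cap the device frequency when a power limit is
 * set on the node.
 */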
struct dtpm_devfreq {
	struct dtpm dtpm;
	struct dev_pm_qos_request qos_req;
	struct devfreq *devfreq;
};

static struct dtpm_devfreq *to_dtpm_devfreq(struct dtpm *dtpm)
{
	return container_of(dtpm, struct dtpm_devfreq, dtpm);
}

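/*
 * Refresh the power range of the node from the energy model: the
 * first performance state gives the minimum power and the last one
 * the maximum power, as the table is sorted by ascending frequency.
 */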
static int update_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
	struct devfreq *devfreq = dtpm_devfreq->devfreq;
	struct device *dev = devfreq->dev.parent;
	struct em_perf_domain *pd = em_pd_get(dev);
	struct em_perf_state *table;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);

	dtpm->power_min = table[0].power;

	dtpm->power_max = table[pd->nr_perf_states - 1].power;

	rcu_read_unlock();
	return 0;
}

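/*
 * Find the highest performance state whose power does not exceed the
 * requested limit, cap the device frequency through the QoS request
 * and return the power effectively allowed.
 */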
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
	struct devfreq *devfreq = dtpm_devfreq->devfreq;
	struct device *dev = devfreq->dev.parent;
	struct em_perf_domain *pd = em_pd_get(dev);
	struct em_perf_state *table;
	unsigned long freq;
	int i;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);
	for (i = 0; i < pd->nr_perf_states; i++) {
		if (table[i].power > power_limit)
			break;
	}

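	/*
	 * The DTPM core clamps the requested limit within the
	 * [power_min, power_max] range, so the loop above runs at
	 * least one iteration and 'i - 1' is a valid index.
	 */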
	freq = table[i - 1].frequency;
	power_limit = table[i - 1].power;
	rcu_read_unlock();

	dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);

	return power_limit;
}

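/*
 * Rescale the counters so that 'total_time' is a fixed 1024 and
 * 'busy_time' becomes the load on a 0..1024 scale. Both counters are
 * pre-shifted down when large enough to keep the 10-bit left shift
 * below from overflowing on 32-bit platforms.
 */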
static void _normalize_load(struct devfreq_dev_status *status)
{
	if (status->total_time > 0xfffff) {
		status->total_time >>= 10;
		status->busy_time >>= 10;
	}

	status->busy_time <<= 10;
	status->busy_time /= status->total_time ? : 1;

	status->busy_time = status->busy_time ? : 1;
	status->total_time = 1024;
}

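/*
 * Estimate the current power consumption: take the power of the
 * first energy model state running at or above the current frequency
 * and scale it by the normalized load.
 */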
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
	struct devfreq *devfreq = dtpm_devfreq->devfreq;
	struct device *dev = devfreq->dev.parent;
	struct em_perf_domain *pd = em_pd_get(dev);
	struct devfreq_dev_status status;
	struct em_perf_state *table;
	unsigned long freq;
	u64 power = 0;
	int i;

	mutex_lock(&devfreq->lock);
	status = devfreq->last_status;
	mutex_unlock(&devfreq->lock);

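	/* The energy model stores frequencies in kHz, devfreq reports Hz. */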
	freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ);
	_normalize_load(&status);

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);
	for (i = 0; i < pd->nr_perf_states; i++) {

		if (table[i].frequency < freq)
			continue;

		power = table[i].power;
		power *= status.busy_time;
		power >>= 10;

		break;
	}
	rcu_read_unlock();

	return power;
}

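/*
 * Called by the DTPM core when the node is removed: drop the QoS
 * request if it is still active and free the node.
 */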
static void pd_release(struct dtpm *dtpm)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);

	if (dev_pm_qos_request_active(&dtpm_devfreq->qos_req))
		dev_pm_qos_remove_request(&dtpm_devfreq->qos_req);

	kfree(dtpm_devfreq);
}

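/* DTPM callbacks for a devfreq based power domain. */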
static struct dtpm_ops dtpm_ops = {
	.set_power_uw = set_pd_power_limit,
	.get_power_uw = get_pd_power_uw,
	.update_power_uw = update_pd_power_uw,
	.release = pd_release,
};

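/*
 * Create a DTPM node for the devfreq device and attach it to the
 * parent: make sure an energy model is registered, allocate and
 * register the node, then add the QoS request used to enforce the
 * power limit.
 */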
static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
{
	struct device *dev = devfreq->dev.parent;
	struct dtpm_devfreq *dtpm_devfreq;
	struct em_perf_domain *pd;
	int ret;

	pd = em_pd_get(dev);
	if (!pd) {
		ret = dev_pm_opp_of_register_em(dev, NULL);
		if (ret) {
			pr_err("No energy model available for '%s'\n", dev_name(dev));
			return -EINVAL;
		}
	}

	dtpm_devfreq = kzalloc(sizeof(*dtpm_devfreq), GFP_KERNEL);
	if (!dtpm_devfreq)
		return -ENOMEM;

	dtpm_init(&dtpm_devfreq->dtpm, &dtpm_ops);

	dtpm_devfreq->devfreq = devfreq;

	ret = dtpm_register(dev_name(dev), &dtpm_devfreq->dtpm, parent);
	if (ret) {
		pr_err("Failed to register '%s': %d\n", dev_name(dev), ret);
		kfree(dtpm_devfreq);
		return ret;
	}

	ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (ret) {
		pr_err("Failed to add QoS request: %d\n", ret);
		goto out_dtpm_unregister;
	}

	dtpm_update_power(&dtpm_devfreq->dtpm);

	return 0;

out_dtpm_unregister:
	dtpm_unregister(&dtpm_devfreq->dtpm);

	return ret;
}

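/*
 * DTPM setup callback: look up the devfreq device behind the device
 * tree node. A node without a devfreq device is not an error, it is
 * simply skipped.
 */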
static int dtpm_devfreq_setup(struct dtpm *dtpm, struct device_node *np)
{
	struct devfreq *devfreq;

	devfreq = devfreq_get_devfreq_by_node(np);
	if (IS_ERR(devfreq))
		return 0;

	return __dtpm_devfreq_setup(devfreq, dtpm);
}

struct dtpm_subsys_ops dtpm_devfreq_ops = {
	.name = KBUILD_MODNAME,
	.setup = dtpm_devfreq_setup,
};