// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Linaro Limited
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * The devfreq device, combined with the energy model and the load, can
 * give an estimate of the power consumption as well as a way of
 * limiting the power.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/devfreq.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>

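/*
 * Per-device instance: the dtpm node exposed to the powercap framework,
 * the PM QoS request used to cap the frequency, and the devfreq device
 * it controls.
 */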
struct dtpm_devfreq {
	struct dtpm dtpm;
	struct dev_pm_qos_request qos_req;
	struct devfreq *devfreq;
};

static struct dtpm_devfreq *to_dtpm_devfreq(struct dtpm *dtpm)
{
	return container_of(dtpm, struct dtpm_devfreq, dtpm);
}

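/*
 * Refresh the power range of the node from the energy model: the first
 * performance state gives the minimum power, the last one the maximum.
 */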
static int update_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
	struct devfreq *devfreq = dtpm_devfreq->devfreq;
	struct device *dev = devfreq->dev.parent;
	struct em_perf_domain *pd = em_pd_get(dev);

	dtpm->power_min = pd->table[0].power;

	dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;

	return 0;
}

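/*
 * Cap the device power: pick the highest performance state whose power
 * does not exceed the requested limit, reflect it as a maximum frequency
 * through the PM QoS request and return the power actually granted. The
 * limit is expected to be clamped by the dtpm core to the
 * [power_min, power_max] range, so 'i - 1' is a valid index.
 */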
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
	struct devfreq *devfreq = dtpm_devfreq->devfreq;
	struct device *dev = devfreq->dev.parent;
	struct em_perf_domain *pd = em_pd_get(dev);
	unsigned long freq;
	int i;

	for (i = 0; i < pd->nr_perf_states; i++) {
		if (pd->table[i].power > power_limit)
			break;
	}

	freq = pd->table[i - 1].frequency;

	dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);

	power_limit = pd->table[i - 1].power;

	return power_limit;
}

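/*
 * Turn the busy/total time couple into a load ratio scaled to 1024.
 * Large values are shifted down first to keep the 'busy_time << 10'
 * below from overflowing.
 */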
static void _normalize_load(struct devfreq_dev_status *status)
{
	if (status->total_time > 0xfffff) {
		status->total_time >>= 10;
		status->busy_time >>= 10;
	}

	status->busy_time <<= 10;
	status->busy_time /= status->total_time ? : 1;

	status->busy_time = status->busy_time ? : 1;
	status->total_time = 1024;
}

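/*
 * Estimate the current power consumption: find the first performance
 * state running at or above the current frequency and scale its power
 * by the normalized load (busy_time / 1024). The devfreq status is
 * snapshotted under the devfreq lock and the frequency converted from
 * Hz to kHz to match the energy model table.
 */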
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
	struct devfreq *devfreq = dtpm_devfreq->devfreq;
	struct device *dev = devfreq->dev.parent;
	struct em_perf_domain *pd = em_pd_get(dev);
	struct devfreq_dev_status status;
	unsigned long freq;
	u64 power;
	int i;

	mutex_lock(&devfreq->lock);
	status = devfreq->last_status;
	mutex_unlock(&devfreq->lock);

	freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ);
	_normalize_load(&status);

	for (i = 0; i < pd->nr_perf_states; i++) {

		if (pd->table[i].frequency < freq)
			continue;

		power = pd->table[i].power;
		power *= status.busy_time;
		power >>= 10;

		return power;
	}

	return 0;
}

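/*
 * Release callback: drop the frequency capping request if it is still
 * active and free the per-device structure.
 */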
static void pd_release(struct dtpm *dtpm)
{
	struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);

	if (dev_pm_qos_request_active(&dtpm_devfreq->qos_req))
		dev_pm_qos_remove_request(&dtpm_devfreq->qos_req);

	kfree(dtpm_devfreq);
}

static struct dtpm_ops dtpm_ops = {
	.set_power_uw = set_pd_power_limit,
	.get_power_uw = get_pd_power_uw,
	.update_power_uw = update_pd_power_uw,
	.release = pd_release,
};

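/*
 * Create a dtpm node for a devfreq device: make sure an energy model is
 * available (registering one from the OPP table if needed), attach the
 * node below 'parent' in the dtpm hierarchy, add the maximum frequency
 * PM QoS request used to limit the power and refresh the power range.
 */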
static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
{
	struct device *dev = devfreq->dev.parent;
	struct dtpm_devfreq *dtpm_devfreq;
	struct em_perf_domain *pd;
	int ret = -ENOMEM;

	pd = em_pd_get(dev);
	if (!pd) {
		ret = dev_pm_opp_of_register_em(dev, NULL);
		if (ret) {
			pr_err("No energy model available for '%s'\n", dev_name(dev));
			return -EINVAL;
		}
	}

	dtpm_devfreq = kzalloc(sizeof(*dtpm_devfreq), GFP_KERNEL);
	if (!dtpm_devfreq)
		return -ENOMEM;

	dtpm_init(&dtpm_devfreq->dtpm, &dtpm_ops);

	dtpm_devfreq->devfreq = devfreq;

	ret = dtpm_register(dev_name(dev), &dtpm_devfreq->dtpm, parent);
	if (ret) {
		pr_err("Failed to register '%s': %d\n", dev_name(dev), ret);
		kfree(dtpm_devfreq);
		return ret;
	}

	ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (ret) {
		pr_err("Failed to add QoS request: %d\n", ret);
		goto out_dtpm_unregister;
	}

	dtpm_update_power(&dtpm_devfreq->dtpm);

	return 0;

out_dtpm_unregister:
	dtpm_unregister(&dtpm_devfreq->dtpm);

	return ret;
}

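/*
 * Hierarchy setup callback: look up the devfreq device behind the device
 * tree node and, if one is found, create its dtpm node. Nodes without a
 * devfreq device are silently skipped.
 */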
static int dtpm_devfreq_setup(struct dtpm *dtpm, struct device_node *np)
{
	struct devfreq *devfreq;

	devfreq = devfreq_get_devfreq_by_node(np);
	if (IS_ERR(devfreq))
		return 0;

	return __dtpm_devfreq_setup(devfreq, dtpm);
}

struct dtpm_subsys_ops dtpm_devfreq_ops = {
	.name = KBUILD_MODNAME,
	.setup = dtpm_devfreq_setup,
};
199