// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Linaro Limited
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * The DTPM CPU is based on the energy model. It hooks the CPUs into
 * the DTPM tree, which in turn updates the power numbers by
 * propagating them from the CPU energy model information up to the
 * parents.
 *
 * The association between the power and the performance states makes
 * it possible to set the power of the CPU at the OPP granularity.
 *
 * CPU hotplug is supported and the power numbers are updated when a
 * CPU is hot plugged / unplugged.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

struct dtpm_cpu {
	struct dtpm dtpm;
	struct freq_qos_request qos_req;
	int cpu;
};

static DEFINE_PER_CPU(struct dtpm_cpu *, dtpm_per_cpu);

static struct dtpm_cpu *to_dtpm_cpu(struct dtpm *dtpm)
{
	return container_of(dtpm, struct dtpm_cpu, dtpm);
}

static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
	struct em_perf_state *table;
	struct cpumask cpus;
	unsigned long freq;
	u64 power;
	int i, nr_cpus;

	cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
	nr_cpus = cpumask_weight(&cpus);

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);

	/*
	 * Find the highest perf state whose power, summed over the
	 * online CPUs of the perf domain, stays within the limit.
	 */
	for (i = 0; i < pd->nr_perf_states; i++) {

		power = table[i].power * nr_cpus;

		if (power > power_limit)
			break;
	}

	freq = table[i - 1].frequency;
	power_limit = table[i - 1].power * nr_cpus;
	rcu_read_unlock();

	freq_qos_update_request(&dtpm_cpu->qos_req, freq);

	return power_limit;
}

static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power)
{
	unsigned long max, sum_util = 0;
	int cpu;

	/*
	 * The capacity is the same for all CPUs belonging to
	 * the same perf domain.
	 */
	max = arch_scale_cpu_capacity(cpumask_first(pd_mask));

	for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
		sum_util += sched_cpu_util(cpu);

	/*
	 * Scale the perf state power by the ratio of the aggregated
	 * utilization to the CPU capacity. The 10-bit shift keeps
	 * precision across the integer division.
	 */
	return (power * ((sum_util << 10) / max)) >> 10;
}

static u64 get_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct em_perf_state *table;
	struct em_perf_domain *pd;
	struct cpumask *pd_mask;
	unsigned long freq;
	u64 power = 0;
	int i;

	pd = em_cpu_get(dtpm_cpu->cpu);

	pd_mask = em_span_cpus(pd);

	freq = cpufreq_quick_get(dtpm_cpu->cpu);

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);

	/*
	 * Find the first perf state at or above the current
	 * frequency and scale its power by the utilization of the
	 * perf domain.
	 */
	for (i = 0; i < pd->nr_perf_states; i++) {

		if (table[i].frequency < freq)
			continue;

		power = scale_pd_power_uw(pd_mask, table[i].power);
		break;
	}
	rcu_read_unlock();

	return power;
}

static int update_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
	struct em_perf_state *table;
	struct cpumask cpus;
	int nr_cpus;

	cpumask_and(&cpus, cpu_online_mask, to_cpumask(em->cpus));
	nr_cpus = cpumask_weight(&cpus);

	rcu_read_lock();
	table = em_perf_state_from_pd(em);

	dtpm->power_min = table[0].power;
	dtpm->power_min *= nr_cpus;

	dtpm->power_max = table[em->nr_perf_states - 1].power;
	dtpm->power_max *= nr_cpus;

	rcu_read_unlock();

	return 0;
}

static void pd_release(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct cpufreq_policy *policy;

	if (freq_qos_request_active(&dtpm_cpu->qos_req))
		freq_qos_remove_request(&dtpm_cpu->qos_req);

	policy = cpufreq_cpu_get(dtpm_cpu->cpu);
	if (policy) {
		for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
			per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;

		cpufreq_cpu_put(policy);
	}

	kfree(dtpm_cpu);
}

static struct dtpm_ops dtpm_ops = {
	.set_power_uw	 = set_pd_power_limit,
	.get_power_uw	 = get_pd_power_uw,
	.update_power_uw = update_pd_power_uw,
	.release	 = pd_release,
};

static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
	struct dtpm_cpu *dtpm_cpu;

	dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
	if (dtpm_cpu)
		dtpm_update_power(&dtpm_cpu->dtpm);

	return 0;
}

static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
	struct dtpm_cpu *dtpm_cpu;

	dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
	if (dtpm_cpu)
		return dtpm_update_power(&dtpm_cpu->dtpm);

	return 0;
}

static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
{
	struct dtpm_cpu *dtpm_cpu;
	struct cpufreq_policy *policy;
	struct em_perf_state *table;
	struct em_perf_domain *pd;
	char name[CPUFREQ_NAME_LEN];
	int ret = -ENOMEM;

	dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
	if (dtpm_cpu)
		return 0;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	pd = em_cpu_get(cpu);
	if (!pd || em_is_artificial(pd)) {
		ret = -EINVAL;
		goto release_policy;
	}

	dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
	if (!dtpm_cpu) {
		ret = -ENOMEM;
		goto release_policy;
	}

	dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
	dtpm_cpu->cpu = cpu;

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;

	snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);

	ret = dtpm_register(name, &dtpm_cpu->dtpm, parent);
	if (ret)
		goto out_kfree_dtpm_cpu;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);
	ret = freq_qos_add_request(&policy->constraints,
				   &dtpm_cpu->qos_req, FREQ_QOS_MAX,
				   table[pd->nr_perf_states - 1].frequency);
	rcu_read_unlock();
	if (ret < 0)
		goto out_dtpm_unregister;

	cpufreq_cpu_put(policy);
	return 0;

out_dtpm_unregister:
	dtpm_unregister(&dtpm_cpu->dtpm);
	dtpm_cpu = NULL;

out_kfree_dtpm_cpu:
	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(dtpm_per_cpu, cpu) = NULL;
	kfree(dtpm_cpu);

release_policy:
	cpufreq_cpu_put(policy);
	return ret;
}

static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np)
{
	int cpu;

	cpu = of_cpu_node_to_id(np);
	if (cpu < 0)
		return 0;

	return __dtpm_cpu_setup(cpu, dtpm);
}

static int dtpm_cpu_init(void)
{
	int ret;

	/*
	 * The callbacks at CPU hotplug time call dtpm_update_power(),
	 * which in turn calls update_pd_power_uw().
	 *
	 * The function update_pd_power_uw() uses the online mask to
	 * figure out the power consumption limits.
	 *
	 * At CPUHP_AP_ONLINE_DYN, the CPU is in the online mask when
	 * cpuhp_dtpm_cpu_online() is called, but it is still in the
	 * online mask when the tear down callback runs. So the power
	 * cannot be updated when the CPU is unplugged.
	 *
	 * At CPUHP_AP_DTPM_CPU_DEAD, the situation is the opposite of
	 * the above: the CPU online mask is not yet up to date when
	 * the CPU is plugged in.
	 *
	 * For this reason, the online and offline callbacks are
	 * registered at different hotplug states, so that each runs
	 * when the CPU online mask is consistent with the power
	 * numbers we want to update.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_DTPM_CPU_DEAD, "dtpm_cpu:offline",
				NULL, cpuhp_dtpm_cpu_offline);
	if (ret < 0)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dtpm_cpu:online",
				cpuhp_dtpm_cpu_online, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

static void dtpm_cpu_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
	cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD);
}

struct dtpm_subsys_ops dtpm_cpu_ops = {
	.name = KBUILD_MODNAME,
	.init = dtpm_cpu_init,
	.exit = dtpm_cpu_exit,
	.setup = dtpm_cpu_setup,
};