// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "cpufreq-dt.h"

struct private_data {
	struct list_head node;

	cpumask_var_t cpus;
	struct device *cpu_dev;
	struct cpufreq_frequency_table *freq_table;
	bool have_static_opps;
	int opp_token;
};

static LIST_HEAD(priv_list);

static struct freq_attr *cpufreq_dt_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* Extra space for boost-attr if required */
	NULL,
};

static struct private_data *cpufreq_dt_find_data(int cpu)
{
	struct private_data *priv;

	list_for_each_entry(priv, &priv_list, node) {
		if (cpumask_test_cpu(cpu, priv->cpus))
			return priv;
	}

	return NULL;
}

static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct private_data *priv = policy->driver_data;
	unsigned long freq = policy->freq_table[index].frequency;

	return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
}

/*
 * An earlier version of opp-v1 bindings used to name the regulator
 * "cpu0-supply", we still need to handle that for backwards compatibility.
 */
static const char *find_supply_name(struct device *dev)
{
	struct device_node *np __free(device_node) = of_node_get(dev->of_node);
	struct property *pp;
	int cpu = dev->id;

	/* This must be valid for sure */
	if (WARN_ON(!np))
		return NULL;

	/* Try "cpu0" for older DTs */
	if (!cpu) {
		pp = of_find_property(np, "cpu0-supply", NULL);
		if (pp)
			return "cpu0";
	}

	pp = of_find_property(np, "cpu-supply", NULL);
	if (pp)
		return "cpu";

	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
	return NULL;
}

static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct private_data *priv;
	struct device *cpu_dev;
	struct clk *cpu_clk;
	unsigned int transition_latency;
	int ret;

	priv = cpufreq_dt_find_data(policy->cpu);
	if (!priv) {
		pr_err("failed to find data for cpu%d\n", policy->cpu);
		return -ENODEV;
	}
	cpu_dev = priv->cpu_dev;

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
		return ret;
	}

	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
	if (!transition_latency)
		transition_latency = CPUFREQ_ETERNAL;

	cpumask_copy(policy->cpus, priv->cpus);
	policy->driver_data = priv;
	policy->clk = cpu_clk;
	policy->freq_table = priv->freq_table;
	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
	policy->cpuinfo.transition_latency = transition_latency;
	policy->dvfs_possible_from_any_cpu = true;

	/* Support turbo/boost mode */
	if (policy_has_boost_freq(policy)) {
		/* This gets disabled by core on driver unregister */
		ret = cpufreq_enable_boost_support();
		if (ret)
			goto out_clk_put;
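		/* Expose the boost OPPs via the scaling_boost_frequencies sysfs attribute */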
		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}

	return 0;

out_clk_put:
	clk_put(cpu_clk);

	return ret;
}

static int cpufreq_online(struct cpufreq_policy *policy)
{
	/* We did light-weight tear down earlier, nothing to do here */
	return 0;
}

static int cpufreq_offline(struct cpufreq_policy *policy)
{
	/*
	 * Preserve policy->driver_data and don't free resources on light-weight
	 * tear down.
	 */
	return 0;
}

static int cpufreq_exit(struct cpufreq_policy *policy)
{
	clk_put(policy->clk);
	return 0;
}

static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.online = cpufreq_online,
	.offline = cpufreq_offline,
	.register_em = cpufreq_register_em_with_opp,
	.name = "cpufreq-dt",
	.attr = cpufreq_dt_attr,
	.suspend = cpufreq_generic_suspend,
};

static int dt_cpufreq_early_init(struct device *dev, int cpu)
{
	struct private_data *priv;
	struct device *cpu_dev;
	bool fallback = false;
	const char *reg_name[] = { NULL, NULL };
	int ret;

	/* Check if this CPU is already covered by some other policy */
	if (cpufreq_dt_find_data(cpu))
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, priv->cpus);
	priv->cpu_dev = cpu_dev;

	/*
	 * OPP layer will be taking care of regulators now, but it needs to know
	 * the name of the regulator first.
	 */
	reg_name[0] = find_supply_name(cpu_dev);
	if (reg_name[0]) {
		priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
		if (priv->opp_token < 0) {
			ret = dev_err_probe(cpu_dev, priv->opp_token,
					    "failed to set regulators\n");
			goto free_cpumask;
		}
	}

	/* Get OPP-sharing information from "operating-points-v2" bindings */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
	if (ret) {
		if (ret != -ENOENT)
			goto out;

		/*
		 * operating-points-v2 not supported, fallback to all CPUs share
		 * OPP for backward compatibility if the platform hasn't set
		 * sharing CPUs.
		 */
		if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
			fallback = true;
	}

	/*
	 * Initialize OPP tables for all priv->cpus. They will be shared by
	 * all CPUs which have marked their CPUs shared with OPP bindings.
	 *
	 * For platforms not using operating-points-v2 bindings, we do this
	 * before updating priv->cpus. Otherwise, we will end up creating
	 * duplicate OPPs for the CPUs.
	 *
	 * OPPs might be populated at runtime, don't fail for error here unless
	 * it is -EPROBE_DEFER.
	 */
	ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);
	if (!ret) {
		priv->have_static_opps = true;
	} else if (ret == -EPROBE_DEFER) {
		goto out;
	}

	/*
	 * The OPP table must be initialized, statically or dynamically, by this
	 * point.
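	 * The OPP count check below acts as the sanity check for that.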
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "OPP table can't be empty\n");
		ret = -ENODEV;
		goto out;
	}

	if (fallback) {
		cpumask_setall(priv->cpus);
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
		if (ret)
			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
				__func__, ret);
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out;
	}

	list_add(&priv->node, &priv_list);
	return 0;

out:
	if (priv->have_static_opps)
		dev_pm_opp_of_cpumask_remove_table(priv->cpus);
	dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask:
	free_cpumask_var(priv->cpus);
	return ret;
}

static void dt_cpufreq_release(void)
{
	struct private_data *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &priv_list, node) {
		dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
		if (priv->have_static_opps)
			dev_pm_opp_of_cpumask_remove_table(priv->cpus);
		dev_pm_opp_put_regulators(priv->opp_token);
		free_cpumask_var(priv->cpus);
		list_del(&priv->node);
	}
}

static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
	int ret, cpu;

	/* Request resources early so we can return in case of -EPROBE_DEFER */
	for_each_possible_cpu(cpu) {
		ret = dt_cpufreq_early_init(&pdev->dev, cpu);
		if (ret)
			goto err;
	}

	if (data) {
		if (data->have_governor_per_policy)
			dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

		dt_cpufreq_driver.resume = data->resume;
		if (data->suspend)
			dt_cpufreq_driver.suspend = data->suspend;
		if (data->get_intermediate) {
			dt_cpufreq_driver.target_intermediate = data->target_intermediate;
			dt_cpufreq_driver.get_intermediate = data->get_intermediate;
		}
	}

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret) {
		dev_err(&pdev->dev, "failed register driver: %d\n", ret);
		goto err;
	}

	return 0;
err:
	dt_cpufreq_release();
	return ret;
}

static void dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	dt_cpufreq_release();
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name	= "cpufreq-dt",
	},
	.probe		= dt_cpufreq_probe,
	.remove_new	= dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");