// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 MediaTek Inc.
 */

#include <linux/bitfield.h>
#include <linux/cpufreq.h>
#include <linux/energy_model.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

/* Hardware lookup table (LUT) geometry: up to 32 rows of 4 bytes each. */
#define LUT_MAX_ENTRIES 32U
/* Frequency field of a LUT row; multiplied by 1000 to get kHz on readout. */
#define LUT_FREQ GENMASK(11, 0)
#define LUT_ROW_SIZE 0x4
/* Status bits polled in REG_FREQ_HW_STATE after enabling the HW. */
#define CPUFREQ_HW_STATUS BIT(0)
#define SVS_HW_STATUS BIT(1)
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000

/* Divisor applied to a kHz request before writing it to the FDVFS frontend. */
#define FDVFS_FDIV_HZ (26 * 1000)

/* Indices into mtk_cpufreq_variant::reg_offsets / mtk_cpufreq_domain::reg_bases. */
enum {
	REG_FREQ_LUT_TABLE,
	REG_FREQ_ENABLE,
	REG_FREQ_PERF_STATE,
	REG_FREQ_HW_STATE,
	REG_EM_POWER_TBL,
	REG_FREQ_LATENCY,

	REG_ARRAY_SIZE,
};

/**
 * struct mtk_cpufreq_priv - per-device driver state
 * @dev:     the cpufreq-hw platform device
 * @variant: SoC-specific register layout and optional init hook
 * @fdvfs:   FDVFS MMIO base for hybrid-DVFS variants; NULL otherwise
 */
struct mtk_cpufreq_priv {
	struct device *dev;
	const struct mtk_cpufreq_variant *variant;
	void __iomem *fdvfs;
};

/**
 * struct mtk_cpufreq_domain - per-performance-domain state
 * @parent:    backpointer to the device-wide state
 * @table:     frequency table built from the HW LUT (CPUFREQ_TABLE_END terminated)
 * @reg_bases: resolved per-register MMIO addresses (base + variant offsets)
 * @res:       the mem resource claimed for this domain
 * @base:      ioremapped base of @res
 * @nr_opp:    number of valid entries in @table
 */
struct mtk_cpufreq_domain {
	struct mtk_cpufreq_priv *parent;
	struct cpufreq_frequency_table *table;
	void __iomem *reg_bases[REG_ARRAY_SIZE];
	struct resource *res;
	void __iomem *base;
	int nr_opp;
};

/**
 * struct mtk_cpufreq_variant - SoC-specific configuration
 * @init:           optional hook run once at probe (e.g. to map FDVFS)
 * @reg_offsets:    register offsets from the domain MMIO base
 * @is_hybrid_dvfs: first declared register range is FDVFS, domains follow
 */
struct mtk_cpufreq_variant {
	int (*init)(struct mtk_cpufreq_priv *priv);
	const u16 reg_offsets[REG_ARRAY_SIZE];
	const bool is_hybrid_dvfs;
};

static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
	.reg_offsets = {
		[REG_FREQ_LUT_TABLE] = 0x0,
		[REG_FREQ_ENABLE] = 0x84,
		[REG_FREQ_PERF_STATE] = 0x88,
		[REG_FREQ_HW_STATE] = 0x8c,
		[REG_EM_POWER_TBL] = 0x90,
		[REG_FREQ_LATENCY] = 0x110,
	},
};

/* MT8196 init hook: map the FDVFS range (index 0 in the DT reg property). */
static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
{
	priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
	if (IS_ERR(priv->fdvfs))
		return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
				     "failed to get fdvfs iomem\n");

	return 0;
}

static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = { 83 .init = mtk_cpufreq_hw_mt8196_init, 84 .reg_offsets = { 85 [REG_FREQ_LUT_TABLE] = 0x0, 86 [REG_FREQ_ENABLE] = 0x84, 87 [REG_FREQ_PERF_STATE] = 0x88, 88 [REG_FREQ_HW_STATE] = 0x8c, 89 [REG_EM_POWER_TBL] = 0x90, 90 [REG_FREQ_LATENCY] = 0x114, 91 }, 92 .is_hybrid_dvfs = true, 93 }; 94 95 static int __maybe_unused 96 mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW, 97 unsigned long *KHz) 98 { 99 struct mtk_cpufreq_domain *data; 100 struct cpufreq_policy *policy; 101 int i; 102 103 policy = cpufreq_cpu_get_raw(cpu_dev->id); 104 if (!policy) 105 return -EINVAL; 106 107 data = policy->driver_data; 108 109 for (i = 0; i < data->nr_opp; i++) { 110 if (data->table[i].frequency < *KHz) 111 break; 112 } 113 i--; 114 115 *KHz = data->table[i].frequency; 116 /* Provide micro-Watts value to the Energy Model */ 117 *uW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] + 118 i * LUT_ROW_SIZE); 119 120 return 0; 121 } 122 123 static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq, 124 struct cpufreq_policy *policy) 125 { 126 struct mtk_cpufreq_domain *data = policy->driver_data; 127 struct mtk_cpufreq_priv *priv = data->parent; 128 unsigned int cpu; 129 130 target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ); 131 for_each_cpu(cpu, policy->real_cpus) { 132 writel_relaxed(target_freq, priv->fdvfs + cpu * 4); 133 } 134 } 135 136 static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy, 137 unsigned int index) 138 { 139 struct mtk_cpufreq_domain *data = policy->driver_data; 140 unsigned int target_freq; 141 142 if (data->parent->fdvfs) { 143 target_freq = policy->freq_table[index].frequency; 144 mtk_cpufreq_hw_fdvfs_switch(target_freq, policy); 145 } else { 146 writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); 147 } 148 149 return 0; 150 } 151 152 static unsigned int mtk_cpufreq_hw_get(unsigned int cpu) 153 { 154 struct mtk_cpufreq_domain *data; 155 struct 
cpufreq_policy *policy; 156 unsigned int index; 157 158 policy = cpufreq_cpu_get_raw(cpu); 159 if (!policy) 160 return 0; 161 162 data = policy->driver_data; 163 164 index = readl_relaxed(data->reg_bases[REG_FREQ_PERF_STATE]); 165 index = min(index, LUT_MAX_ENTRIES - 1); 166 167 return data->table[index].frequency; 168 } 169 170 static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, 171 unsigned int target_freq) 172 { 173 struct mtk_cpufreq_domain *data = policy->driver_data; 174 unsigned int index; 175 176 index = cpufreq_table_find_index_dl(policy, target_freq, false); 177 178 if (data->parent->fdvfs) 179 mtk_cpufreq_hw_fdvfs_switch(target_freq, policy); 180 else 181 writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); 182 183 return policy->freq_table[index].frequency; 184 } 185 186 static int mtk_cpu_create_freq_table(struct platform_device *pdev, 187 struct mtk_cpufreq_domain *data) 188 { 189 struct device *dev = &pdev->dev; 190 u32 temp, i, freq, prev_freq = 0; 191 void __iomem *base_table; 192 193 data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1, 194 sizeof(*data->table), GFP_KERNEL); 195 if (!data->table) 196 return -ENOMEM; 197 198 base_table = data->reg_bases[REG_FREQ_LUT_TABLE]; 199 200 for (i = 0; i < LUT_MAX_ENTRIES; i++) { 201 temp = readl_relaxed(base_table + (i * LUT_ROW_SIZE)); 202 freq = FIELD_GET(LUT_FREQ, temp) * 1000; 203 204 if (freq == prev_freq) 205 break; 206 207 data->table[i].frequency = freq; 208 209 dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency); 210 211 prev_freq = freq; 212 } 213 214 data->table[i].frequency = CPUFREQ_TABLE_END; 215 data->nr_opp = i; 216 217 return 0; 218 } 219 220 static int mtk_cpu_resources_init(struct platform_device *pdev, 221 struct cpufreq_policy *policy, 222 struct mtk_cpufreq_priv *priv) 223 { 224 struct mtk_cpufreq_domain *data; 225 struct device *dev = &pdev->dev; 226 struct resource *res; 227 struct of_phandle_args args; 228 void __iomem *base; 229 int 
ret, i; 230 int index; 231 232 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 233 if (!data) 234 return -ENOMEM; 235 236 ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains", 237 "#performance-domain-cells", 238 policy->cpus, &args); 239 if (ret < 0) 240 return ret; 241 242 index = args.args[0]; 243 of_node_put(args.np); 244 245 /* 246 * In a cpufreq with hybrid DVFS, such as the MT8196, the first declared 247 * register range is for FDVFS, followed by the frequency domain MMIOs. 248 */ 249 if (priv->variant->is_hybrid_dvfs) 250 index++; 251 252 data->parent = priv; 253 254 res = platform_get_resource(pdev, IORESOURCE_MEM, index); 255 if (!res) { 256 dev_err(dev, "failed to get mem resource %d\n", index); 257 return -ENODEV; 258 } 259 260 if (!request_mem_region(res->start, resource_size(res), res->name)) { 261 dev_err(dev, "failed to request resource %pR\n", res); 262 return -EBUSY; 263 } 264 265 base = ioremap(res->start, resource_size(res)); 266 if (!base) { 267 dev_err(dev, "failed to map resource %pR\n", res); 268 ret = -ENOMEM; 269 goto release_region; 270 } 271 272 data->base = base; 273 data->res = res; 274 275 for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++) 276 data->reg_bases[i] = base + priv->variant->reg_offsets[i]; 277 278 ret = mtk_cpu_create_freq_table(pdev, data); 279 if (ret) { 280 dev_info(dev, "Domain-%d failed to create freq table\n", index); 281 return ret; 282 } 283 284 policy->freq_table = data->table; 285 policy->driver_data = data; 286 287 return 0; 288 release_region: 289 release_mem_region(res->start, resource_size(res)); 290 return ret; 291 } 292 293 static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) 294 { 295 struct platform_device *pdev = cpufreq_get_driver_data(); 296 int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS; 297 struct mtk_cpufreq_domain *data; 298 unsigned int latency; 299 int ret; 300 301 /* Get the bases of cpufreq for domains */ 302 ret = mtk_cpu_resources_init(pdev, 
policy, platform_get_drvdata(pdev)); 303 if (ret) { 304 dev_info(&pdev->dev, "CPUFreq resource init failed\n"); 305 return ret; 306 } 307 308 data = policy->driver_data; 309 310 latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000; 311 if (!latency) 312 latency = CPUFREQ_ETERNAL; 313 314 policy->cpuinfo.transition_latency = latency; 315 policy->fast_switch_possible = true; 316 317 /* HW should be in enabled state to proceed now */ 318 writel_relaxed(0x1, data->reg_bases[REG_FREQ_ENABLE]); 319 if (readl_poll_timeout(data->reg_bases[REG_FREQ_HW_STATE], sig, 320 (sig & pwr_hw) == pwr_hw, POLL_USEC, 321 TIMEOUT_USEC)) { 322 if (!(sig & CPUFREQ_HW_STATUS)) { 323 pr_info("cpufreq hardware of CPU%d is not enabled\n", 324 policy->cpu); 325 return -ENODEV; 326 } 327 328 pr_info("SVS of CPU%d is not enabled\n", policy->cpu); 329 } 330 331 return 0; 332 } 333 334 static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) 335 { 336 struct mtk_cpufreq_domain *data = policy->driver_data; 337 struct resource *res = data->res; 338 void __iomem *base = data->base; 339 340 /* HW should be in paused state now */ 341 writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]); 342 iounmap(base); 343 release_mem_region(res->start, resource_size(res)); 344 } 345 346 static void mtk_cpufreq_register_em(struct cpufreq_policy *policy) 347 { 348 struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power); 349 struct mtk_cpufreq_domain *data = policy->driver_data; 350 351 em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp, 352 &em_cb, policy->cpus, true); 353 } 354 355 static struct cpufreq_driver cpufreq_mtk_hw_driver = { 356 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | 357 CPUFREQ_HAVE_GOVERNOR_PER_POLICY | 358 CPUFREQ_IS_COOLING_DEV, 359 .verify = cpufreq_generic_frequency_table_verify, 360 .target_index = mtk_cpufreq_hw_target_index, 361 .get = mtk_cpufreq_hw_get, 362 .init = mtk_cpufreq_hw_cpu_init, 363 .exit = mtk_cpufreq_hw_cpu_exit, 364 
.register_em = mtk_cpufreq_register_em, 365 .fast_switch = mtk_cpufreq_hw_fast_switch, 366 .name = "mtk-cpufreq-hw", 367 }; 368 369 static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) 370 { 371 struct mtk_cpufreq_priv *priv; 372 const void *data; 373 int ret, cpu; 374 struct device *cpu_dev; 375 struct regulator *cpu_reg; 376 377 /* Make sure that all CPU supplies are available before proceeding. */ 378 for_each_present_cpu(cpu) { 379 cpu_dev = get_cpu_device(cpu); 380 if (!cpu_dev) 381 return dev_err_probe(&pdev->dev, -EPROBE_DEFER, 382 "Failed to get cpu%d device\n", cpu); 383 384 cpu_reg = devm_regulator_get(cpu_dev, "cpu"); 385 if (IS_ERR(cpu_reg)) 386 return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg), 387 "CPU%d regulator get failed\n", cpu); 388 } 389 390 391 data = of_device_get_match_data(&pdev->dev); 392 if (!data) 393 return -EINVAL; 394 395 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 396 if (!priv) 397 return -ENOMEM; 398 399 priv->variant = data; 400 priv->dev = &pdev->dev; 401 402 if (priv->variant->init) { 403 ret = priv->variant->init(priv); 404 if (ret) 405 return ret; 406 } 407 408 platform_set_drvdata(pdev, priv); 409 cpufreq_mtk_hw_driver.driver_data = pdev; 410 411 ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver); 412 if (ret) 413 dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n"); 414 415 return ret; 416 } 417 418 static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev) 419 { 420 cpufreq_unregister_driver(&cpufreq_mtk_hw_driver); 421 } 422 423 static const struct of_device_id mtk_cpufreq_hw_match[] = { 424 { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant }, 425 { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant }, 426 {} 427 }; 428 MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match); 429 430 static struct platform_driver mtk_cpufreq_hw_driver = { 431 .probe = mtk_cpufreq_hw_driver_probe, 432 .remove = mtk_cpufreq_hw_driver_remove, 433 
.driver = { 434 .name = "mtk-cpufreq-hw", 435 .of_match_table = mtk_cpufreq_hw_match, 436 }, 437 }; 438 module_platform_driver(mtk_cpufreq_hw_driver); 439 440 MODULE_AUTHOR("Hector Yuan <hector.yuan@mediatek.com>"); 441 MODULE_DESCRIPTION("Mediatek cpufreq-hw driver"); 442 MODULE_LICENSE("GPL v2"); 443