// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
// Exynos - CPU PMU (Power Management Unit) support

#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/cpuhotplug.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include "exynos-pmu.h"

#define PMUALIVE_MASK			GENMASK(13, 0)
#define TENSOR_SET_BITS			(BIT(15) | BIT(14))
#define TENSOR_CLR_BITS			BIT(15)
#define TENSOR_SMC_PMU_SEC_REG		0x82000504
#define TENSOR_PMUREG_READ		0
#define TENSOR_PMUREG_WRITE		1
#define TENSOR_PMUREG_RMW		2

struct exynos_pmu_context {
	struct device *dev;
	const struct exynos_pmu_data *pmu_data;
	struct regmap *pmureg;		/* PMU register block (MMIO or SMC backed) */
	struct regmap *pmuintrgen;	/* PMU interrupt generation block (gs101 CPU hotplug) */
};

void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;

/*
 * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
 * from EL3, but remain readable from the normal world. As Linux needs to
 * write some of these registers, the following functions are provided and
 * exposed via regmap.
 *
 * Note: This SMC interface is known to be implemented on gs101 and derivative
 * SoCs.
 */
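/*
 * Sketch of the SMC argument layout as used by the helpers below. This is
 * inferred from the arm_smccc_smc() calls themselves, not from a published
 * ABI document:
 *
 *	a0: TENSOR_SMC_PMU_SEC_REG (vendor SMC function ID)
 *	a1: physical address of the PMU register
 *	a2: operation (TENSOR_PMUREG_READ/WRITE/RMW)
 *	a3: value to write, or mask for an RMW operation
 *	a4: value for an RMW operation
 *
 * The firmware returns 0 in a0 on success, or -EINVAL if the access is not
 * allowed.
 */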

/* Write to a protected PMU register. */
static int tensor_sec_reg_write(void *context, unsigned int reg,
				unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);

	/* The SMC returns 0 on success or -EINVAL if access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/* Read/Modify/Write a protected PMU register. */
static int tensor_sec_reg_rmw(void *context, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);

	/* The SMC returns 0 on success or -EINVAL if access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/*
 * Read a protected PMU register. All PMU registers can be read by Linux.
 * Note: the SMC read operation is not used, as only registers that can be
 * written are readable via SMC.
 */
static int tensor_sec_reg_read(void *context, unsigned int reg,
			       unsigned int *val)
{
	*val = pmu_raw_readl(reg);
	return 0;
}

/*
 * For SoCs that have set/clear bit hardware, this function can be used when
 * the PMU register may be accessed by multiple masters.
 *
 * For example, to set bits 13:8 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
 *
 * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
 */
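/*
 * How the atomic interface works (inferred from TENSOR_SET_BITS /
 * TENSOR_CLR_BITS and the loop below, not from a datasheet): for each bit
 * selected by @mask, one SMC write is issued in which bits 15:14 of the
 * register offset encode the set or clear command and the written value is
 * the index of the bit to change. For instance, setting bit 8 of offset
 * 0x3e80 becomes roughly:
 *
 *	tensor_sec_reg_write(ctx, 0x3e80 | TENSOR_SET_BITS, 8);
 */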
static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
				  u32 mask)
{
	int ret;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(mask & BIT(i)))
			continue;

		/* Select the set or clear command in the offset's top bits. */
		offset &= ~TENSOR_SET_BITS;

		if (val & BIT(i))
			offset |= TENSOR_SET_BITS;
		else
			offset |= TENSOR_CLR_BITS;

		/* The written value is the index of the bit to set/clear. */
		ret = tensor_sec_reg_write(ctx, offset, i);
		if (ret)
			return ret;
	}
	return 0;
}

static bool tensor_is_atomic(unsigned int reg)
{
	/*
	 * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
	 * as the target registers can be accessed by multiple masters. SFRs
	 * that don't support atomic set/clear are listed in the switch
	 * statement below.
	 */
	if (reg > PMUALIVE_MASK)
		return false;

	switch (reg) {
	case GS101_SYSIP_DAT0:
	case GS101_SYSTEM_CONFIGURATION:
		return false;
	default:
		return true;
	}
}

static int tensor_sec_update_bits(void *ctx, unsigned int reg,
				  unsigned int mask, unsigned int val)
{
	if (!tensor_is_atomic(reg))
		return tensor_sec_reg_rmw(ctx, reg, mask, val);

	return tensor_set_bits_atomic(ctx, reg, val, mask);
}

void pmu_raw_writel(u32 val, u32 offset)
{
	writel_relaxed(val, pmu_base_addr + offset);
}

u32 pmu_raw_readl(u32 offset)
{
	return readl_relaxed(pmu_base_addr + offset);
}

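/*
 * Apply the per-SoC PMU register configuration for the given low-power
 * mode. A typical caller is the platform suspend/cpuidle code, for example
 * (illustrative, using the SYS_SLEEP value from enum sys_powerdown):
 *
 *	exynos_sys_powerdown_conf(SYS_SLEEP);
 */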
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;
	const struct exynos_pmu_data *pmu_data;

	if (!pmu_context || !pmu_context->pmu_data)
		return;

	pmu_data = pmu_context->pmu_data;

	if (pmu_data->powerdown_conf)
		pmu_data->powerdown_conf(mode);

	if (pmu_data->pmu_config) {
		for (i = 0; pmu_data->pmu_config[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
				       pmu_data->pmu_config[i].offset);
	}

	if (pmu_data->powerdown_conf_extra)
		pmu_data->powerdown_conf_extra(mode);

	if (pmu_data->pmu_config_extra) {
		for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
				       pmu_data->pmu_config_extra[i].offset);
	}
}

/*
 * Split out the ARM-specific PMU data because it is relatively big and
 * unused on other architectures.
 */
#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
#define exynos_pmu_data_arm_ptr(data)	(&data)
#else
#define exynos_pmu_data_arm_ptr(data)	NULL
#endif

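/*
 * Regmap configuration used on SoCs whose PMU registers are write-protected
 * at EL3. The reg_read/reg_write/reg_update_bits callbacks route every
 * access through the SMC helpers above, so consumers can keep using the
 * normal regmap API unchanged; e.g. (illustrative) regmap_write(pmureg,
 * reg, val) ends up in tensor_sec_reg_write().
 */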
static const struct regmap_config regmap_smccfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
	.reg_read = tensor_sec_reg_read,
	.reg_write = tensor_sec_reg_write,
	.reg_update_bits = tensor_sec_update_bits,
};

static const struct exynos_pmu_data gs101_pmu_data = {
	.pmu_secure = true,
	.pmu_cpuhp = true,
};

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id exynos_pmu_of_device_ids[] = {
	{
		.compatible = "google,gs101-pmu",
		.data = &gs101_pmu_data,
	}, {
		.compatible = "samsung,exynos3250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
	}, {
		.compatible = "samsung,exynos5410-pmu",
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
	}, {
		.compatible = "samsung,exynos5433-pmu",
	}, {
		.compatible = "samsung,exynos7-pmu",
	}, {
		.compatible = "samsung,exynos850-pmu",
	},
	{ /*sentinel*/ },
};

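/*
 * Child devices instantiated on top of the PMU. exynos-clkout is modelled
 * as an MFD child because its CLKOUT control logic lives inside the PMU
 * register block.
 */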
static const struct mfd_cell exynos_pmu_devs[] = {
	{ .name = "exynos-clkout", },
};

/**
 * exynos_get_pmu_regmap() - Obtain pmureg regmap
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap(void)
{
	struct device_node *np = of_find_matching_node(NULL,
						       exynos_pmu_of_device_ids);

	if (np)
		return exynos_get_pmu_regmap_by_phandle(np, NULL);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);

/**
 * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
 * @np: Device node holding PMU phandle property
 * @propname: Name of property holding phandle value
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
						const char *propname)
{
	struct device_node *pmu_np;
	struct device *dev;

	if (propname)
		pmu_np = of_parse_phandle(np, propname, 0);
	else
		pmu_np = np;

	if (!pmu_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Determine if the exynos-pmu device has probed and therefore the
	 * regmap has been created and can be returned to the caller.
	 * Otherwise return -EPROBE_DEFER.
	 */
	dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
					    (void *)pmu_np);

	if (propname)
		of_node_put(pmu_np);

	if (!dev)
		return ERR_PTR(-EPROBE_DEFER);

	return syscon_node_to_regmap(pmu_np);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
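
/*
 * Example consumer usage (illustrative only; the property name depends on
 * the consumer's devicetree binding):
 *
 *	regmap = exynos_get_pmu_regmap_by_phandle(dev->of_node,
 *						   "samsung,pmu-syscon");
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);
 */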

/*
 * CPU_INFORM register hint values which are used by
 * EL3 firmware (el3mon).
 */
#define CPU_INFORM_CLEAR	0
#define CPU_INFORM_C2		1

static int gs101_cpuhp_pmu_online(unsigned int cpu)
{
	unsigned int cpuhint = smp_processor_id();
	u32 reg, mask;

	/* clear cpu inform hint */
	regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
		     CPU_INFORM_CLEAR);

	mask = BIT(cpu);

	regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
			   mask, (0 << cpu));

	regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);

	regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
		     reg & mask);

	return 0;
}

static int gs101_cpuhp_pmu_offline(unsigned int cpu)
{
	u32 reg, mask;
	unsigned int cpuhint = smp_processor_id();

	/* set cpu inform hint */
	regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
		     CPU_INFORM_C2);

	mask = BIT(cpu);
	regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
			   mask, BIT(cpu));

	regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
	regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
		     reg & mask);

	mask = BIT(cpu + 8);
	regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
	regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
		     reg & mask);
	return 0;
}

static int exynos_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap_config pmu_regmcfg;
	struct regmap *regmap;
	struct resource *res;
	int ret;

	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pmu_base_addr))
		return PTR_ERR(pmu_base_addr);

	pmu_context = devm_kzalloc(&pdev->dev,
				   sizeof(struct exynos_pmu_context),
				   GFP_KERNEL);
	if (!pmu_context)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pmu_context->pmu_data = of_device_get_match_data(dev);

	/* For SoCs with secure PMU register writes, use the custom regmap. */
	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
		pmu_regmcfg = regmap_smccfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		/* Need physical address for SMC call */
		regmap = devm_regmap_init(dev, NULL,
					  (void *)(uintptr_t)res->start,
					  &pmu_regmcfg);

		if (IS_ERR(regmap))
			return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
					     "regmap init failed\n");

		ret = of_syscon_register_regmap(dev->of_node, regmap);
		if (ret)
			return ret;
	} else {
		/* let syscon create the MMIO regmap */
		regmap = syscon_node_to_regmap(dev->of_node);
		if (IS_ERR(regmap))
			return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
					     "syscon_node_to_regmap failed\n");
	}

	pmu_context->pmureg = regmap;
	pmu_context->dev = dev;

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
		pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node,
							"google,pmu-intr-gen-syscon");
		if (IS_ERR(pmu_context->pmuintrgen)) {
			/*
			 * To maintain support for older DTs that didn't
			 * specify the syscon phandle, just issue a warning
			 * rather than fail to probe.
			 */
			dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n");
		} else {
			cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
					  "soc/exynos-pmu:prepare",
					  gs101_cpuhp_pmu_online, NULL);

			cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					  "soc/exynos-pmu:online",
					  NULL, gs101_cpuhp_pmu_offline);
		}
	}

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
		pmu_context->pmu_data->pmu_init();

	platform_set_drvdata(pdev, pmu_context);

	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
				   ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
	if (ret)
		return ret;

	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");

	dev_dbg(dev, "Exynos PMU Driver probe done\n");
	return 0;
}

static struct platform_driver exynos_pmu_driver = {
	.driver  = {
		.name   = "exynos-pmu",
		.of_match_table = exynos_pmu_of_device_ids,
	},
	.probe = exynos_pmu_probe,
};

static int __init exynos_pmu_init(void)
{
	return platform_driver_register(&exynos_pmu_driver);
}
postcore_initcall(exynos_pmu_init);