// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
//		http://www.samsung.com/
//
// Exynos - CPU PMU (Power Management Unit) support

#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include "exynos-pmu.h"

#define PMUALIVE_MASK			GENMASK(13, 0)
#define TENSOR_SET_BITS			(BIT(15) | BIT(14))
#define TENSOR_CLR_BITS			BIT(15)
#define TENSOR_SMC_PMU_SEC_REG		0x82000504
#define TENSOR_PMUREG_READ		0
#define TENSOR_PMUREG_WRITE		1
#define TENSOR_PMUREG_RMW		2

struct exynos_pmu_context {
	struct device *dev;
	const struct exynos_pmu_data *pmu_data;
	struct regmap *pmureg;
};

void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;

/*
 * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
 * from EL3, but remain readable. As Linux needs to write some of these
 * registers, the following functions are provided and exposed via regmap.
 *
 * Note: this SMC interface is known to be implemented on gs101 and derivative
 * SoCs.
 */

/* Write to a protected PMU register. */
static int tensor_sec_reg_write(void *context, unsigned int reg,
				unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);

	/* Returns 0 on success, or -EINVAL if the access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/* Read/Modify/Write a protected PMU register. */
static int tensor_sec_reg_rmw(void *context, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);

	/* Returns 0 on success, or -EINVAL if the access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/*
 * Read a protected PMU register. All PMU registers can be read by Linux.
 * Note: the SMC read operation is not used, as only registers that can be
 * written are readable via SMC.
 */
static int tensor_sec_reg_read(void *context, unsigned int reg,
			       unsigned int *val)
{
	*val = pmu_raw_readl(reg);
	return 0;
}

/*
 * For SoCs that have set/clear bit hardware, this function can be used when
 * the PMU register will be accessed by multiple masters.
 *
 * For example, to set bits 13:8 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
 *
 * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
 *
 * A worked example of the encoding follows the function below.
 */
static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
				  u32 mask)
{
	int ret;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(mask & BIT(i)))
			continue;

		offset &= ~TENSOR_SET_BITS;

		if (val & BIT(i))
			offset |= TENSOR_SET_BITS;
		else
			offset |= TENSOR_CLR_BITS;

		ret = tensor_sec_reg_write(ctx, offset, i);
		if (ret)
			return ret;
	}

	/* All requested bits were updated (or the mask was empty). */
	return 0;
}
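/*
 * Illustrative walk-through (a sketch derived from the helpers above, not
 * extra driver code): the atomic set/clear hardware is addressed by folding
 * the operation into bits 15:14 of the register offset and passing the index
 * of the bit to change as the write value. The second example from the
 * comment above,
 *
 *	tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
 *
 * therefore expands into one secure write per bit in the mask:
 *
 *	tensor_sec_reg_write(ctx, 0xfe80, 8);	set bit 8   (0x3e80 | SET)
 *	tensor_sec_reg_write(ctx, 0xbe80, 9);	clear bit 9 (0x3e80 | CLR)
 *	...
 *	tensor_sec_reg_write(ctx, 0xbe80, 13);	clear bit 13
 */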

static bool tensor_is_atomic(unsigned int reg)
{
	/*
	 * Use atomic operations for PMU_ALIVE registers (offsets 0x0-0x3fff),
	 * as the target registers can be accessed by multiple masters. SFRs
	 * that don't support atomic set/clear are listed in the switch
	 * statement below.
	 */
	if (reg > PMUALIVE_MASK)
		return false;

	switch (reg) {
	case GS101_SYSIP_DAT0:
	case GS101_SYSTEM_CONFIGURATION:
		return false;
	default:
		return true;
	}
}

static int tensor_sec_update_bits(void *ctx, unsigned int reg,
				  unsigned int mask, unsigned int val)
{
	if (!tensor_is_atomic(reg))
		return tensor_sec_reg_rmw(ctx, reg, mask, val);

	return tensor_set_bits_atomic(ctx, reg, val, mask);
}
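/*
 * Note (illustrative, not extra driver code): because the gs101 regmap below
 * installs tensor_sec_update_bits() as its .reg_update_bits hook, a generic
 *
 *	regmap_update_bits(pmureg, reg, mask, val);
 *
 * on that regmap ends up here and becomes either one SMC read-modify-write
 * or a series of atomic set/clear writes, depending on tensor_is_atomic(reg).
 */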

void pmu_raw_writel(u32 val, u32 offset)
{
	writel_relaxed(val, pmu_base_addr + offset);
}

u32 pmu_raw_readl(u32 offset)
{
	return readl_relaxed(pmu_base_addr + offset);
}

void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;
	const struct exynos_pmu_data *pmu_data;

	if (!pmu_context || !pmu_context->pmu_data)
		return;

	pmu_data = pmu_context->pmu_data;

	if (pmu_data->powerdown_conf)
		pmu_data->powerdown_conf(mode);

	if (pmu_data->pmu_config) {
		for (i = 0; pmu_data->pmu_config[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
				       pmu_data->pmu_config[i].offset);
	}

	if (pmu_data->powerdown_conf_extra)
		pmu_data->powerdown_conf_extra(mode);

	if (pmu_data->pmu_config_extra) {
		for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
				       pmu_data->pmu_config_extra[i].offset);
	}
}
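/*
 * Illustrative sketch (not part of this file): platform suspend/cpuidle code
 * is expected to pick the low-power mode and call the function above before
 * entering it, so that the per-mode PMU configuration table is written out,
 * e.g.:
 *
 *	exynos_sys_powerdown_conf(SYS_SLEEP);	before system suspend
 *	exynos_sys_powerdown_conf(SYS_AFTR);	before an AFTR idle state
 *
 * SYS_SLEEP and SYS_AFTR are sys_powerdown modes declared in
 * <linux/soc/samsung/exynos-pmu.h>.
 */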

/*
 * Only reference the per-SoC PMU configuration data when the 32-bit ARM
 * drivers are built, as it is relatively big and useless on other
 * architectures.
 */
#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
#define exynos_pmu_data_arm_ptr(data)	(&data)
#else
#define exynos_pmu_data_arm_ptr(data)	NULL
#endif

static const struct regmap_config regmap_smccfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
	.reg_read = tensor_sec_reg_read,
	.reg_write = tensor_sec_reg_write,
	.reg_update_bits = tensor_sec_update_bits,
};

static const struct regmap_config regmap_mmiocfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
};

static const struct exynos_pmu_data gs101_pmu_data = {
	.pmu_secure = true
};

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id exynos_pmu_of_device_ids[] = {
	{
		.compatible = "google,gs101-pmu",
		.data = &gs101_pmu_data,
	}, {
		.compatible = "samsung,exynos3250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
	}, {
		.compatible = "samsung,exynos5410-pmu",
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
	}, {
		.compatible = "samsung,exynos5433-pmu",
	}, {
		.compatible = "samsung,exynos7-pmu",
	}, {
		.compatible = "samsung,exynos850-pmu",
	},
	{ /* sentinel */ },
};

static const struct mfd_cell exynos_pmu_devs[] = {
	{ .name = "exynos-clkout", },
};

/**
 * exynos_get_pmu_regmap() - Obtain the PMU regmap
 *
 * Find the PMU regmap previously configured in probe() and return a pointer
 * to it.
 *
 * Return: A pointer to the regmap if found, or an ERR_PTR() error value.
 */
struct regmap *exynos_get_pmu_regmap(void)
{
	struct device_node *np = of_find_matching_node(NULL,
						      exynos_pmu_of_device_ids);
	if (np)
		return exynos_get_pmu_regmap_by_phandle(np, NULL);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
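/*
 * Illustrative sketch (not part of this driver): a caller that has no PMU
 * phandle of its own can simply do
 *
 *	struct regmap *pmureg = exynos_get_pmu_regmap();
 *	unsigned int val;
 *
 *	if (!IS_ERR(pmureg))
 *		regmap_read(pmureg, SOME_PMU_OFFSET, &val);
 *
 * where SOME_PMU_OFFSET stands in for a real register offset from
 * <linux/soc/samsung/exynos-regs-pmu.h>.
 */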

/**
 * exynos_get_pmu_regmap_by_phandle() - Obtain the PMU regmap via a phandle
 * @np: Device node holding the PMU phandle property
 * @propname: Name of the property holding the phandle value
 *
 * Find the PMU regmap previously configured in probe() and return a pointer
 * to it. A usage sketch follows the function.
 *
 * Return: A pointer to the regmap if found, or an ERR_PTR() error value.
 */
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
						const char *propname)
{
	struct exynos_pmu_context *ctx;
	struct device_node *pmu_np;
	struct device *dev;

	if (propname)
		pmu_np = of_parse_phandle(np, propname, 0);
	else
		pmu_np = np;

	if (!pmu_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Determine whether the exynos-pmu device has probed and, therefore,
	 * the regmap has been created and can be returned to the caller.
	 * Otherwise return -EPROBE_DEFER.
	 */
	dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
					    (void *)pmu_np);

	if (propname)
		of_node_put(pmu_np);

	if (!dev)
		return ERR_PTR(-EPROBE_DEFER);

	ctx = dev_get_drvdata(dev);

	return ctx->pmureg;
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
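/*
 * Usage sketch (illustrative only, not part of this driver): a consumer
 * driver whose node carries a phandle to the PMU, here assumed to be named
 * "samsung,pmu-syscon", might do the following in its probe():
 *
 *	struct regmap *pmureg;
 *
 *	pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node,
 *						  "samsung,pmu-syscon");
 *	if (IS_ERR(pmureg))
 *		return PTR_ERR(pmureg);		(may be -EPROBE_DEFER)
 *
 *	regmap_update_bits(pmureg, SOME_PMU_OFFSET, SOME_MASK, SOME_VAL);
 *
 * The property and register names are placeholders; on gs101 the update goes
 * through the SMC helpers above, elsewhere through plain MMIO.
 */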

static int exynos_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap_config pmu_regmcfg;
	struct regmap *regmap;
	struct resource *res;
	int ret;

	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pmu_base_addr))
		return PTR_ERR(pmu_base_addr);

	pmu_context = devm_kzalloc(dev, sizeof(*pmu_context), GFP_KERNEL);
	if (!pmu_context)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pmu_context->pmu_data = of_device_get_match_data(dev);

	/* SoCs that secure PMU register writes use the custom SMC regmap */
	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
		pmu_regmcfg = regmap_smccfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		/* Need physical address for SMC call */
		regmap = devm_regmap_init(dev, NULL,
					  (void *)(uintptr_t)res->start,
					  &pmu_regmcfg);
	} else {
		/* All other SoCs use an MMIO regmap */
		pmu_regmcfg = regmap_mmiocfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		regmap = devm_regmap_init_mmio(dev, pmu_base_addr,
					       &pmu_regmcfg);
	}

	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap),
				     "regmap init failed\n");

	pmu_context->pmureg = regmap;
	pmu_context->dev = dev;

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
		pmu_context->pmu_data->pmu_init();

	platform_set_drvdata(pdev, pmu_context);

	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
				   ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
	if (ret)
		return ret;

	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");

	dev_dbg(dev, "Exynos PMU Driver probe done\n");
	return 0;
}

static struct platform_driver exynos_pmu_driver = {
	.driver  = {
		.name   = "exynos-pmu",
		.of_match_table = exynos_pmu_of_device_ids,
	},
	.probe = exynos_pmu_probe,
};

static int __init exynos_pmu_init(void)
{
	return platform_driver_register(&exynos_pmu_driver);
}
postcore_initcall(exynos_pmu_init);