xref: /linux/drivers/soc/samsung/exynos-pmu.c (revision 60a2f25de7b8b785baee2932db932ae9a5b8c86d)
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
//		http://www.samsung.com/
//
// Exynos - CPU PMU (Power Management Unit) support

#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include "exynos-pmu.h"

#define PMUALIVE_MASK			GENMASK(13, 0)
#define TENSOR_SET_BITS			(BIT(15) | BIT(14))
#define TENSOR_CLR_BITS			BIT(15)
#define TENSOR_SMC_PMU_SEC_REG		0x82000504
#define TENSOR_PMUREG_READ		0
#define TENSOR_PMUREG_WRITE		1
#define TENSOR_PMUREG_RMW		2

struct exynos_pmu_context {
	struct device *dev;
	const struct exynos_pmu_data *pmu_data;
	struct regmap *pmureg;
};

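/*
 * Virtual base address of the PMU block; mapped in exynos_pmu_probe() and
 * used by the pmu_raw_readl()/pmu_raw_writel() accessors below.
 */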
void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;

/*
 * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
 * from EL3, but remain readable. As Linux needs to write some of these
 * registers, the following functions are provided and exposed via regmap.
 *
 * Note: This SMC interface is known to be implemented on gs101 and derivative
 * SoCs.
 */
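
/*
 * SMC calling convention used by the helpers below:
 *   a0 = TENSOR_SMC_PMU_SEC_REG (SiP service call ID)
 *   a1 = physical address of the PMU register
 *   a2 = access type (TENSOR_PMUREG_READ/WRITE/RMW)
 *   a3 = value to write, or mask for a read-modify-write
 *   a4 = value for a read-modify-write
 * The secure firmware returns 0 on success in a0, or -EINVAL if the access
 * is not permitted.
 */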

/* Write to a protected PMU register. */
static int tensor_sec_reg_write(void *context, unsigned int reg,
				unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);

	/* The SMC returns 0 on success, or -EINVAL if access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/* Read/Modify/Write a protected PMU register. */
static int tensor_sec_reg_rmw(void *context, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);

	/* The SMC returns 0 on success, or -EINVAL if access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/*
 * Read a protected PMU register. All PMU registers can be read by Linux.
 * Note: The SMC read register is not used, as only registers that can be
 * written are readable via SMC.
 */
static int tensor_sec_reg_read(void *context, unsigned int reg,
			       unsigned int *val)
{
	*val = pmu_raw_readl(reg);
	return 0;
}

/*
 * For SoCs that have atomic set/clear bit hardware this function can be used
 * when a PMU register is accessed by multiple masters. The hardware is
 * addressed by encoding bits 15:14 into the register offset: with
 * TENSOR_SET_BITS the value written is the index of the bit to set, and with
 * TENSOR_CLR_BITS it is the index of the bit to clear.
 *
 * For example, to set bits 13:8 in PMU reg offset 0x3e80:
 *	tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
 *
 * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
 *	tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
 */
static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
				  u32 mask)
{
	int ret;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(mask & BIT(i)))
			continue;

		offset &= ~TENSOR_SET_BITS;

		if (val & BIT(i))
			offset |= TENSOR_SET_BITS;
		else
			offset |= TENSOR_CLR_BITS;

		ret = tensor_sec_reg_write(ctx, offset, i);
		if (ret)
			return ret;
	}
	return 0;
}

static int tensor_sec_update_bits(void *ctx, unsigned int reg,
				  unsigned int mask, unsigned int val)
{
	/*
	 * Use the atomic set/clear hardware for PMU_ALIVE registers (offsets
	 * 0x0-0x3FFF), as those registers can be accessed by multiple
	 * masters. All other registers go through a plain read-modify-write.
	 */
	if (reg > PMUALIVE_MASK)
		return tensor_sec_reg_rmw(ctx, reg, mask, val);

	return tensor_set_bits_atomic(ctx, reg, val, mask);
}

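/*
 * Raw accessors for the PMU block. These bypass the regmap and use the
 * relaxed MMIO helpers (no implicit barriers) on the globally mapped
 * pmu_base_addr; they are non-static so that code outside this file can
 * share them.
 */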
void pmu_raw_writel(u32 val, u32 offset)
{
	writel_relaxed(val, pmu_base_addr + offset);
}

u32 pmu_raw_readl(u32 offset)
{
	return readl_relaxed(pmu_base_addr + offset);
}

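/**
 * exynos_sys_powerdown_conf() - Apply PMU settings for a system power mode
 * @mode: the system power down mode being entered (enum sys_powerdown)
 *
 * Run the SoC specific callbacks and write the per-mode PMU configuration
 * tables matched at probe time. Does nothing if the driver has not probed
 * yet or no per-SoC data is available.
 */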
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;
	const struct exynos_pmu_data *pmu_data;

	if (!pmu_context || !pmu_context->pmu_data)
		return;

	pmu_data = pmu_context->pmu_data;

	if (pmu_data->powerdown_conf)
		pmu_data->powerdown_conf(mode);

	if (pmu_data->pmu_config) {
		for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
			pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
					pmu_data->pmu_config[i].offset);
	}

	if (pmu_data->powerdown_conf_extra)
		pmu_data->powerdown_conf_extra(mode);

	if (pmu_data->pmu_config_extra) {
		for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
				       pmu_data->pmu_config_extra[i].offset);
	}
}

/*
 * The SoC specific PMU configuration tables are only built for 32-bit ARM
 * (CONFIG_EXYNOS_PMU_ARM_DRIVERS); they are relatively large and unused on
 * other architectures, so only reference them when they exist.
 */
#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
#define exynos_pmu_data_arm_ptr(data)	(&data)
#else
#define exynos_pmu_data_arm_ptr(data)	NULL
#endif

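/*
 * Regmap configuration for SoCs such as gs101 whose PMU registers are
 * write-protected and must be written via the SMC helpers above; reads
 * still go through plain MMIO (see tensor_sec_reg_read()).
 */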
static const struct regmap_config regmap_smccfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
	.reg_read = tensor_sec_reg_read,
	.reg_write = tensor_sec_reg_write,
	.reg_update_bits = tensor_sec_update_bits,
};

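/* Plain MMIO regmap configuration used by all other SoCs. */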
static const struct regmap_config regmap_mmiocfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
};

static const struct exynos_pmu_data gs101_pmu_data = {
	.pmu_secure = true,
};

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id exynos_pmu_of_device_ids[] = {
	{
		.compatible = "google,gs101-pmu",
		.data = &gs101_pmu_data,
	}, {
		.compatible = "samsung,exynos3250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
	}, {
		.compatible = "samsung,exynos5410-pmu",
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
	}, {
		.compatible = "samsung,exynos5433-pmu",
	}, {
		.compatible = "samsung,exynos7-pmu",
	}, {
		.compatible = "samsung,exynos850-pmu",
	},
	{ /* sentinel */ },
};

static const struct mfd_cell exynos_pmu_devs[] = {
	{ .name = "exynos-clkout", },
};

/**
 * exynos_get_pmu_regmap() - Obtain pmureg regmap
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap(void)
{
	struct device_node *np = of_find_matching_node(NULL,
						      exynos_pmu_of_device_ids);
	struct regmap *regmap;

	if (!np)
		return ERR_PTR(-ENODEV);

	regmap = exynos_get_pmu_regmap_by_phandle(np, NULL);
	of_node_put(np);
	return regmap;
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);

/**
 * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
 * @np: Device node holding PMU phandle property
 * @propname: Name of property holding phandle value
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
						const char *propname)
{
	struct exynos_pmu_context *ctx;
	struct device_node *pmu_np;
	struct device *dev;

	if (propname)
		pmu_np = of_parse_phandle(np, propname, 0);
	else
		pmu_np = np;

	if (!pmu_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Determine whether the exynos-pmu device has probed, and therefore
	 * whether the regmap has been created and can be returned to the
	 * caller. Otherwise return -EPROBE_DEFER.
	 */
	dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
					    (void *)pmu_np);

	if (propname)
		of_node_put(pmu_np);

	if (!dev)
		return ERR_PTR(-EPROBE_DEFER);

	ctx = dev_get_drvdata(dev);

	return ctx->pmureg;
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
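
/*
 * Illustrative use of the helper above (a sketch only; the property name is
 * hypothetical and not taken from a real binding): a client driver that
 * references the PMU through a phandle could do the following, propagating
 * -EPROBE_DEFER until this driver has probed:
 *
 *	regmap = exynos_get_pmu_regmap_by_phandle(dev->of_node,
 *						  "samsung,pmu-syscon");
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);
 *	regmap_update_bits(regmap, reg, mask, val);
 */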
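/*
 * Map the PMU registers, pick the SMC based or plain MMIO regmap depending
 * on the matched SoC data, run any SoC specific init, then register the
 * clkout MFD cell and populate child devices described in the devicetree.
 */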
static int exynos_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap_config pmu_regmcfg;
	struct regmap *regmap;
	struct resource *res;
	int ret;

	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pmu_base_addr))
		return PTR_ERR(pmu_base_addr);

	pmu_context = devm_kzalloc(dev, sizeof(*pmu_context), GFP_KERNEL);
	if (!pmu_context)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pmu_context->pmu_data = of_device_get_match_data(dev);

	/* SoCs that secure PMU register writes behind EL3 use the SMC regmap */
	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
		pmu_regmcfg = regmap_smccfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		/* Need physical address for SMC call */
		regmap = devm_regmap_init(dev, NULL,
					  (void *)(uintptr_t)res->start,
					  &pmu_regmcfg);
	} else {
		/* All other SoCs use an MMIO regmap */
		pmu_regmcfg = regmap_mmiocfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		regmap = devm_regmap_init_mmio(dev, pmu_base_addr,
					       &pmu_regmcfg);
	}

	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap),
				     "regmap init failed\n");

	pmu_context->pmureg = regmap;
	pmu_context->dev = dev;

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
		pmu_context->pmu_data->pmu_init();

	platform_set_drvdata(pdev, pmu_context);

	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
				   ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
	if (ret)
		return ret;

	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");

	dev_dbg(dev, "Exynos PMU Driver probe done\n");
	return 0;
}

static struct platform_driver exynos_pmu_driver = {
	.driver  = {
		.name   = "exynos-pmu",
		.of_match_table = exynos_pmu_of_device_ids,
	},
	.probe = exynos_pmu_probe,
};

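/*
 * Registered at postcore_initcall level so that the PMU regmap and raw
 * accessors are available early for the platform code and drivers that
 * depend on them.
 */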
static int __init exynos_pmu_init(void)
{
	return platform_driver_register(&exynos_pmu_driver);
}
postcore_initcall(exynos_pmu_init);
405