xref: /linux/drivers/soc/samsung/exynos-pmu.c (revision 11efc1cb7016e300047822fd60e0f4b4158bd56d)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
4 //		http://www.samsung.com/
5 //
6 // Exynos - CPU PMU(Power Management Unit) support
7 
8 #include <linux/array_size.h>
9 #include <linux/bitmap.h>
10 #include <linux/cpuhotplug.h>
11 #include <linux/cpu_pm.h>
12 #include <linux/of.h>
13 #include <linux/of_address.h>
14 #include <linux/mfd/core.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/of_platform.h>
17 #include <linux/platform_device.h>
18 #include <linux/delay.h>
19 #include <linux/reboot.h>
20 #include <linux/regmap.h>
21 
22 #include <linux/soc/samsung/exynos-regs-pmu.h>
23 #include <linux/soc/samsung/exynos-pmu.h>
24 
25 #include "exynos-pmu.h"
26 
/* Driver-private state, allocated once at probe time (see exynos_pmu_probe). */
struct exynos_pmu_context {
	/* The exynos-pmu platform device that owns this context. */
	struct device *dev;
	/* Per-SoC match data from exynos_pmu_of_device_ids (may be NULL). */
	const struct exynos_pmu_data *pmu_data;
	/* Regmap over the PMU register block (mmio or SMC-backed). */
	struct regmap *pmureg;
	/* Raw-spinlock mmio regmap for the GS101 pmu-intr-gen block. */
	struct regmap *pmuintrgen;
	/*
	 * Serialization lock for CPU hot plug and cpuidle ACPM hint
	 * programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot
	 * flags.
	 */
	raw_spinlock_t cpupm_lock;
	/* Bitmap of CPUs currently inside a hot plug transition. */
	unsigned long *in_cpuhp;
	/* True between suspend_noirq and resume_noirq. */
	bool sys_insuspend;
	/* True once a reboot/power-off notifier has fired; never cleared. */
	bool sys_inreboot;
};

/* Base of the PMU register block, shared with the per-SoC config code. */
void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;
47 
/* Write @val to the PMU register at byte @offset (no barrier, relaxed). */
void pmu_raw_writel(u32 val, u32 offset)
{
	void __iomem *reg = pmu_base_addr + offset;

	writel_relaxed(val, reg);
}
52 
/* Read the PMU register at byte @offset (no barrier, relaxed). */
u32 pmu_raw_readl(u32 offset)
{
	void __iomem *reg = pmu_base_addr + offset;

	return readl_relaxed(reg);
}
57 
exynos_sys_powerdown_conf(enum sys_powerdown mode)58 void exynos_sys_powerdown_conf(enum sys_powerdown mode)
59 {
60 	unsigned int i;
61 	const struct exynos_pmu_data *pmu_data;
62 
63 	if (!pmu_context || !pmu_context->pmu_data)
64 		return;
65 
66 	pmu_data = pmu_context->pmu_data;
67 
68 	if (pmu_data->powerdown_conf)
69 		pmu_data->powerdown_conf(mode);
70 
71 	if (pmu_data->pmu_config) {
72 		for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
73 			pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
74 					pmu_data->pmu_config[i].offset);
75 	}
76 
77 	if (pmu_data->powerdown_conf_extra)
78 		pmu_data->powerdown_conf_extra(mode);
79 
80 	if (pmu_data->pmu_config_extra) {
81 		for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
82 			pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
83 				       pmu_data->pmu_config_extra[i].offset);
84 	}
85 }
86 
87 /*
88  * Split the data between ARM architectures because it is relatively big
89  * and useless on other arch.
90  */
91 #ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
92 #define exynos_pmu_data_arm_ptr(data)	(&data)
93 #else
94 #define exynos_pmu_data_arm_ptr(data)	NULL
95 #endif
96 
/*
 * Regmap template for SoCs whose PMU registers are only writable from secure
 * world: accesses go through the tensor_sec_* SMC helpers rather than mmio.
 * Raw spinlocks are used so the regmap may be accessed from CPU PM notifiers.
 */
static const struct regmap_config regmap_smccfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
	.reg_read = tensor_sec_reg_read,
	.reg_write = tensor_sec_reg_write,
	.reg_update_bits = tensor_sec_update_bits,
	.use_raw_spinlock = true,
};
110 
/*
 * Plain mmio regmap config for the pmu-intr-gen block; raw spinlocks so it
 * can be used from CPU PM notifier context (see setup_cpuhp_and_cpuidle()).
 */
static const struct regmap_config regmap_pmu_intr = {
	.name = "pmu_intr_gen",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.use_raw_spinlock = true,
};
118 
119 /*
120  * PMU platform driver and devicetree bindings.
121  */
static const struct of_device_id exynos_pmu_of_device_ids[] = {
	{
		.compatible = "google,gs101-pmu",
		.data = &gs101_pmu_data,
	}, {
		.compatible = "samsung,exynos3250-pmu",
		/* ARM-only match data is compiled out on other architectures */
		.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
	}, {
		/* Entries without .data still get the plain syscon regmap */
		.compatible = "samsung,exynos5410-pmu",
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
	}, {
		.compatible = "samsung,exynos5433-pmu",
	}, {
		.compatible = "samsung,exynos7-pmu",
	}, {
		.compatible = "samsung,exynos850-pmu",
	},
	{ /*sentinel*/ },
};
155 
/* MFD children instantiated on top of the PMU block (clkout provider). */
static const struct mfd_cell exynos_pmu_devs[] = {
	{ .name = "exynos-clkout", },
};
159 
160 /**
161  * exynos_get_pmu_regmap() - Obtain pmureg regmap
162  *
163  * Find the pmureg regmap previously configured in probe() and return regmap
164  * pointer.
165  *
166  * Return: A pointer to regmap if found or ERR_PTR error value.
167  */
exynos_get_pmu_regmap(void)168 struct regmap *exynos_get_pmu_regmap(void)
169 {
170 	struct device_node *np = of_find_matching_node(NULL,
171 						      exynos_pmu_of_device_ids);
172 	if (np)
173 		return exynos_get_pmu_regmap_by_phandle(np, NULL);
174 	return ERR_PTR(-ENODEV);
175 }
176 EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
177 
/**
 * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
 * @np: Device node holding PMU phandle property
 * @propname: Name of property holding phandle value
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * If @propname is NULL, @np itself is treated as the PMU node and no
 * reference is taken or dropped on it (the caller keeps ownership).
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
						const char *propname)
{
	struct device_node *pmu_np;
	struct device *dev;

	if (propname)
		pmu_np = of_parse_phandle(np, propname, 0);
	else
		pmu_np = np;

	if (!pmu_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Determine if exynos-pmu device has probed and therefore regmap
	 * has been created and can be returned to the caller. Otherwise we
	 * return -EPROBE_DEFER.
	 */
	dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
					    (void *)pmu_np);

	/* Only drop the reference of_parse_phandle() took above. */
	if (propname)
		of_node_put(pmu_np);

	if (!dev)
		return ERR_PTR(-EPROBE_DEFER);

	/* driver_find_device_by_of_node() got the device; release it. */
	put_device(dev);

	return syscon_node_to_regmap(pmu_np);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
221 
222 /*
223  * CPU_INFORM register "hint" values are required to be programmed in addition to
224  * the standard PSCI calls to have functional CPU hotplug and CPU idle states.
225  * This is required to workaround limitations in the el3mon/ACPM firmware.
226  */
227 #define CPU_INFORM_CLEAR	0
228 #define CPU_INFORM_C2		1
229 
/*
 * __gs101_cpu_pmu_ prefix functions are common code shared by CPU PM notifiers
 * (CPUIdle) and CPU hotplug callbacks. Functions should be called with IRQs
 * disabled and cpupm_lock held.
 */
static int __gs101_cpu_pmu_online(unsigned int cpu)
	__must_hold(&pmu_context->cpupm_lock)
{
	/*
	 * NOTE(review): the CPU_INFORM slot is indexed by the *executing*
	 * CPU, not by @cpu — presumably both CPUIdle (cpu == this CPU) and
	 * hotplug (run from the control CPU) want it this way; confirm.
	 */
	unsigned int cpuhint = smp_processor_id();
	u32 reg, mask;

	/* clear cpu inform hint */
	regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
		     CPU_INFORM_CLEAR);

	mask = BIT(cpu);

	/* (0 << cpu) == 0: disable the GRP2 interrupt enable bit for @cpu */
	regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
			   mask, (0 << cpu));

	/* Ack any interrupt already pending for @cpu in GRP2 */
	regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);

	regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
		     reg & mask);

	return 0;
}
257 
258 /* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
gs101_cpu_pmu_online(void)259 static int gs101_cpu_pmu_online(void)
260 {
261 	int cpu;
262 
263 	raw_spin_lock(&pmu_context->cpupm_lock);
264 
265 	if (pmu_context->sys_inreboot) {
266 		raw_spin_unlock(&pmu_context->cpupm_lock);
267 		return NOTIFY_OK;
268 	}
269 
270 	cpu = smp_processor_id();
271 	__gs101_cpu_pmu_online(cpu);
272 	raw_spin_unlock(&pmu_context->cpupm_lock);
273 
274 	return NOTIFY_OK;
275 }
276 
277 /* Called from CPU hot plug callback with IRQs enabled */
gs101_cpuhp_pmu_online(unsigned int cpu)278 static int gs101_cpuhp_pmu_online(unsigned int cpu)
279 {
280 	unsigned long flags;
281 
282 	raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
283 
284 	__gs101_cpu_pmu_online(cpu);
285 	/*
286 	 * Mark this CPU as having finished the hotplug.
287 	 * This means this CPU can now enter C2 idle state.
288 	 */
289 	clear_bit(cpu, pmu_context->in_cpuhp);
290 	raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
291 
292 	return 0;
293 }
294 
/* Common function shared by both CPU hot plug and CPUIdle */
static int __gs101_cpu_pmu_offline(unsigned int cpu)
	__must_hold(&pmu_context->cpupm_lock)
{
	/* Hint slot is indexed by the executing CPU, mirroring _online(). */
	unsigned int cpuhint = smp_processor_id();
	u32 reg, mask;

	/* set cpu inform hint */
	regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
		     CPU_INFORM_C2);

	mask = BIT(cpu);
	/* Enable the GRP2 interrupt bit for @cpu */
	regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
			   mask, BIT(cpu));

	/* Ack pending GRP1 interrupt for @cpu */
	regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
	regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
		     reg & mask);

	/*
	 * Ack the second GRP1 pending bit for @cpu at offset +8 —
	 * presumably a per-CPU pair of interrupt lines in this register;
	 * NOTE(review): confirm against el3mon/ACPM documentation.
	 */
	mask = (BIT(cpu + 8));
	regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
	regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
		     reg & mask);

	return 0;
}
321 
322 /* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
gs101_cpu_pmu_offline(void)323 static int gs101_cpu_pmu_offline(void)
324 {
325 	int cpu;
326 
327 	raw_spin_lock(&pmu_context->cpupm_lock);
328 	cpu = smp_processor_id();
329 
330 	if (test_bit(cpu, pmu_context->in_cpuhp)) {
331 		raw_spin_unlock(&pmu_context->cpupm_lock);
332 		return NOTIFY_BAD;
333 	}
334 
335 	/* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */
336 	if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
337 		raw_spin_unlock(&pmu_context->cpupm_lock);
338 		return NOTIFY_OK;
339 	}
340 
341 	__gs101_cpu_pmu_offline(cpu);
342 	raw_spin_unlock(&pmu_context->cpupm_lock);
343 
344 	return NOTIFY_OK;
345 }
346 
347 /* Called from CPU hot plug callback with IRQs enabled */
gs101_cpuhp_pmu_offline(unsigned int cpu)348 static int gs101_cpuhp_pmu_offline(unsigned int cpu)
349 {
350 	unsigned long flags;
351 
352 	raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
353 	/*
354 	 * Mark this CPU as entering hotplug. So as not to confuse
355 	 * ACPM the CPU entering hotplug should not enter C2 idle state.
356 	 */
357 	set_bit(cpu, pmu_context->in_cpuhp);
358 	__gs101_cpu_pmu_offline(cpu);
359 
360 	raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
361 
362 	return 0;
363 }
364 
gs101_cpu_pm_notify_callback(struct notifier_block * self,unsigned long action,void * v)365 static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
366 					unsigned long action, void *v)
367 {
368 	switch (action) {
369 	case CPU_PM_ENTER:
370 		return gs101_cpu_pmu_offline();
371 
372 	case CPU_PM_EXIT:
373 		return gs101_cpu_pmu_online();
374 	}
375 
376 	return NOTIFY_OK;
377 }
378 
/* CPU PM notifier hooking gs101_cpu_pm_notify_callback into CPUIdle. */
static struct notifier_block gs101_cpu_pm_notifier = {
	.notifier_call = gs101_cpu_pm_notify_callback,
	/*
	 * We want to be called first, as the ACPM hint and handshake is what
	 * puts the CPU into C2.
	 */
	.priority = INT_MAX
};
387 
exynos_cpupm_reboot_notifier(struct notifier_block * nb,unsigned long event,void * v)388 static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
389 					unsigned long event, void *v)
390 {
391 	unsigned long flags;
392 
393 	switch (event) {
394 	case SYS_POWER_OFF:
395 	case SYS_RESTART:
396 		raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
397 		pmu_context->sys_inreboot = true;
398 		raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
399 		break;
400 	}
401 
402 	return NOTIFY_OK;
403 }
404 
/* Reboot notifier; highest priority so the flag is set before other users. */
static struct notifier_block exynos_cpupm_reboot_nb = {
	.priority = INT_MAX,
	.notifier_call = exynos_cpupm_reboot_notifier,
};
409 
setup_cpuhp_and_cpuidle(struct device * dev)410 static int setup_cpuhp_and_cpuidle(struct device *dev)
411 {
412 	struct device_node *intr_gen_node;
413 	struct resource intrgen_res;
414 	void __iomem *virt_addr;
415 	int ret, cpu;
416 
417 	intr_gen_node = of_parse_phandle(dev->of_node,
418 					 "google,pmu-intr-gen-syscon", 0);
419 	if (!intr_gen_node) {
420 		/*
421 		 * To maintain support for older DTs that didn't specify syscon
422 		 * phandle just issue a warning rather than fail to probe.
423 		 */
424 		dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
425 		return 0;
426 	}
427 
428 	/*
429 	 * To avoid lockdep issues (CPU PM notifiers use raw spinlocks) create
430 	 * a mmio regmap for pmu-intr-gen that uses raw spinlocks instead of
431 	 * syscon provided regmap.
432 	 */
433 	ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
434 	of_node_put(intr_gen_node);
435 
436 	virt_addr = devm_ioremap(dev, intrgen_res.start,
437 				 resource_size(&intrgen_res));
438 	if (!virt_addr)
439 		return -ENOMEM;
440 
441 	pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
442 							&regmap_pmu_intr);
443 	if (IS_ERR(pmu_context->pmuintrgen)) {
444 		dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
445 		return PTR_ERR(pmu_context->pmuintrgen);
446 	}
447 
448 	/* register custom mmio regmap with syscon */
449 	ret = of_syscon_register_regmap(intr_gen_node,
450 					pmu_context->pmuintrgen);
451 	if (ret)
452 		return ret;
453 
454 	pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
455 						   GFP_KERNEL);
456 	if (!pmu_context->in_cpuhp)
457 		return -ENOMEM;
458 
459 	/* set PMU to power on */
460 	for_each_online_cpu(cpu)
461 		gs101_cpuhp_pmu_online(cpu);
462 
463 	/* register CPU hotplug callbacks */
464 	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,	"soc/exynos-pmu:prepare",
465 			  gs101_cpuhp_pmu_online, NULL);
466 
467 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
468 			  NULL, gs101_cpuhp_pmu_offline);
469 
470 	/* register CPU PM notifiers for cpuidle */
471 	cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
472 	register_reboot_notifier(&exynos_cpupm_reboot_nb);
473 	return 0;
474 }
475 
/*
 * Probe: map the PMU block, build the appropriate regmap (SMC-backed on
 * secure SoCs, plain syscon mmio otherwise), set up the optional GS101
 * hotplug/cpuidle machinery, then register MFD cells and DT children.
 */
static int exynos_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap_config pmu_regmcfg;
	struct regmap *regmap;
	struct resource *res;
	int ret;

	/* Also publishes the base in the pmu_base_addr global for pmu_raw_*() */
	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pmu_base_addr))
		return PTR_ERR(pmu_base_addr);

	pmu_context = devm_kzalloc(&pdev->dev,
			sizeof(struct exynos_pmu_context),
			GFP_KERNEL);
	if (!pmu_context)
		return -ENOMEM;

	/* Re-fetch the resource: the physical range is needed below */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* May be NULL for compatibles without match data */
	pmu_context->pmu_data = of_device_get_match_data(dev);

	/* For SoCs that secure PMU register writes use custom regmap */
	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
		pmu_regmcfg = regmap_smccfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		pmu_regmcfg.wr_table = pmu_context->pmu_data->wr_table;
		pmu_regmcfg.rd_table = pmu_context->pmu_data->rd_table;

		/* Need physical address for SMC call */
		regmap = devm_regmap_init(dev, NULL,
					  (void *)(uintptr_t)res->start,
					  &pmu_regmcfg);

		if (IS_ERR(regmap))
			return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
					     "regmap init failed\n");

		/* Publish the custom regmap so syscon lookups find it */
		ret = of_syscon_register_regmap(dev->of_node, regmap);
		if (ret)
			return ret;
	} else {
		/* let syscon create mmio regmap */
		regmap = syscon_node_to_regmap(dev->of_node);
		if (IS_ERR(regmap))
			return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
					     "syscon_node_to_regmap failed\n");
	}

	pmu_context->pmureg = regmap;
	pmu_context->dev = dev;
	raw_spin_lock_init(&pmu_context->cpupm_lock);
	pmu_context->sys_inreboot = false;
	pmu_context->sys_insuspend = false;

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
		ret = setup_cpuhp_and_cpuidle(dev);
		if (ret)
			return ret;
	}

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
		pmu_context->pmu_data->pmu_init();

	platform_set_drvdata(pdev, pmu_context);

	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
				   ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
	if (ret)
		return ret;

	/* Best effort: children failing to populate is not fatal to probe */
	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");

	dev_dbg(dev, "Exynos PMU Driver probe done\n");
	return 0;
}
556 
exynos_cpupm_suspend_noirq(struct device * dev)557 static int exynos_cpupm_suspend_noirq(struct device *dev)
558 {
559 	raw_spin_lock(&pmu_context->cpupm_lock);
560 	pmu_context->sys_insuspend = true;
561 	raw_spin_unlock(&pmu_context->cpupm_lock);
562 	return 0;
563 }
564 
exynos_cpupm_resume_noirq(struct device * dev)565 static int exynos_cpupm_resume_noirq(struct device *dev)
566 {
567 	raw_spin_lock(&pmu_context->cpupm_lock);
568 	pmu_context->sys_insuspend = false;
569 	raw_spin_unlock(&pmu_context->cpupm_lock);
570 	return 0;
571 }
572 
/* Only noirq callbacks are needed: just toggle the sys_insuspend flag. */
static const struct dev_pm_ops cpupm_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
				  exynos_cpupm_resume_noirq)
};
577 
/* No .remove: the PMU is a core SoC block that is never unbound. */
static struct platform_driver exynos_pmu_driver = {
	.driver  = {
		.name   = "exynos-pmu",
		.of_match_table = exynos_pmu_of_device_ids,
		.pm = pm_sleep_ptr(&cpupm_pm_ops),
	},
	.probe = exynos_pmu_probe,
};
586 
exynos_pmu_init(void)587 static int __init exynos_pmu_init(void)
588 {
589 	return platform_driver_register(&exynos_pmu_driver);
590 
591 }
592 postcore_initcall(exynos_pmu_init);
593