// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
//		http://www.samsung.com/
//
// Exynos - CPU PMU (Power Management Unit) support

#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/regmap.h>

#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include "exynos-pmu.h"

#define PMUALIVE_MASK			GENMASK(13, 0)
#define TENSOR_SET_BITS			(BIT(15) | BIT(14))
#define TENSOR_CLR_BITS			BIT(15)
#define TENSOR_SMC_PMU_SEC_REG		0x82000504
#define TENSOR_PMUREG_READ		0
#define TENSOR_PMUREG_WRITE		1
#define TENSOR_PMUREG_RMW		2

struct exynos_pmu_context {
	struct device *dev;
	const struct exynos_pmu_data *pmu_data;
	struct regmap *pmureg;
	struct regmap *pmuintrgen;
	/*
	 * Serialization lock for CPU hot plug and cpuidle ACPM hint
	 * programming. Also protects the in_cpuhp, sys_insuspend and
	 * sys_inreboot flags.
	 */
	raw_spinlock_t cpupm_lock;
	unsigned long *in_cpuhp;
	bool sys_insuspend;
	bool sys_inreboot;
};

void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;

/*
 * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
 * from EL3, but are still read accessible. As Linux needs to write some of
 * these registers, the following functions are provided and exposed via
 * regmap.
 *
 * Note: This SMC interface is known to be implemented on gs101 and derivative
 * SoCs.
 */
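
/*
 * Illustrative sketch (assumptions noted, not additional upstream code): a
 * regmap access on the secure PMU regmap ends up in the handlers below. For
 * example, regmap_write(pmu_context->pmureg, reg, val) calls
 * tensor_sec_reg_write(), which issues:
 *
 *	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG,	// SMC function ID
 *		      pmu_base + reg,		// physical register address
 *		      TENSOR_PMUREG_WRITE,	// operation
 *		      val, 0, 0, 0, 0, &res);	// value to write
 *
 * The EL3 firmware performs the actual write and returns status in res.a0.
 */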

/* Write to a protected PMU register. */
static int tensor_sec_reg_write(void *context, unsigned int reg,
				unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);

	/* res.a0 is 0 on success or -EINVAL if the access isn't allowed */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/* Read/Modify/Write a protected PMU register. */
static int tensor_sec_reg_rmw(void *context, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);

	/* res.a0 is 0 on success or -EINVAL if the access isn't allowed */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/*
 * Read a protected PMU register. All PMU registers can be read directly by
 * Linux, so the TENSOR_PMUREG_READ SMC operation is not used; via SMC, only
 * the registers that can be written are readable anyway.
 */
static int tensor_sec_reg_read(void *context, unsigned int reg,
			       unsigned int *val)
{
	*val = pmu_raw_readl(reg);
	return 0;
}

/*
 * For SoCs that have set/clear bit hardware this function can be used when
 * the PMU register will be accessed by multiple masters.
 *
 * For example, to set bits 13:8 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
 *
 * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
 */
static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
				  u32 mask)
{
	int ret;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(mask & BIT(i)))
			continue;

		offset &= ~TENSOR_SET_BITS;

		if (val & BIT(i))
			offset |= TENSOR_SET_BITS;
		else
			offset |= TENSOR_CLR_BITS;

		ret = tensor_sec_reg_write(ctx, offset, i);
		if (ret)
			return ret;
	}
	return 0;
}
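
/*
 * Worked example for tensor_set_bits_atomic() above (inferred from the
 * encoding in the code, not from vendor documentation): a call such as
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00) walks mask bits 8..13.
 * For bit 8 (to be set) it writes the bit index 8 to offset
 * 0x3e80 | TENSOR_SET_BITS; for bits 9..13 (to be cleared) it writes each
 * bit index to offset 0x3e80 | TENSOR_CLR_BITS. The set/clear hardware
 * updates one bit per write, atomically with respect to other masters.
 */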

static bool tensor_is_atomic(unsigned int reg)
{
	/*
	 * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
	 * as the target registers can be accessed by multiple masters. SFRs
	 * that don't support atomic access are added to the switch statement
	 * below.
	 */
	if (reg > PMUALIVE_MASK)
		return false;

	switch (reg) {
	case GS101_SYSIP_DAT0:
	case GS101_SYSTEM_CONFIGURATION:
		return false;
	default:
		return true;
	}
}

static int tensor_sec_update_bits(void *ctx, unsigned int reg,
				  unsigned int mask, unsigned int val)
{
	if (!tensor_is_atomic(reg))
		return tensor_sec_reg_rmw(ctx, reg, mask, val);

	return tensor_set_bits_atomic(ctx, reg, val, mask);
}

void pmu_raw_writel(u32 val, u32 offset)
{
	writel_relaxed(val, pmu_base_addr + offset);
}

u32 pmu_raw_readl(u32 offset)
{
	return readl_relaxed(pmu_base_addr + offset);
}

void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;
	const struct exynos_pmu_data *pmu_data;

	if (!pmu_context || !pmu_context->pmu_data)
		return;

	pmu_data = pmu_context->pmu_data;

	if (pmu_data->powerdown_conf)
		pmu_data->powerdown_conf(mode);

	if (pmu_data->pmu_config) {
		for (i = 0; pmu_data->pmu_config[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
				       pmu_data->pmu_config[i].offset);
	}

	if (pmu_data->powerdown_conf_extra)
		pmu_data->powerdown_conf_extra(mode);

	if (pmu_data->pmu_config_extra) {
		for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
				       pmu_data->pmu_config_extra[i].offset);
	}
}
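
/*
 * Sketch of the per-SoC tables consumed above (illustrative only; the real
 * tables live in the SoC-specific exynos-pmu files, and the register name
 * below is just an example):
 *
 *	static const struct exynos_pmu_conf example_pmu_config[] = {
 *		// { .offset, .val[SYS_AFTR, SYS_LPA, SYS_SLEEP] }
 *		{ EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2 } },
 *		{ PMU_TABLE_END, },
 *	};
 *
 * exynos_sys_powerdown_conf() walks such a table until PMU_TABLE_END and
 * writes the column selected by the requested low-power mode.
 */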

/*
 * Split the data between ARM architectures because it is relatively big
 * and useless on other architectures.
 */
#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
#define exynos_pmu_data_arm_ptr(data)	(&data)
#else
#define exynos_pmu_data_arm_ptr(data)	NULL
#endif

static const struct regmap_config regmap_smccfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
	.reg_read = tensor_sec_reg_read,
	.reg_write = tensor_sec_reg_write,
	.reg_update_bits = tensor_sec_update_bits,
	.use_raw_spinlock = true,
};

static const struct regmap_config regmap_pmu_intr = {
	.name = "pmu_intr_gen",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.use_raw_spinlock = true,
};

static const struct exynos_pmu_data gs101_pmu_data = {
	.pmu_secure = true,
	.pmu_cpuhp = true,
};

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id exynos_pmu_of_device_ids[] = {
	{
		.compatible = "google,gs101-pmu",
		.data = &gs101_pmu_data,
	}, {
		.compatible = "samsung,exynos3250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
	}, {
		.compatible = "samsung,exynos5410-pmu",
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
	}, {
		.compatible = "samsung,exynos5433-pmu",
	}, {
		.compatible = "samsung,exynos7-pmu",
	}, {
		.compatible = "samsung,exynos850-pmu",
	},
	{ /* sentinel */ },
};

static const struct mfd_cell exynos_pmu_devs[] = {
	{ .name = "exynos-clkout", },
};

/**
 * exynos_get_pmu_regmap() - Obtain pmureg regmap
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap(void)
{
	struct device_node *np = of_find_matching_node(NULL,
						      exynos_pmu_of_device_ids);
	if (np)
		return exynos_get_pmu_regmap_by_phandle(np, NULL);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);

/**
 * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
 * @np: Device node holding PMU phandle property
 * @propname: Name of property holding phandle value
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
						const char *propname)
{
	struct device_node *pmu_np;
	struct regmap *regmap;
	struct device *dev;

	if (propname)
		pmu_np = of_parse_phandle(np, propname, 0);
	else
		pmu_np = np;

	if (!pmu_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Determine if the exynos-pmu device has probed and therefore the
	 * regmap has been created and can be returned to the caller.
	 * Otherwise return -EPROBE_DEFER.
	 */
	dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
					    (void *)pmu_np);
	if (!dev) {
		if (propname)
			of_node_put(pmu_np);
		return ERR_PTR(-EPROBE_DEFER);
	}

	regmap = syscon_node_to_regmap(pmu_np);

	/* Drop the device reference taken above; put the node after last use. */
	put_device(dev);
	if (propname)
		of_node_put(pmu_np);

	return regmap;
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
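
/*
 * Hypothetical consumer usage (the property name and register offset are
 * illustrative assumptions; consult the consumer's own binding):
 *
 *	struct regmap *pmureg;
 *	u32 val;
 *
 *	pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node,
 *						  "samsung,pmu-syscon");
 *	if (IS_ERR(pmureg))
 *		return PTR_ERR(pmureg);	// may be -EPROBE_DEFER
 *	regmap_read(pmureg, offset, &val);
 *
 * Returning -EPROBE_DEFER lets the consumer retry until this driver has
 * probed and registered the regmap with syscon.
 */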

/*
 * CPU_INFORM register "hint" values are required to be programmed in addition
 * to the standard PSCI calls to have functional CPU hotplug and CPU idle
 * states. This is required to work around limitations in the el3mon/ACPM
 * firmware.
 */
#define CPU_INFORM_CLEAR	0
#define CPU_INFORM_C2		1
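
/*
 * Hint protocol sketch (an inference from the handlers below, not a
 * documented firmware contract): before a CPU may enter C2, the offline path
 * writes CPU_INFORM_C2 to that CPU's CPU_INFORM register and enables its BID
 * wakeup interrupt; on exit, the online path clears the hint and disables and
 * acks the interrupt again. el3mon/ACPM uses the hint to decide whether the
 * core can really be powered down.
 */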

/*
 * The __gs101_cpu_pmu_ prefixed functions are common code shared by the CPU
 * PM notifiers (CPUIdle) and the CPU hotplug callbacks. They must be called
 * with IRQs disabled and cpupm_lock held.
 */
static int __gs101_cpu_pmu_online(unsigned int cpu)
{
	unsigned int cpuhint = smp_processor_id();
	u32 reg, mask;

	/* clear cpu inform hint */
	regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
		     CPU_INFORM_CLEAR);

	mask = BIT(cpu);

	/* clear this CPU's bit in the BID interrupt enable register */
	regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
			   mask, 0);

	/* ack any BID interrupt still pending for this CPU */
	regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);
	regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
		     reg & mask);

	return 0;
}

/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
static int gs101_cpu_pmu_online(void)
{
	int cpu;

	raw_spin_lock(&pmu_context->cpupm_lock);

	if (pmu_context->sys_inreboot) {
		raw_spin_unlock(&pmu_context->cpupm_lock);
		return NOTIFY_OK;
	}

	cpu = smp_processor_id();
	__gs101_cpu_pmu_online(cpu);
	raw_spin_unlock(&pmu_context->cpupm_lock);

	return NOTIFY_OK;
}

/* Called from CPU hot plug callback with IRQs enabled */
static int gs101_cpuhp_pmu_online(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);

	__gs101_cpu_pmu_online(cpu);
	/*
	 * Mark this CPU as having finished its hotplug operation. This means
	 * this CPU can now enter the C2 idle state.
	 */
	clear_bit(cpu, pmu_context->in_cpuhp);
	raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);

	return 0;
}

/* Common function shared by both CPU hot plug and CPUIdle */
static int __gs101_cpu_pmu_offline(unsigned int cpu)
{
	unsigned int cpuhint = smp_processor_id();
	u32 reg, mask;

	/* set cpu inform hint */
	regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
		     CPU_INFORM_C2);

	/* set this CPU's bit in the BID interrupt enable register */
	mask = BIT(cpu);
	regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
			   mask, BIT(cpu));

	/* ack any BID interrupt still pending for this CPU */
	regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
	regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
		     reg & mask);

	mask = BIT(cpu + 8);
	regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
	regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
		     reg & mask);

	return 0;
}

/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
static int gs101_cpu_pmu_offline(void)
{
	int cpu;

	raw_spin_lock(&pmu_context->cpupm_lock);
	cpu = smp_processor_id();

	if (test_bit(cpu, pmu_context->in_cpuhp)) {
		raw_spin_unlock(&pmu_context->cpupm_lock);
		return NOTIFY_BAD;
	}

	/* Ignore the CPU_PM_ENTER event during a reboot or suspend sequence. */
	if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
		raw_spin_unlock(&pmu_context->cpupm_lock);
		return NOTIFY_OK;
	}

	__gs101_cpu_pmu_offline(cpu);
	raw_spin_unlock(&pmu_context->cpupm_lock);

	return NOTIFY_OK;
}

/* Called from CPU hot plug callback with IRQs enabled */
static int gs101_cpuhp_pmu_offline(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
	/*
	 * Mark this CPU as entering hotplug. So as not to confuse ACPM,
	 * a CPU that is entering hotplug must not enter the C2 idle state.
	 */
	set_bit(cpu, pmu_context->in_cpuhp);
	__gs101_cpu_pmu_offline(cpu);

	raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);

	return 0;
}

static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
					unsigned long action, void *v)
{
	switch (action) {
	case CPU_PM_ENTER:
		return gs101_cpu_pmu_offline();

	case CPU_PM_EXIT:
		return gs101_cpu_pmu_online();
	}

	return NOTIFY_OK;
}

static struct notifier_block gs101_cpu_pm_notifier = {
	.notifier_call = gs101_cpu_pm_notify_callback,
	/*
	 * We want to be called first, as the ACPM hint and handshake is what
	 * puts the CPU into C2.
	 */
	.priority = INT_MAX,
};
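
/*
 * Call-flow sketch (assumed, based on the generic CPU PM framework): when
 * cpuidle enters a power-down state it calls cpu_pm_enter(), which raises
 * CPU_PM_ENTER and lands in gs101_cpu_pmu_offline() above, before the PSCI
 * suspend call. On wakeup, cpu_pm_exit() raises CPU_PM_EXIT and
 * gs101_cpu_pmu_online() undoes the hint and interrupt programming.
 */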

static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
					unsigned long event, void *v)
{
	unsigned long flags;

	switch (event) {
	case SYS_POWER_OFF:
	case SYS_RESTART:
		raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
		pmu_context->sys_inreboot = true;
		raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block exynos_cpupm_reboot_nb = {
	.priority = INT_MAX,
	.notifier_call = exynos_cpupm_reboot_notifier,
};

static int setup_cpuhp_and_cpuidle(struct device *dev)
{
	struct device_node *intr_gen_node;
	struct resource intrgen_res;
	void __iomem *virt_addr;
	int ret, cpu;

	intr_gen_node = of_parse_phandle(dev->of_node,
					 "google,pmu-intr-gen-syscon", 0);
	if (!intr_gen_node) {
		/*
		 * To maintain support for older DTs that didn't specify a
		 * syscon phandle, just issue a warning rather than failing
		 * to probe.
		 */
		dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
		return 0;
	}

	/*
	 * To avoid lockdep issues (CPU PM notifiers use raw spinlocks) create
	 * an mmio regmap for pmu-intr-gen that uses raw spinlocks instead of
	 * the syscon-provided regmap.
	 */
	ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
	if (ret) {
		of_node_put(intr_gen_node);
		return ret;
	}

	virt_addr = devm_ioremap(dev, intrgen_res.start,
				 resource_size(&intrgen_res));
	if (!virt_addr) {
		of_node_put(intr_gen_node);
		return -ENOMEM;
	}

	pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
							&regmap_pmu_intr);
	if (IS_ERR(pmu_context->pmuintrgen)) {
		of_node_put(intr_gen_node);
		dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
		return PTR_ERR(pmu_context->pmuintrgen);
	}

	/* register custom mmio regmap with syscon */
	ret = of_syscon_register_regmap(intr_gen_node,
					pmu_context->pmuintrgen);
	of_node_put(intr_gen_node);
	if (ret)
		return ret;

	pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
						   GFP_KERNEL);
	if (!pmu_context->in_cpuhp)
		return -ENOMEM;

	raw_spin_lock_init(&pmu_context->cpupm_lock);
	pmu_context->sys_inreboot = false;
	pmu_context->sys_insuspend = false;

	/* set PMU to power on */
	for_each_online_cpu(cpu)
		gs101_cpuhp_pmu_online(cpu);

	/* register CPU hotplug callbacks */
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
			  gs101_cpuhp_pmu_online, NULL);

	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
			  NULL, gs101_cpuhp_pmu_offline);

	/* register CPU PM notifiers for cpuidle */
	cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
	register_reboot_notifier(&exynos_cpupm_reboot_nb);
	return 0;
}
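
/*
 * Sketch of the devicetree wiring consumed by setup_cpuhp_and_cpuidle()
 * (illustrative node names and addresses; see the google,gs101-pmu binding
 * for the real schema):
 *
 *	pmu_system_controller: system-controller@17460000 {
 *		compatible = "google,gs101-pmu", "syscon";
 *		reg = <0x17460000 0x10000>;
 *		google,pmu-intr-gen-syscon = <&pmu_intr_gen>;
 *	};
 *
 * The phandle points at the PMU interrupt generation block, which is
 * remapped here with a raw-spinlock regmap so it can be safely programmed
 * from CPU PM notifier context.
 */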

static int exynos_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap_config pmu_regmcfg;
	struct regmap *regmap;
	struct resource *res;
	int ret;

	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pmu_base_addr))
		return PTR_ERR(pmu_base_addr);

	pmu_context = devm_kzalloc(dev, sizeof(*pmu_context), GFP_KERNEL);
	if (!pmu_context)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pmu_context->pmu_data = of_device_get_match_data(dev);

	/* For SoCs that secure PMU register writes, use the custom regmap. */
	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
		pmu_regmcfg = regmap_smccfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		/* The SMC call needs the physical address of the PMU. */
		regmap = devm_regmap_init(dev, NULL,
					  (void *)(uintptr_t)res->start,
					  &pmu_regmcfg);
		if (IS_ERR(regmap))
			return dev_err_probe(dev, PTR_ERR(regmap),
					     "regmap init failed\n");

		ret = of_syscon_register_regmap(dev->of_node, regmap);
		if (ret)
			return ret;
	} else {
		/* let syscon create the mmio regmap */
		regmap = syscon_node_to_regmap(dev->of_node);
		if (IS_ERR(regmap))
			return dev_err_probe(dev, PTR_ERR(regmap),
					     "syscon_node_to_regmap failed\n");
	}

	pmu_context->pmureg = regmap;
	pmu_context->dev = dev;

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
		ret = setup_cpuhp_and_cpuidle(dev);
		if (ret)
			return ret;
	}

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
		pmu_context->pmu_data->pmu_init();

	platform_set_drvdata(pdev, pmu_context);

	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
				   ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
	if (ret)
		return ret;

	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");

	dev_dbg(dev, "Exynos PMU Driver probe done\n");
	return 0;
}

static int exynos_cpupm_suspend_noirq(struct device *dev)
{
	raw_spin_lock(&pmu_context->cpupm_lock);
	pmu_context->sys_insuspend = true;
	raw_spin_unlock(&pmu_context->cpupm_lock);
	return 0;
}

static int exynos_cpupm_resume_noirq(struct device *dev)
{
	raw_spin_lock(&pmu_context->cpupm_lock);
	pmu_context->sys_insuspend = false;
	raw_spin_unlock(&pmu_context->cpupm_lock);
	return 0;
}

static const struct dev_pm_ops cpupm_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
				  exynos_cpupm_resume_noirq)
};

static struct platform_driver exynos_pmu_driver = {
	.driver  = {
		.name   = "exynos-pmu",
		.of_match_table = exynos_pmu_of_device_ids,
		.pm = pm_sleep_ptr(&cpupm_pm_ops),
	},
	.probe = exynos_pmu_probe,
};

static int __init exynos_pmu_init(void)
{
	return platform_driver_register(&exynos_pmu_driver);
}
postcore_initcall(exynos_pmu_init);