// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
//		http://www.samsung.com/
//
// Exynos - CPU PMU (Power Management Unit) support

#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include "exynos-pmu.h"

#define PMUALIVE_MASK			GENMASK(13, 0)
#define TENSOR_SET_BITS			(BIT(15) | BIT(14))
#define TENSOR_CLR_BITS			BIT(15)
#define TENSOR_SMC_PMU_SEC_REG		0x82000504
#define TENSOR_PMUREG_READ		0
#define TENSOR_PMUREG_WRITE		1
#define TENSOR_PMUREG_RMW		2

struct exynos_pmu_context {
	struct device *dev;
	const struct exynos_pmu_data *pmu_data;
	struct regmap *pmureg;
};

void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;

/*
 * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
 * from EL3, but are still read accessible. As Linux needs to write some of
 * these registers, the following functions are provided and exposed via
 * regmap.
 *
 * Note: This SMC interface is known to be implemented on gs101 and derivative
 * SoCs.
 */

/* Write to a protected PMU register. */
static int tensor_sec_reg_write(void *context, unsigned int reg,
				unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);

	/* Returns 0 on success, or -EINVAL if the access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/* Read/Modify/Write a protected PMU register. */
static int tensor_sec_reg_rmw(void *context, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);

	/* Returns 0 on success, or -EINVAL if the access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/*
 * Read a protected PMU register. All PMU registers can be read by Linux.
 * Note: the SMC read operation is not used, as only registers that can be
 * written are readable via SMC.
 */
static int tensor_sec_reg_read(void *context, unsigned int reg,
			       unsigned int *val)
{
	*val = pmu_raw_readl(reg);
	return 0;
}

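/*
 * Editorial sketch (not part of the original sources): on gs101-class SoCs a
 * regmap access to a protected register ends up as
 *
 *	regmap_write(pmureg, reg, val)
 *	  -> tensor_sec_reg_write(pmu_phys_base, reg, val)
 *	    -> SMC TENSOR_SMC_PMU_SEC_REG with op TENSOR_PMUREG_WRITE
 *	      -> EL3 firmware performs the actual PMU_ALIVE register write
 *
 * Reads never need to trap to EL3; tensor_sec_reg_read() above is a plain
 * MMIO read.
 */
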
/*
 * For SoCs that have set/clear bit hardware this function can be used when
 * the PMU register will be accessed by multiple masters.
 *
 * For example, to set bits 13:8 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
 *
 * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
 */
static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
				  u32 mask)
{
	int ret = 0;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(mask & BIT(i)))
			continue;

		offset &= ~TENSOR_SET_BITS;

		if (val & BIT(i))
			offset |= TENSOR_SET_BITS;
		else
			offset |= TENSOR_CLR_BITS;

		ret = tensor_sec_reg_write(ctx, offset, i);
		if (ret)
			return ret;
	}
	return ret;
}

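/*
 * Worked example (editorial, derived from the loop above): the call
 * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00) issues these secure
 * writes, one per masked bit:
 *
 *	reg = 0x3e80 | TENSOR_SET_BITS = 0xfe80, val = 8    (set bit 8)
 *	reg = 0x3e80 | TENSOR_CLR_BITS = 0xbe80, val = 9    (clear bit 9)
 *	...
 *	reg = 0x3e80 | TENSOR_CLR_BITS = 0xbe80, val = 13   (clear bit 13)
 *
 * i.e. the set/clear aliases take the bit index as the written value, so each
 * bit is updated without a read-modify-write race against other masters.
 */
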
static int tensor_sec_update_bits(void *ctx, unsigned int reg,
				  unsigned int mask, unsigned int val)
{
	/*
	 * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
	 * as the target registers can be accessed by multiple masters.
	 */
	if (reg > PMUALIVE_MASK)
		return tensor_sec_reg_rmw(ctx, reg, mask, val);

	return tensor_set_bits_atomic(ctx, reg, val, mask);
}

void pmu_raw_writel(u32 val, u32 offset)
{
	writel_relaxed(val, pmu_base_addr + offset);
}

u32 pmu_raw_readl(u32 offset)
{
	return readl_relaxed(pmu_base_addr + offset);
}

void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;
	const struct exynos_pmu_data *pmu_data;

	if (!pmu_context || !pmu_context->pmu_data)
		return;

	pmu_data = pmu_context->pmu_data;

	if (pmu_data->powerdown_conf)
		pmu_data->powerdown_conf(mode);

	if (pmu_data->pmu_config) {
		for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
			pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
				       pmu_data->pmu_config[i].offset);
	}

	if (pmu_data->powerdown_conf_extra)
		pmu_data->powerdown_conf_extra(mode);

	if (pmu_data->pmu_config_extra) {
		for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
				       pmu_data->pmu_config_extra[i].offset);
	}
}

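/*
 * Illustration (editorial, hypothetical values): each entry of a pmu_config
 * table provides one value per low-power mode, e.g.
 *
 *	{ EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2 } },
 *
 * and exynos_sys_powerdown_conf() above writes the column selected by 'mode'
 * (AFTR, LPA or SLEEP) to each listed register offset.
 */
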
/*
 * Split the data between ARM architectures because it is relatively big
 * and useless on other arch.
 */
#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
#define exynos_pmu_data_arm_ptr(data)	(&data)
#else
#define exynos_pmu_data_arm_ptr(data)	NULL
#endif

static const struct regmap_config regmap_smccfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
	.reg_read = tensor_sec_reg_read,
	.reg_write = tensor_sec_reg_write,
	.reg_update_bits = tensor_sec_update_bits,
};

static const struct regmap_config regmap_mmiocfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
};

static const struct exynos_pmu_data gs101_pmu_data = {
	.pmu_secure = true,
};

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id exynos_pmu_of_device_ids[] = {
	{
		.compatible = "google,gs101-pmu",
		.data = &gs101_pmu_data,
	}, {
		.compatible = "samsung,exynos3250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
	}, {
		.compatible = "samsung,exynos5410-pmu",
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
	}, {
		.compatible = "samsung,exynos5433-pmu",
	}, {
		.compatible = "samsung,exynos7-pmu",
	}, {
		.compatible = "samsung,exynos850-pmu",
	},
	{ /* sentinel */ },
};

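/*
 * Example devicetree node (editorial; the unit address and size below are
 * placeholders, consult the gs101 binding and DTS for the real values):
 *
 *	pmu_system_controller: system-controller@17460000 {
 *		compatible = "google,gs101-pmu", "syscon";
 *		reg = <0x17460000 0x10000>;
 *	};
 */
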
static const struct mfd_cell exynos_pmu_devs[] = {
	{ .name = "exynos-clkout", },
};

/**
 * exynos_get_pmu_regmap() - Obtain pmureg regmap
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap(void)
{
	struct device_node *np = of_find_matching_node(NULL,
						       exynos_pmu_of_device_ids);
	if (np)
		return exynos_get_pmu_regmap_by_phandle(np, NULL);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);

/**
 * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
 * @np: Device node holding PMU phandle property
 * @propname: Name of property holding phandle value
 *
 * Find the pmureg regmap previously configured in probe() and return regmap
 * pointer.
 *
 * Return: A pointer to regmap if found or ERR_PTR error value.
 */
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
						const char *propname)
{
	struct exynos_pmu_context *ctx;
	struct device_node *pmu_np;
	struct device *dev;

	if (propname)
		pmu_np = of_parse_phandle(np, propname, 0);
	else
		pmu_np = np;

	if (!pmu_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Determine if exynos-pmu device has probed and therefore regmap
	 * has been created and can be returned to the caller. Otherwise we
	 * return -EPROBE_DEFER.
	 */
	dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
					    (void *)pmu_np);

	if (propname)
		of_node_put(pmu_np);

	if (!dev)
		return ERR_PTR(-EPROBE_DEFER);

	ctx = dev_get_drvdata(dev);

	return ctx->pmureg;
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);

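/*
 * Illustrative consumer usage (editorial; the property name is only an
 * example, clients pass whatever phandle property their own binding defines):
 *
 *	struct regmap *pmureg;
 *
 *	pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node,
 *						  "samsung,pmu-syscon");
 *	if (IS_ERR(pmureg))
 *		return PTR_ERR(pmureg);
 *
 * The error may be -EPROBE_DEFER until the PMU device has probed, so callers
 * should propagate it. Register accesses then go through the normal regmap
 * API, e.g. regmap_update_bits(pmureg, offset, mask, val).
 */
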
static int exynos_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap_config pmu_regmcfg;
	struct regmap *regmap;
	struct resource *res;
	int ret;

	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pmu_base_addr))
		return PTR_ERR(pmu_base_addr);

	pmu_context = devm_kzalloc(&pdev->dev,
				   sizeof(struct exynos_pmu_context),
				   GFP_KERNEL);
	if (!pmu_context)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pmu_context->pmu_data = of_device_get_match_data(dev);

	/* SoCs with secured PMU register writes need the SMC-based regmap */
	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
		pmu_regmcfg = regmap_smccfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		/* Need physical address for SMC call */
		regmap = devm_regmap_init(dev, NULL,
					  (void *)(uintptr_t)res->start,
					  &pmu_regmcfg);
	} else {
		/* All other SoCs use an MMIO regmap */
		pmu_regmcfg = regmap_mmiocfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		regmap = devm_regmap_init_mmio(dev, pmu_base_addr,
					       &pmu_regmcfg);
	}

	if (IS_ERR(regmap))
		return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
				     "regmap init failed\n");

	pmu_context->pmureg = regmap;
	pmu_context->dev = dev;

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
		pmu_context->pmu_data->pmu_init();

	platform_set_drvdata(pdev, pmu_context);

	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
				   ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
	if (ret)
		return ret;

	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");

	dev_dbg(dev, "Exynos PMU Driver probe done\n");
	return 0;
}

static struct platform_driver exynos_pmu_driver = {
	.driver  = {
		.name   = "exynos-pmu",
		.of_match_table = exynos_pmu_of_device_ids,
	},
	.probe = exynos_pmu_probe,
};

static int __init exynos_pmu_init(void)
{
	return platform_driver_register(&exynos_pmu_driver);
}
postcore_initcall(exynos_pmu_init);