// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
//		http://www.samsung.com/
//
// Exynos - CPU PMU(Power Management Unit) support

#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include "exynos-pmu.h"

#define PMUALIVE_MASK			GENMASK(13, 0)
#define TENSOR_SET_BITS			(BIT(15) | BIT(14))
#define TENSOR_CLR_BITS			BIT(15)
#define TENSOR_SMC_PMU_SEC_REG		0x82000504
#define TENSOR_PMUREG_READ		0
#define TENSOR_PMUREG_WRITE		1
#define TENSOR_PMUREG_RMW		2

struct exynos_pmu_context {
	struct device *dev;
	const struct exynos_pmu_data *pmu_data;
	struct regmap *pmureg;
};

void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;

/*
 * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
 * from EL3, but are still read accessible. As Linux needs to write some of
 * these registers, the following functions are provided and exposed via
 * regmap.
 *
 * Note: This SMC interface is known to be implemented on gs101 and derivative
 * SoCs.
 */

/* Write to a protected PMU register. */
static int tensor_sec_reg_write(void *context, unsigned int reg,
				unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);

	/* Returns 0 on success, -EINVAL if the access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/* Read/Modify/Write a protected PMU register. */
static int tensor_sec_reg_rmw(void *context, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	struct arm_smccc_res res;
	unsigned long pmu_base = (unsigned long)context;

	arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
		      TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);

	/* Returns 0 on success, -EINVAL if the access isn't allowed. */
	if (res.a0)
		pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);

	return (int)res.a0;
}

/*
 * Read a protected PMU register. All PMU registers can be read by Linux.
 * Note: the SMC read operation is not used, as only registers that can be
 * written are readable via SMC.
 */
static int tensor_sec_reg_read(void *context, unsigned int reg,
			       unsigned int *val)
{
	*val = pmu_raw_readl(reg);
	return 0;
}
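
/*
 * Illustrative flow only, not extra driver code: a write to a protected
 * register such as
 *
 *	regmap_write(pmureg, 0x3e80, 0x1);
 *
 * is routed by the regmap core to tensor_sec_reg_write() above and carried
 * out by EL3 firmware via the TENSOR_SMC_PMU_SEC_REG SMC, whereas reads stay
 * in the kernel as plain MMIO through tensor_sec_reg_read(). The 0x3e80
 * offset is simply reused from the example in the comment below.
 */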

/*
 * For SoCs that have set/clear bit hardware this function can be used when
 * the PMU register will be accessed by multiple masters.
 *
 * For example, to set bits 13:8 in PMU reg offset 0x3e80:
 *	tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
 *
 * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
 *	tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
 */
static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
				  u32 mask)
{
	int ret;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(mask & BIT(i)))
			continue;

		/*
		 * The operation is encoded in the register offset: both
		 * TENSOR_SET_BITS set selects a bit-set access, only
		 * TENSOR_CLR_BITS set selects a bit-clear access. The value
		 * written is the index of the bit to modify.
		 */
		offset &= ~TENSOR_SET_BITS;

		if (val & BIT(i))
			offset |= TENSOR_SET_BITS;
		else
			offset |= TENSOR_CLR_BITS;

		ret = tensor_sec_reg_write(ctx, offset, i);
		if (ret)
			return ret;
	}

	return 0;
}

static bool tensor_is_atomic(unsigned int reg)
{
	/*
	 * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF),
	 * as the target registers can be accessed by multiple masters. SFRs
	 * that don't support atomic set/clear are listed in the switch
	 * statement below.
	 */
	if (reg > PMUALIVE_MASK)
		return false;

	switch (reg) {
	case GS101_SYSIP_DAT0:
	case GS101_SYSTEM_CONFIGURATION:
		return false;
	default:
		return true;
	}
}

static int tensor_sec_update_bits(void *ctx, unsigned int reg,
				  unsigned int mask, unsigned int val)
{
	if (!tensor_is_atomic(reg))
		return tensor_sec_reg_rmw(ctx, reg, mask, val);

	return tensor_set_bits_atomic(ctx, reg, val, mask);
}

void pmu_raw_writel(u32 val, u32 offset)
{
	writel_relaxed(val, pmu_base_addr + offset);
}

u32 pmu_raw_readl(u32 offset)
{
	return readl_relaxed(pmu_base_addr + offset);
}

void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;
	const struct exynos_pmu_data *pmu_data;

	if (!pmu_context || !pmu_context->pmu_data)
		return;

	pmu_data = pmu_context->pmu_data;

	if (pmu_data->powerdown_conf)
		pmu_data->powerdown_conf(mode);

	if (pmu_data->pmu_config) {
		for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
			pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
				       pmu_data->pmu_config[i].offset);
	}

	if (pmu_data->powerdown_conf_extra)
		pmu_data->powerdown_conf_extra(mode);

	if (pmu_data->pmu_config_extra) {
		for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
				       pmu_data->pmu_config_extra[i].offset);
	}
}

/*
 * Split the data between ARM architectures because it is relatively big
 * and useless on other architectures.
 */
#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
#define exynos_pmu_data_arm_ptr(data)	(&data)
#else
#define exynos_pmu_data_arm_ptr(data)	NULL
#endif
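
/*
 * Regmap config used when pmu_data->pmu_secure is set (e.g. gs101): writes
 * and updates go through the SMC helpers above rather than direct MMIO.
 * exynos_pmu_probe() passes the physical PMU base address as the regmap bus
 * context, which the reg_read/reg_write/reg_update_bits callbacks receive
 * as their 'context' argument.
 */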

static const struct regmap_config regmap_smccfg = {
	.name = "pmu_regs",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
	.use_single_read = true,
	.use_single_write = true,
	.reg_read = tensor_sec_reg_read,
	.reg_write = tensor_sec_reg_write,
	.reg_update_bits = tensor_sec_update_bits,
};

static const struct exynos_pmu_data gs101_pmu_data = {
	.pmu_secure = true
};

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id exynos_pmu_of_device_ids[] = {
	{
		.compatible = "google,gs101-pmu",
		.data = &gs101_pmu_data,
	}, {
		.compatible = "samsung,exynos3250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
	}, {
		.compatible = "samsung,exynos5410-pmu",
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
	}, {
		.compatible = "samsung,exynos5433-pmu",
	}, {
		.compatible = "samsung,exynos7-pmu",
	}, {
		.compatible = "samsung,exynos850-pmu",
	},
	{ /* sentinel */ },
};

static const struct mfd_cell exynos_pmu_devs[] = {
	{ .name = "exynos-clkout", },
};

/**
 * exynos_get_pmu_regmap() - Obtain the pmureg regmap
 *
 * Find the pmureg regmap previously configured in probe() and return a
 * pointer to it.
 *
 * Return: A pointer to the regmap if found, or an ERR_PTR() error value.
 */
struct regmap *exynos_get_pmu_regmap(void)
{
	struct device_node *np = of_find_matching_node(NULL,
						       exynos_pmu_of_device_ids);
	if (np)
		return exynos_get_pmu_regmap_by_phandle(np, NULL);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);

/**
 * exynos_get_pmu_regmap_by_phandle() - Obtain the pmureg regmap via phandle
 * @np: Device node holding the PMU phandle property
 * @propname: Name of the property holding the phandle value
 *
 * Find the pmureg regmap previously configured in probe() and return a
 * pointer to it.
 *
 * Return: A pointer to the regmap if found, or an ERR_PTR() error value.
 */
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
						const char *propname)
{
	struct device_node *pmu_np;
	struct device *dev;

	if (propname)
		pmu_np = of_parse_phandle(np, propname, 0);
	else
		pmu_np = np;

	if (!pmu_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Determine if the exynos-pmu device has probed and therefore the
	 * regmap has been created and can be returned to the caller.
	 * Otherwise return -EPROBE_DEFER.
	 */
	dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
					    (void *)pmu_np);

	if (propname)
		of_node_put(pmu_np);

	if (!dev)
		return ERR_PTR(-EPROBE_DEFER);

	return syscon_node_to_regmap(pmu_np);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
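
/*
 * Typical consumer usage sketch (illustrative only; "samsung,pmu-syscon" is
 * a hypothetical property name, the consumer's DT binding defines the real
 * one):
 *
 *	regmap = exynos_get_pmu_regmap_by_phandle(dev->of_node,
 *						  "samsung,pmu-syscon");
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);
 *
 * The error may be -EPROBE_DEFER until this driver has probed.
 */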

static int exynos_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap_config pmu_regmcfg;
	struct regmap *regmap;
	struct resource *res;
	int ret;

	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pmu_base_addr))
		return PTR_ERR(pmu_base_addr);

	pmu_context = devm_kzalloc(&pdev->dev,
				   sizeof(struct exynos_pmu_context),
				   GFP_KERNEL);
	if (!pmu_context)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pmu_context->pmu_data = of_device_get_match_data(dev);

	/* SoCs with secured PMU register writes need the custom SMC regmap */
	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
		pmu_regmcfg = regmap_smccfg;
		pmu_regmcfg.max_register = resource_size(res) -
					   pmu_regmcfg.reg_stride;
		/* Need the physical address for the SMC call */
		regmap = devm_regmap_init(dev, NULL,
					  (void *)(uintptr_t)res->start,
					  &pmu_regmcfg);

		if (IS_ERR(regmap))
			return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
					     "regmap init failed\n");

		ret = of_syscon_register_regmap(dev->of_node, regmap);
		if (ret)
			return ret;
	} else {
		/* let syscon create the MMIO regmap */
		regmap = syscon_node_to_regmap(dev->of_node);
		if (IS_ERR(regmap))
			return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
					     "syscon_node_to_regmap failed\n");
	}

	pmu_context->pmureg = regmap;
	pmu_context->dev = dev;

	if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
		pmu_context->pmu_data->pmu_init();

	platform_set_drvdata(pdev, pmu_context);

	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
				   ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
	if (ret)
		return ret;

	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");

	dev_dbg(dev, "Exynos PMU Driver probe done\n");
	return 0;
}

static struct platform_driver exynos_pmu_driver = {
	.driver = {
		.name = "exynos-pmu",
		.of_match_table = exynos_pmu_of_device_ids,
	},
	.probe = exynos_pmu_probe,
};

static int __init exynos_pmu_init(void)
{
	return platform_driver_register(&exynos_pmu_driver);
}
postcore_initcall(exynos_pmu_init);
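
/*
 * Illustrative devicetree node consumed by this driver (address and size are
 * placeholders; see the Samsung Exynos PMU DT bindings for real values):
 *
 *	pmu_system_controller: system-controller@10480000 {
 *		compatible = "samsung,exynos850-pmu", "syscon";
 *		reg = <0x10480000 0x10000>;
 *	};
 *
 * Child devices, such as the "exynos-clkout" MFD cell and any subnodes, are
 * instantiated from exynos_pmu_probe().
 */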