/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * arch/arm/mach-exynos/mcpm-exynos.c
 *
 * Based on arch/arm/mach-vexpress/dcscb.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/arm-cci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>

#include "regs-pmu.h"
#include "common.h"

#define EXYNOS5420_CPUS_PER_CLUSTER	4
#define EXYNOS5420_NR_CLUSTERS		2

#define EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN	BIT(9)
#define EXYNOS5420_USE_ARM_CORE_DOWN_STATE	BIT(29)
#define EXYNOS5420_USE_L2_COMMON_UP_STATE	BIT(30)

/*
 * The common v7_exit_coherency_flush API could not be used because of the
 * Erratum 799270 workaround. This macro is the same as the common one (in
 * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
 */
#define exynos_v7_exit_coherency_flush(level) \
	asm volatile( \
	"stmfd	sp!, {fp, ip}\n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR\n\t" \
	"bic	r0, r0, #"__stringify(CR_C)"\n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR\n\t" \
	"isb\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)"\n\t" \
	"clrex\n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR\n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency\n\t" \
	/* Dummy Load of a device register to avoid Erratum 799270 */ \
	"ldr	r4, [%0]\n\t" \
	"and	r4, r4, #0\n\t" \
	"orr	r0, r0, r4\n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR\n\t" \
	"isb\n\t" \
	"dsb\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: \
	: "Ir" (S5P_INFORM0) \
	: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
	  "r9", "r10", "lr", "memory")

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int
cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];

#define exynos_cluster_usecnt(cluster) \
	(cpu_use_count[0][cluster] +   \
	 cpu_use_count[1][cluster] +   \
	 cpu_use_count[2][cluster] +   \
	 cpu_use_count[3][cluster])

#define exynos_cluster_unused(cluster) (!exynos_cluster_usecnt(cluster))

static int exynos_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
	    cluster >= EXYNOS5420_NR_CLUSTERS)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&exynos_mcpm_lock);

	cpu_use_count[cpu][cluster]++;
	if (cpu_use_count[cpu][cluster] == 1) {
		bool was_cluster_down =
			(exynos_cluster_usecnt(cluster) == 1);

		/*
		 * Turn on the cluster (L2/COMMON) and then power on the
		 * cores.
		 */
		if (was_cluster_down)
			exynos_cluster_power_up(cluster);

		exynos_cpu_power_up(cpunr);
	} else if (cpu_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&exynos_mcpm_lock);
	local_irq_enable();

	return 0;
}
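
/*
 * Illustrative sketch (not part of the original driver): the generic MCPM
 * layer is expected to reach the .power_up callback above through
 * mcpm_cpu_power_up(cpu, cluster), typically from the SMP bring-up path.
 * Assuming the standard MCPM API, a hypothetical caller would look roughly
 * like this:
 *
 *	static int hypothetical_boot_secondary(unsigned int cpu,
 *					       struct task_struct *idle)
 *	{
 *		unsigned int mpidr = cpu_logical_map(cpu);
 *
 *		return mcpm_cpu_power_up(MPIDR_AFFINITY_LEVEL(mpidr, 0),
 *					 MPIDR_AFFINITY_LEVEL(mpidr, 1));
 *	}
 *
 * i.e. exynos_power_up() sees the physical CPU/cluster pair decoded from
 * the target CPU's MPIDR, not the logical CPU number.
 */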

/*
 * NOTE: This function requires the stack data to be visible through power down
 * and can only be executed on processors like A15 and A7 that hit the cache
 * with the C bit clear in the SCTLR register.
 */
static void exynos_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;
	unsigned int cpunr;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
	       cluster >= EXYNOS5420_NR_CLUSTERS);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&exynos_mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	cpu_use_count[cpu][cluster]--;
	if (cpu_use_count[cpu][cluster] == 0) {
		exynos_cpu_power_down(cpunr);

		if (exynos_cluster_unused(cluster)) {
			exynos_cluster_power_down(cluster);
			last_man = true;
		}
	} else if (cpu_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted. So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else {
		BUG();
	}

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&exynos_mcpm_lock);

		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3\n\t"
			"isb\n\t"
			"dsb"
			: : "r" (0x400));
		}

		/* Flush all cache levels for this cluster. */
		exynos_v7_exit_coherency_flush(all);

		/*
		 * Disable cluster-level coherency by masking
		 * incoming snoops and DVM messages:
		 */
		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		arch_spin_unlock(&exynos_mcpm_lock);

		/* Disable and flush the local CPU cache. */
		exynos_v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point? Let our caller cope. */
}
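
/*
 * Illustrative note (not part of the original driver): .power_down is
 * normally entered via mcpm_cpu_power_down() from the CPU hotplug or
 * cpuidle paths. If wfi() above is skipped or returns (for example because
 * a racing power_up request kept the CPU alive), control goes back to the
 * generic MCPM layer, which is expected to soft-restart the CPU through
 * mcpm_entry_point rather than resume normal execution here; hence the
 * "Let our caller cope" comment. Under that assumption the sequence is
 * roughly:
 *
 *	mcpm_cpu_power_down()
 *	  -> exynos_power_down()		(flush caches, last-man work)
 *	  -> wfi()				(core actually powers off here)
 *	  -> cpu_reset(mcpm_entry_point)	(only if the core did not die)
 */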

static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned int tries = 100;
	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
	       cluster >= EXYNOS5420_NR_CLUSTERS);

	/* Wait for the core state to be OFF */
	while (tries--) {
		if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
			if (exynos_cpu_power_state(cpunr) == 0)
				return 0; /* success: the CPU is halted */
		}

		/* Otherwise, wait and retry: */
		msleep(1);
	}

	return -ETIMEDOUT; /* timeout */
}

static void exynos_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	arch_spin_lock(&exynos_mcpm_lock);
	if (cpu_use_count[cpu][cluster] == 0)
		cpu_use_count[cpu][cluster] = 1;
	arch_spin_unlock(&exynos_mcpm_lock);
}

static void exynos_suspend(u64 residency)
{
	unsigned int mpidr, cpunr;

	exynos_power_down();

	/*
	 * Execution reaches here only if the CPU did not power down.
	 * Hence roll back the changes done in exynos_power_down().
	 *
	 * CAUTION: "This function requires the stack data to be visible through
	 * power down and can only be executed on processors like A15 and A7
	 * that hit the cache with the C bit clear in the SCTLR register."
	 */
	mpidr = read_cpuid_mpidr();
	cpunr = exynos_pmu_cpunr(mpidr);

	exynos_cpu_power_up(cpunr);
}

static const struct mcpm_platform_ops exynos_power_ops = {
	.power_up		= exynos_power_up,
	.power_down		= exynos_power_down,
	.wait_for_powerdown	= exynos_wait_for_powerdown,
	.suspend		= exynos_suspend,
	.powered_up		= exynos_powered_up,
};

static void __init exynos_mcpm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
	       cluster >= EXYNOS5420_NR_CLUSTERS);

	/* Only the boot CPU is running at this point; mark it as in use. */
	cpu_use_count[cpu][cluster] = 1;
}
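
/*
 * Illustrative note (not part of the original driver): the function below is
 * not called from C. mcpm_sync_init() records it, and the low-level MCPM
 * entry code (arch/arm/common/mcpm_head.S) is expected to branch to it with
 * the MMU and caches still off, passing the affinity level being brought up
 * in r0 (1 for the cluster level on the first man, 0 for the CPU level).
 * That is why it is marked __naked, touches only registers, and simply
 * returns for any level other than the cluster:
 *
 *	r0 == 1  ->  b cci_enable_port_for_self   (enable CCI snoops for us)
 *	r0 != 1  ->  bx lr                        (nothing to do)
 */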

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile ("\n"
	"cmp	r0, #1\n"
	"bxne	lr\n"
	"b	cci_enable_port_for_self");
}

static const struct of_device_id exynos_dt_mcpm_match[] = {
	{ .compatible = "samsung,exynos5420" },
	{ .compatible = "samsung,exynos5800" },
	{},
};

static int __init exynos_mcpm_init(void)
{
	struct device_node *node;
	void __iomem *ns_sram_base_addr;
	unsigned int value, i;
	int ret;

	node = of_find_matching_node(NULL, exynos_dt_mcpm_match);
	if (!node)
		return -ENODEV;
	of_node_put(node);

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL,
					"samsung,exynos4210-sysram-ns");
	if (!node)
		return -ENODEV;

	ns_sram_base_addr = of_iomap(node, 0);
	of_node_put(node);
	if (!ns_sram_base_addr) {
		pr_err("failed to map non-secure iRAM base address\n");
		return -ENOMEM;
	}

	/*
	 * To increase the stability of KFC reset we need to program
	 * the PMU SPARE3 register.
	 */
	__raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);

	exynos_mcpm_usage_count_init();

	ret = mcpm_platform_register(&exynos_power_ops);
	if (!ret)
		ret = mcpm_sync_init(exynos_pm_power_up_setup);
	if (ret) {
		iounmap(ns_sram_base_addr);
		return ret;
	}

	mcpm_smp_set_ops();

	pr_info("Exynos MCPM support installed\n");

	/*
	 * On Exynos5420/5800 for the A15 and A7 clusters:
	 *
	 * EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN ensures that all the cores
	 * in a cluster are turned off before turning off the cluster L2.
	 *
	 * EXYNOS5420_USE_ARM_CORE_DOWN_STATE ensures that a core is powered
	 * off before waking it up.
	 *
	 * EXYNOS5420_USE_L2_COMMON_UP_STATE ensures that the cluster L2 will
	 * be turned on before the first man is powered up.
	 */
	for (i = 0; i < EXYNOS5420_NR_CLUSTERS; i++) {
		value = __raw_readl(EXYNOS_COMMON_OPTION(i));
		value |= EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN |
			 EXYNOS5420_USE_ARM_CORE_DOWN_STATE |
			 EXYNOS5420_USE_L2_COMMON_UP_STATE;
		__raw_writel(value, EXYNOS_COMMON_OPTION(i));
	}

	/*
	 * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
	 * as part of secondary_cpu_start(). Let's redirect it to
	 * mcpm_entry_point().
	 */
	__raw_writel(0xe59f0000, ns_sram_base_addr);     /* ldr r0, [pc, #0] */
	__raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */
	__raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);

	iounmap(ns_sram_base_addr);

	return ret;
}

early_initcall(exynos_mcpm_init);
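
/*
 * Illustrative note (not part of the original driver): after
 * exynos_mcpm_init() has run, the start of the non-secure SRAM is expected
 * to contain the small trampoline written above, so that a secondary core
 * released by U-Boot SPL falls through into the MCPM entry point:
 *
 *	offset 0x0:  0xe59f0000   ldr r0, [pc, #0]  @ PC reads as .+8 in ARM
 *	                                            @ state, so this loads the
 *	                                            @ word at offset 0x8
 *	offset 0x4:  0xe12fff10   bx  r0
 *	offset 0x8:  virt_to_phys(mcpm_entry_point)
 */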