/*
 * OMAP MPUSS low power code
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
 * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
 * CPU0 and CPU1 LPRM modules.
 * CPU0, CPU1 and MPUSS each have their own power domain and
 * hence multiple low power combinations of MPUSS are possible.
 *
 * The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
 * because the mode is not supported by hw constraints of dormant
 * mode. While waking up from the dormant mode, a reset signal
 * to the Cortex-A9 processor must be asserted by the external
 * power controller.
 *
 * With architectural inputs and hardware recommendations, only
 * below modes are supported from power gain vs latency point of view.
 *
 *	CPU0		CPU1		MPUSS
 *	----------------------------------------------
 *	ON		ON		ON
 *	ON(Inactive)	OFF		ON(Inactive)
 *	OFF		OFF		CSWR
 *	OFF		OFF		OSWR
 *	OFF		OFF		OFF(Device OFF *TBD)
 *	----------------------------------------------
 *
 * Note: CPU0 is the master core and it is the last CPU to go down
 * and first to wake-up when MPUSS low power states are exercised
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
38 */ 39 40 #include <linux/kernel.h> 41 #include <linux/io.h> 42 #include <linux/errno.h> 43 #include <linux/linkage.h> 44 #include <linux/smp.h> 45 46 #include <asm/cacheflush.h> 47 #include <asm/tlbflush.h> 48 #include <asm/smp_scu.h> 49 #include <asm/pgalloc.h> 50 #include <asm/suspend.h> 51 #include <asm/hardware/cache-l2x0.h> 52 53 #include "soc.h" 54 #include "common.h" 55 #include "omap44xx.h" 56 #include "omap4-sar-layout.h" 57 #include "pm.h" 58 #include "prcm_mpu44xx.h" 59 #include "prcm_mpu54xx.h" 60 #include "prminst44xx.h" 61 #include "prcm44xx.h" 62 #include "prm44xx.h" 63 #include "prm-regbits-44xx.h" 64 65 #ifdef CONFIG_SMP 66 67 struct omap4_cpu_pm_info { 68 struct powerdomain *pwrdm; 69 void __iomem *scu_sar_addr; 70 void __iomem *wkup_sar_addr; 71 void __iomem *l2x0_sar_addr; 72 }; 73 74 /** 75 * struct cpu_pm_ops - CPU pm operations 76 * @finish_suspend: CPU suspend finisher function pointer 77 * @resume: CPU resume function pointer 78 * @scu_prepare: CPU Snoop Control program function pointer 79 * @hotplug_restart: CPU restart function pointer 80 * 81 * Structure holds functions pointer for CPU low power operations like 82 * suspend, resume and scu programming. 
83 */ 84 struct cpu_pm_ops { 85 int (*finish_suspend)(unsigned long cpu_state); 86 void (*resume)(void); 87 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state); 88 void (*hotplug_restart)(void); 89 }; 90 91 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); 92 static struct powerdomain *mpuss_pd; 93 static void __iomem *sar_base; 94 static u32 cpu_context_offset; 95 96 static int default_finish_suspend(unsigned long cpu_state) 97 { 98 omap_do_wfi(); 99 return 0; 100 } 101 102 static void dummy_cpu_resume(void) 103 {} 104 105 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state) 106 {} 107 108 static struct cpu_pm_ops omap_pm_ops = { 109 .finish_suspend = default_finish_suspend, 110 .resume = dummy_cpu_resume, 111 .scu_prepare = dummy_scu_prepare, 112 .hotplug_restart = dummy_cpu_resume, 113 }; 114 115 /* 116 * Program the wakeup routine address for the CPU0 and CPU1 117 * used for OFF or DORMANT wakeup. 118 */ 119 static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr) 120 { 121 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 122 123 if (pm_info->wkup_sar_addr) 124 writel_relaxed(addr, pm_info->wkup_sar_addr); 125 } 126 127 /* 128 * Store the SCU power status value to scratchpad memory 129 */ 130 static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) 131 { 132 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 133 u32 scu_pwr_st; 134 135 switch (cpu_state) { 136 case PWRDM_POWER_RET: 137 scu_pwr_st = SCU_PM_DORMANT; 138 break; 139 case PWRDM_POWER_OFF: 140 scu_pwr_st = SCU_PM_POWEROFF; 141 break; 142 case PWRDM_POWER_ON: 143 case PWRDM_POWER_INACTIVE: 144 default: 145 scu_pwr_st = SCU_PM_NORMAL; 146 break; 147 } 148 149 if (pm_info->scu_sar_addr) 150 writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr); 151 } 152 153 /* Helper functions for MPUSS OSWR */ 154 static inline void mpuss_clear_prev_logic_pwrst(void) 155 { 156 u32 reg; 157 158 reg = 
omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, 159 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); 160 omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION, 161 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); 162 } 163 164 static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id) 165 { 166 u32 reg; 167 168 if (cpu_id) { 169 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST, 170 cpu_context_offset); 171 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST, 172 cpu_context_offset); 173 } else { 174 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST, 175 cpu_context_offset); 176 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST, 177 cpu_context_offset); 178 } 179 } 180 181 /* 182 * Store the CPU cluster state for L2X0 low power operations. 183 */ 184 static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state) 185 { 186 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 187 188 if (pm_info->l2x0_sar_addr) 189 writel_relaxed(save_state, pm_info->l2x0_sar_addr); 190 } 191 192 /* 193 * Save the L2X0 AUXCTRL and POR value to SAR memory. Its used to 194 * in every restore MPUSS OFF path. 195 */ 196 #ifdef CONFIG_CACHE_L2X0 197 static void __init save_l2x0_context(void) 198 { 199 void __iomem *l2x0_base = omap4_get_l2cache_base(); 200 201 if (l2x0_base && sar_base) { 202 writel_relaxed(l2x0_saved_regs.aux_ctrl, 203 sar_base + L2X0_AUXCTRL_OFFSET); 204 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 205 sar_base + L2X0_PREFETCH_CTRL_OFFSET); 206 } 207 } 208 #else 209 static void __init save_l2x0_context(void) 210 {} 211 #endif 212 213 /** 214 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function 215 * The purpose of this function is to manage low power programming 216 * of OMAP4 MPUSS subsystem 217 * @cpu : CPU ID 218 * @power_state: Low power state. 
219 * 220 * MPUSS states for the context save: 221 * save_state = 222 * 0 - Nothing lost and no need to save: MPUSS INACTIVE 223 * 1 - CPUx L1 and logic lost: MPUSS CSWR 224 * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR 225 * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF 226 */ 227 int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) 228 { 229 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); 230 unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET; 231 unsigned int wakeup_cpu; 232 233 if (omap_rev() == OMAP4430_REV_ES1_0) 234 return -ENXIO; 235 236 switch (power_state) { 237 case PWRDM_POWER_ON: 238 case PWRDM_POWER_INACTIVE: 239 save_state = 0; 240 break; 241 case PWRDM_POWER_OFF: 242 cpu_logic_state = PWRDM_POWER_OFF; 243 save_state = 1; 244 break; 245 case PWRDM_POWER_RET: 246 if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE)) { 247 save_state = 0; 248 break; 249 } 250 default: 251 /* 252 * CPUx CSWR is invalid hardware state. Also CPUx OSWR 253 * doesn't make much scense, since logic is lost and $L1 254 * needs to be cleaned because of coherency. This makes 255 * CPUx OSWR equivalent to CPUX OFF and hence not supported 256 */ 257 WARN_ON(1); 258 return -ENXIO; 259 } 260 261 pwrdm_pre_transition(NULL); 262 263 /* 264 * Check MPUSS next state and save interrupt controller if needed. 265 * In MPUSS OSWR or device OFF, interrupt controller contest is lost. 
266 */ 267 mpuss_clear_prev_logic_pwrst(); 268 if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) && 269 (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF)) 270 save_state = 2; 271 272 cpu_clear_prev_logic_pwrst(cpu); 273 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); 274 pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state); 275 set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume)); 276 omap_pm_ops.scu_prepare(cpu, power_state); 277 l2x0_pwrst_prepare(cpu, save_state); 278 279 /* 280 * Call low level function with targeted low power state. 281 */ 282 if (save_state) 283 cpu_suspend(save_state, omap_pm_ops.finish_suspend); 284 else 285 omap_pm_ops.finish_suspend(save_state); 286 287 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu) 288 gic_dist_enable(); 289 290 /* 291 * Restore the CPUx power state to ON otherwise CPUx 292 * power domain can transitions to programmed low power 293 * state while doing WFI outside the low powe code. On 294 * secure devices, CPUx does WFI which can result in 295 * domain transition 296 */ 297 wakeup_cpu = smp_processor_id(); 298 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 299 300 pwrdm_post_transition(NULL); 301 302 return 0; 303 } 304 305 /** 306 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry 307 * @cpu : CPU ID 308 * @power_state: CPU low power state. 
309 */ 310 int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) 311 { 312 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); 313 unsigned int cpu_state = 0; 314 315 if (omap_rev() == OMAP4430_REV_ES1_0) 316 return -ENXIO; 317 318 /* Use the achievable power state for the domain */ 319 power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm, 320 false, power_state); 321 322 if (power_state == PWRDM_POWER_OFF) 323 cpu_state = 1; 324 325 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 326 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); 327 set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.hotplug_restart)); 328 omap_pm_ops.scu_prepare(cpu, power_state); 329 330 /* 331 * CPU never retuns back if targeted power state is OFF mode. 332 * CPU ONLINE follows normal CPU ONLINE ptah via 333 * omap4_secondary_startup(). 334 */ 335 omap_pm_ops.finish_suspend(cpu_state); 336 337 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 338 return 0; 339 } 340 341 342 /* 343 * Enable Mercury Fast HG retention mode by default. 
344 */ 345 static void enable_mercury_retention_mode(void) 346 { 347 u32 reg; 348 349 reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST, 350 OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET); 351 /* Enable HG_EN, HG_RAMPUP = fast mode */ 352 reg |= BIT(24) | BIT(25); 353 omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST, 354 OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET); 355 } 356 357 /* 358 * Initialise OMAP4 MPUSS 359 */ 360 int __init omap4_mpuss_init(void) 361 { 362 struct omap4_cpu_pm_info *pm_info; 363 364 if (omap_rev() == OMAP4430_REV_ES1_0) { 365 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); 366 return -ENODEV; 367 } 368 369 if (cpu_is_omap44xx()) 370 sar_base = omap4_get_sar_ram_base(); 371 372 /* Initilaise per CPU PM information */ 373 pm_info = &per_cpu(omap4_pm_info, 0x0); 374 if (sar_base) { 375 pm_info->scu_sar_addr = sar_base + SCU_OFFSET0; 376 pm_info->wkup_sar_addr = sar_base + 377 CPU0_WAKEUP_NS_PA_ADDR_OFFSET; 378 pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0; 379 } 380 pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm"); 381 if (!pm_info->pwrdm) { 382 pr_err("Lookup failed for CPU0 pwrdm\n"); 383 return -ENODEV; 384 } 385 386 /* Clear CPU previous power domain state */ 387 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 388 cpu_clear_prev_logic_pwrst(0); 389 390 /* Initialise CPU0 power domain state to ON */ 391 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 392 393 pm_info = &per_cpu(omap4_pm_info, 0x1); 394 if (sar_base) { 395 pm_info->scu_sar_addr = sar_base + SCU_OFFSET1; 396 pm_info->wkup_sar_addr = sar_base + 397 CPU1_WAKEUP_NS_PA_ADDR_OFFSET; 398 pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1; 399 } 400 401 pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm"); 402 if (!pm_info->pwrdm) { 403 pr_err("Lookup failed for CPU1 pwrdm\n"); 404 return -ENODEV; 405 } 406 407 /* Clear CPU previous power domain state */ 408 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 409 cpu_clear_prev_logic_pwrst(1); 410 411 /* 
Initialise CPU1 power domain state to ON */ 412 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 413 414 mpuss_pd = pwrdm_lookup("mpu_pwrdm"); 415 if (!mpuss_pd) { 416 pr_err("Failed to lookup MPUSS power domain\n"); 417 return -ENODEV; 418 } 419 pwrdm_clear_all_prev_pwrst(mpuss_pd); 420 mpuss_clear_prev_logic_pwrst(); 421 422 if (sar_base) { 423 /* Save device type on scratchpad for low level code to use */ 424 writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0, 425 sar_base + OMAP_TYPE_OFFSET); 426 save_l2x0_context(); 427 } 428 429 if (cpu_is_omap44xx()) { 430 omap_pm_ops.finish_suspend = omap4_finish_suspend; 431 omap_pm_ops.resume = omap4_cpu_resume; 432 omap_pm_ops.scu_prepare = scu_pwrst_prepare; 433 omap_pm_ops.hotplug_restart = omap4_secondary_startup; 434 cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET; 435 } else if (soc_is_omap54xx() || soc_is_dra7xx()) { 436 cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET; 437 enable_mercury_retention_mode(); 438 } 439 440 if (cpu_is_omap446x()) 441 omap_pm_ops.hotplug_restart = omap4460_secondary_startup; 442 443 return 0; 444 } 445 446 #endif 447