1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * OMAP3 Power Management Routines 4 * 5 * Copyright (C) 2006-2008 Nokia Corporation 6 * Tony Lindgren <tony@atomide.com> 7 * Jouni Hogander 8 * 9 * Copyright (C) 2007 Texas Instruments, Inc. 10 * Rajendra Nayak <rnayak@ti.com> 11 * 12 * Copyright (C) 2005 Texas Instruments, Inc. 13 * Richard Woodruff <r-woodruff2@ti.com> 14 * 15 * Based on pm.c for omap1 16 */ 17 18 #include <linux/cpu_pm.h> 19 #include <linux/pm.h> 20 #include <linux/suspend.h> 21 #include <linux/interrupt.h> 22 #include <linux/module.h> 23 #include <linux/list.h> 24 #include <linux/err.h> 25 #include <linux/clk.h> 26 #include <linux/delay.h> 27 #include <linux/slab.h> 28 #include <linux/omap-gpmc.h> 29 30 #include <trace/events/power.h> 31 32 #include <asm/fncpy.h> 33 #include <asm/suspend.h> 34 #include <asm/system_misc.h> 35 36 #include "clockdomain.h" 37 #include "powerdomain.h" 38 #include "soc.h" 39 #include "common.h" 40 #include "cm3xxx.h" 41 #include "cm-regbits-34xx.h" 42 #include "prm-regbits-34xx.h" 43 #include "prm3xxx.h" 44 #include "pm.h" 45 #include "sdrc.h" 46 #include "omap-secure.h" 47 #include "sram.h" 48 #include "control.h" 49 #include "vc.h" 50 51 /* pm34xx errata defined in pm.h */ 52 u16 pm34xx_errata; 53 54 struct power_state { 55 struct powerdomain *pwrdm; 56 u32 next_state; 57 #ifdef CONFIG_SUSPEND 58 u32 saved_state; 59 #endif 60 struct list_head node; 61 }; 62 63 static LIST_HEAD(pwrst_list); 64 65 void (*omap3_do_wfi_sram)(void); 66 67 static struct powerdomain *mpu_pwrdm, *neon_pwrdm; 68 static struct powerdomain *core_pwrdm, *per_pwrdm; 69 70 static void omap3_core_save_context(void) 71 { 72 omap3_ctrl_save_padconf(); 73 74 /* 75 * Force write last pad into memory, as this can fail in some 76 * cases according to errata 1.157, 1.185 77 */ 78 omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14), 79 OMAP343X_CONTROL_MEM_WKUP + 0x2a0); 80 81 /* Save the Interrupt controller context */ 82 omap_intc_save_context(); 83 /* Save the GPMC context */ 84 omap3_gpmc_save_context(); 85 /* Save the system control module context, padconf already save above*/ 86 omap3_control_save_context(); 87 } 88 89 static void omap3_core_restore_context(void) 90 { 91 /* Restore the control module context, padconf restored by h/w */ 92 omap3_control_restore_context(); 93 /* Restore the GPMC context */ 94 omap3_gpmc_restore_context(); 95 /* Restore the interrupt controller context */ 96 omap_intc_restore_context(); 97 } 98 99 /* 100 * FIXME: This function should be called before entering off-mode after 101 * OMAP3 secure services have been accessed. Currently it is only called 102 * once during boot sequence, but this works as we are not using secure 103 * services. 104 */ 105 static void omap3_save_secure_ram_context(void) 106 { 107 u32 ret; 108 int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); 109 110 if (omap_type() != OMAP2_DEVICE_TYPE_GP) { 111 /* 112 * MPU next state must be set to POWER_ON temporarily, 113 * otherwise the WFI executed inside the ROM code 114 * will hang the system. 115 */ 116 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); 117 ret = omap3_save_secure_ram(omap3_secure_ram_storage, 118 OMAP3_SAVE_SECURE_RAM_SZ); 119 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); 120 /* Following is for error tracking, it should not happen */ 121 if (ret) { 122 pr_err("save_secure_sram() returns %08x\n", ret); 123 while (1) 124 ; 125 } 126 } 127 } 128 129 static irqreturn_t _prcm_int_handle_io(int irq, void *unused) 130 { 131 int c; 132 133 c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK | 134 OMAP3430_ST_IO_CHAIN_MASK); 135 136 return c ? IRQ_HANDLED : IRQ_NONE; 137 } 138 139 static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused) 140 { 141 int c; 142 143 /* 144 * Clear all except ST_IO and ST_IO_CHAIN for wkup module, 145 * these are handled in a separate handler to avoid acking 146 * IO events before parsing in mux code 147 */ 148 c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK | 149 OMAP3430_ST_IO_CHAIN_MASK)); 150 c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0); 151 c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0); 152 if (omap_rev() > OMAP3430_REV_ES1_0) { 153 c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0); 154 c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0); 155 } 156 157 return c ? IRQ_HANDLED : IRQ_NONE; 158 } 159 160 static void omap34xx_save_context(u32 *save) 161 { 162 u32 val; 163 164 /* Read Auxiliary Control Register */ 165 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val)); 166 *save++ = 1; 167 *save++ = val; 168 169 /* Read L2 AUX ctrl register */ 170 asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val)); 171 *save++ = 1; 172 *save++ = val; 173 } 174 175 static int omap34xx_do_sram_idle(unsigned long save_state) 176 { 177 omap34xx_cpu_suspend(save_state); 178 return 0; 179 } 180 181 void omap_sram_idle(void) 182 { 183 /* Variable to tell what needs to be saved and restored 184 * in omap_sram_idle*/ 185 /* save_state = 0 => Nothing to save and restored */ 186 /* save_state = 1 => Only L1 and logic lost */ 187 /* save_state = 2 => Only L2 lost */ 188 /* save_state = 3 => L1, L2 and logic lost */ 189 int save_state = 0; 190 int mpu_next_state = PWRDM_POWER_ON; 191 int per_next_state = PWRDM_POWER_ON; 192 int core_next_state = PWRDM_POWER_ON; 193 u32 sdrc_pwr = 0; 194 int error; 195 196 mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); 197 switch (mpu_next_state) { 198 case PWRDM_POWER_ON: 199 case PWRDM_POWER_RET: 200 /* No need to save context */ 201 save_state = 0; 202 break; 203 case PWRDM_POWER_OFF: 204 save_state = 3; 205 break; 206 default: 207 /* Invalid state */ 208 pr_err("Invalid mpu state in sram_idle\n"); 209 return; 210 } 211 212 /* NEON control */ 213 if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON) 214 pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state); 215 216 /* Enable IO-PAD and IO-CHAIN wakeups */ 217 per_next_state = pwrdm_read_next_pwrst(per_pwrdm); 218 core_next_state = pwrdm_read_next_pwrst(core_pwrdm); 219 220 pwrdm_pre_transition(NULL); 221 222 /* PER */ 223 if (per_next_state == PWRDM_POWER_OFF) { 224 error = cpu_cluster_pm_enter(); 225 if (error) 226 return; 227 } 228 229 /* CORE */ 230 if (core_next_state < PWRDM_POWER_ON) { 231 if (core_next_state == PWRDM_POWER_OFF) { 232 omap3_core_save_context(); 233 omap3_cm_save_context(); 234 } 235 } 236 237 /* Configure PMIC signaling for I2C4 or sys_off_mode */ 238 omap3_vc_set_pmic_signaling(core_next_state); 239 240 omap3_intc_prepare_idle(); 241 242 /* 243 * On EMU/HS devices ROM code restores a SRDC value 244 * from scratchpad which has automatic self refresh on timeout 245 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443. 246 * Hence store/restore the SDRC_POWER register here. 247 */ 248 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 && 249 (omap_type() == OMAP2_DEVICE_TYPE_EMU || 250 omap_type() == OMAP2_DEVICE_TYPE_SEC) && 251 core_next_state == PWRDM_POWER_OFF) 252 sdrc_pwr = sdrc_read_reg(SDRC_POWER); 253 254 /* 255 * omap3_arm_context is the location where some ARM context 256 * get saved. The rest is placed on the stack, and restored 257 * from there before resuming. 258 */ 259 if (save_state) 260 omap34xx_save_context(omap3_arm_context); 261 if (save_state == 1 || save_state == 3) 262 cpu_suspend(save_state, omap34xx_do_sram_idle); 263 else 264 omap34xx_do_sram_idle(save_state); 265 266 /* Restore normal SDRC POWER settings */ 267 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 && 268 (omap_type() == OMAP2_DEVICE_TYPE_EMU || 269 omap_type() == OMAP2_DEVICE_TYPE_SEC) && 270 core_next_state == PWRDM_POWER_OFF) 271 sdrc_write_reg(sdrc_pwr, SDRC_POWER); 272 273 /* CORE */ 274 if (core_next_state < PWRDM_POWER_ON && 275 pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) { 276 omap3_core_restore_context(); 277 omap3_cm_restore_context(); 278 omap3_sram_restore_context(); 279 omap2_sms_restore_context(); 280 } else { 281 /* 282 * In off-mode resume path above, omap3_core_restore_context 283 * also handles the INTC autoidle restore done here so limit 284 * this to non-off mode resume paths so we don't do it twice. 285 */ 286 omap3_intc_resume_idle(); 287 } 288 289 pwrdm_post_transition(NULL); 290 291 /* PER */ 292 if (per_next_state == PWRDM_POWER_OFF) 293 cpu_cluster_pm_exit(); 294 } 295 296 static void omap3_pm_idle(void) 297 { 298 if (omap_irq_pending()) 299 return; 300 301 trace_cpu_idle_rcuidle(1, smp_processor_id()); 302 303 omap_sram_idle(); 304 305 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 306 } 307 308 #ifdef CONFIG_SUSPEND 309 static int omap3_pm_suspend(void) 310 { 311 struct power_state *pwrst; 312 int state, ret = 0; 313 314 /* Read current next_pwrsts */ 315 list_for_each_entry(pwrst, &pwrst_list, node) 316 pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm); 317 /* Set ones wanted by suspend */ 318 list_for_each_entry(pwrst, &pwrst_list, node) { 319 if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state)) 320 goto restore; 321 if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm)) 322 goto restore; 323 } 324 325 omap3_intc_suspend(); 326 327 omap_sram_idle(); 328 329 restore: 330 /* Restore next_pwrsts */ 331 list_for_each_entry(pwrst, &pwrst_list, node) { 332 state = pwrdm_read_prev_pwrst(pwrst->pwrdm); 333 if (state > pwrst->next_state) { 334 pr_info("Powerdomain (%s) didn't enter target state %d\n", 335 pwrst->pwrdm->name, pwrst->next_state); 336 ret = -1; 337 } 338 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); 339 } 340 if (ret) 341 pr_err("Could not enter target state in pm_suspend\n"); 342 else 343 pr_info("Successfully put all powerdomains to target state\n"); 344 345 return ret; 346 } 347 #else 348 #define omap3_pm_suspend NULL 349 #endif /* CONFIG_SUSPEND */ 350 351 static void __init prcm_setup_regs(void) 352 { 353 omap3_ctrl_init(); 354 355 omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva()); 356 } 357 358 void omap3_pm_off_mode_enable(int enable) 359 { 360 struct power_state *pwrst; 361 u32 state; 362 363 if (enable) 364 state = PWRDM_POWER_OFF; 365 else 366 state = PWRDM_POWER_RET; 367 368 list_for_each_entry(pwrst, &pwrst_list, node) { 369 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) && 370 pwrst->pwrdm == core_pwrdm && 371 state == PWRDM_POWER_OFF) { 372 pwrst->next_state = PWRDM_POWER_RET; 373 pr_warn("%s: Core OFF disabled due to errata i583\n", 374 __func__); 375 } else { 376 pwrst->next_state = state; 377 } 378 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); 379 } 380 } 381 382 int omap3_pm_get_suspend_state(struct powerdomain *pwrdm) 383 { 384 struct power_state *pwrst; 385 386 list_for_each_entry(pwrst, &pwrst_list, node) { 387 if (pwrst->pwrdm == pwrdm) 388 return pwrst->next_state; 389 } 390 return -EINVAL; 391 } 392 393 int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state) 394 { 395 struct power_state *pwrst; 396 397 list_for_each_entry(pwrst, &pwrst_list, node) { 398 if (pwrst->pwrdm == pwrdm) { 399 pwrst->next_state = state; 400 return 0; 401 } 402 } 403 return -EINVAL; 404 } 405 406 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) 407 { 408 struct power_state *pwrst; 409 410 if (!pwrdm->pwrsts) 411 return 0; 412 413 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC); 414 if (!pwrst) 415 return -ENOMEM; 416 pwrst->pwrdm = pwrdm; 417 pwrst->next_state = PWRDM_POWER_RET; 418 list_add(&pwrst->node, &pwrst_list); 419 420 if (pwrdm_has_hdwr_sar(pwrdm)) 421 pwrdm_enable_hdwr_sar(pwrdm); 422 423 return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); 424 } 425 426 /* 427 * Push functions to SRAM 428 * 429 * The minimum set of functions is pushed to SRAM for execution: 430 * - omap3_do_wfi for erratum i581 WA, 431 */ 432 void omap_push_sram_idle(void) 433 { 434 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); 435 } 436 437 static void __init pm_errata_configure(void) 438 { 439 if (cpu_is_omap3630()) { 440 pm34xx_errata |= PM_RTA_ERRATUM_i608; 441 /* Enable the l2 cache toggling in sleep logic */ 442 enable_omap3630_toggle_l2_on_restore(); 443 if (omap_rev() < OMAP3630_REV_ES1_2) 444 pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 | 445 PM_PER_MEMORIES_ERRATUM_i582); 446 } else if (cpu_is_omap34xx()) { 447 pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582; 448 } 449 } 450 451 int __init omap3_pm_init(void) 452 { 453 struct power_state *pwrst, *tmp; 454 struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm; 455 int ret; 456 457 if (!omap3_has_io_chain_ctrl()) 458 pr_warn("PM: no software I/O chain control; some wakeups may be lost\n"); 459 460 pm_errata_configure(); 461 462 /* XXX prcm_setup_regs needs to be before enabling hw 463 * supervised mode for powerdomains */ 464 prcm_setup_regs(); 465 466 ret = request_irq(omap_prcm_event_to_irq("wkup"), 467 _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL); 468 469 if (ret) { 470 pr_err("pm: Failed to request pm_wkup irq\n"); 471 goto err1; 472 } 473 474 /* IO interrupt is shared with mux code */ 475 ret = request_irq(omap_prcm_event_to_irq("io"), 476 _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io", 477 omap3_pm_init); 478 479 if (ret) { 480 pr_err("pm: Failed to request pm_io irq\n"); 481 goto err2; 482 } 483 484 ret = pwrdm_for_each(pwrdms_setup, NULL); 485 if (ret) { 486 pr_err("Failed to setup powerdomains\n"); 487 goto err3; 488 } 489 490 (void) clkdm_for_each(omap_pm_clkdms_setup, NULL); 491 492 mpu_pwrdm = pwrdm_lookup("mpu_pwrdm"); 493 if (mpu_pwrdm == NULL) { 494 pr_err("Failed to get mpu_pwrdm\n"); 495 ret = -EINVAL; 496 goto err3; 497 } 498 499 neon_pwrdm = pwrdm_lookup("neon_pwrdm"); 500 per_pwrdm = pwrdm_lookup("per_pwrdm"); 501 core_pwrdm = pwrdm_lookup("core_pwrdm"); 502 503 neon_clkdm = clkdm_lookup("neon_clkdm"); 504 mpu_clkdm = clkdm_lookup("mpu_clkdm"); 505 per_clkdm = clkdm_lookup("per_clkdm"); 506 wkup_clkdm = clkdm_lookup("wkup_clkdm"); 507 508 omap_common_suspend_init(omap3_pm_suspend); 509 510 arm_pm_idle = omap3_pm_idle; 511 omap3_idle_init(); 512 513 /* 514 * RTA is disabled during initialization as per erratum i608 515 * it is safer to disable RTA by the bootloader, but we would like 516 * to be doubly sure here and prevent any mishaps. 517 */ 518 if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608)) 519 omap3630_ctrl_disable_rta(); 520 521 /* 522 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are 523 * not correctly reset when the PER powerdomain comes back 524 * from OFF or OSWR when the CORE powerdomain is kept active. 525 * See OMAP36xx Erratum i582 "PER Domain reset issue after 526 * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a 527 * complete workaround. The kernel must also prevent the PER 528 * powerdomain from going to OSWR/OFF while the CORE 529 * powerdomain is not going to OSWR/OFF. And if PER last 530 * power state was off while CORE last power state was ON, the 531 * UART3/4 and McBSP2/3 SIDETONE devices need to run a 532 * self-test using their loopback tests; if that fails, those 533 * devices are unusable until the PER/CORE can complete a transition 534 * from ON to OSWR/OFF and then back to ON. 535 * 536 * XXX Technically this workaround is only needed if off-mode 537 * or OSWR is enabled. 538 */ 539 if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582)) 540 clkdm_add_wkdep(per_clkdm, wkup_clkdm); 541 542 clkdm_add_wkdep(neon_clkdm, mpu_clkdm); 543 if (omap_type() != OMAP2_DEVICE_TYPE_GP) { 544 omap3_secure_ram_storage = 545 kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL); 546 if (!omap3_secure_ram_storage) 547 pr_err("Memory allocation failed when allocating for secure sram context\n"); 548 549 local_irq_disable(); 550 551 omap3_save_secure_ram_context(); 552 553 local_irq_enable(); 554 } 555 556 omap3_save_scratchpad_contents(); 557 return ret; 558 559 err3: 560 list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) { 561 list_del(&pwrst->node); 562 kfree(pwrst); 563 } 564 free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init); 565 err2: 566 free_irq(omap_prcm_event_to_irq("wkup"), NULL); 567 err1: 568 return ret; 569 } 570