/*
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>

#include <asm/assembler.h>

#include "../plat-omap/sram.h"

#include "omap34xx.h"
#include "iomap.h"
#include "cm2xxx_3xxx.h"
#include "prm2xxx_3xxx.h"
#include "sdrc.h"
#include "control.h"

/*
 * Register access definitions
 */
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
					(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
					OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
					OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this when a correct place becomes available */
#define SCRATCHPAD_MEM_OFFS	0x310
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
					OMAP343X_CONTROL_MEM_WKUP +\
					SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)

/*
 * This file needs to be built unconditionally as ARM to interoperate
 * correctly with non-Thumb-2-capable firmware.
 */
	.arm

/*
 * API functions
 */

	.text
/*
 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 * This function sets up a flag that will allow for this toggling to take
 * place on 3630. Hopefully some version in the future may not need this.
 */
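/*
 * Illustrative caller-side sketch (not part of this file): platform PM init
 * code is expected to set this flag once on 3630-class devices, roughly:
 *
 *	if (cpu_is_omap3630())
 *		enable_omap3630_toggle_l2_on_restore();
 *
 * The exact call site lives in the C power-management code; the sketch only
 * shows the intent of the flag.
 */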
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}	@ save registers on stack
	/* Setup so that we will disable and enable l2 */
	mov	r1, #0x1
	adrl	r2, l2dis_3630	@ may be too distant for plain adr
	str	r1, [r2]
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)

	.text
/* Function to call rom code to save secure ram context */
	.align	3
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3,#0x4]		@ r0 has sdram address
	ldr	r12, high_mask
	and	r3, r3, r12
	ldr	r12, sram_phy_addr_mask
	orr	r3, r3, r12
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	nop
	nop
	nop
	nop
	ldmfd	sp!, {r4 - r11, pc}
	.align
sram_phy_addr_mask:
	.word	SRAM_BASE_P
high_mask:
	.word	0xffff
api_params:
	.word	0x4, 0x0, 0x0, 0x1, 0x1
ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
	.word	. - save_secure_ram_context

/*
 * ======================
 * == Idle entry point ==
 * ======================
 */

/*
 * Forces OMAP into idle state
 *
 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
 * and executes the WFI instruction. Calling WFI effectively changes the
 * power domain states to the desired target power states.
 *
 * Notes:
 * - only the minimum set of functions gets copied to internal SRAM at boot
 *   and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
 *   pointers in SDRAM or SRAM are called depending on the desired low power
 *   target state.
 * - when the OMAP wakes up it continues at different execution points
 *   depending on the low power mode (non-OFF vs OFF modes),
 *   cf. 'Resume path for xxx mode' comments.
 */
	.align	3
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/*
	 * r0 contains information about saving context:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost (even if L1 is retained, we clean it along with L2)
	 *   3 - Both L1 and L2 lost and logic lost
	 */

	/*
	 * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
	 * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
	 */
	ldr	r4, omap3_do_wfi_sram_addr
	ldr	r5, [r4]
	cmp	r0, #0x0	@ If no context save required,
	bxeq	r5		@ jump to the WFI code in SRAM

	/* Otherwise fall through to the save context code */
save_context_wfi:
	/*
	 * jump out to kernel flush routine
	 *  - reusing that code is better
	 *  - it executes in a cached space so is faster than refetch per-block
	 *  - should be faster and will change with kernel
	 *  - 'might' have to copy address, load and jump to it
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	mov	lr, pc
	bx	r1
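
	/*
	 * For reference, the remainder of the OFF-mode entry path below, as
	 * a C-like sketch (illustrative only; get_sctlr()/set_sctlr() are
	 * hypothetical helpers standing in for the mrc/mcr accesses that
	 * follow):
	 *
	 *	set_sctlr(get_sctlr() & ~(1 << 2));	// SCTLR.C = 0: no new D-cache allocation
	 *	v7_flush_dcache_all();			// invalidate L1 (clean of clean lines ~ NOP)
	 *	omap3_do_wfi();				// self refresh on idle req + WFI, from SDRAM
	 */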

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and they would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate the L1 data cache. Even though only invalidation is
	 * necessary, the exported flush API is used here. Cleaning an
	 * already clean cache is almost a NOP.
	 */
	ldr	r1, kernel_flush
	blx	r1
	/*
	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
	 * This sequence switches back to ARM.  Note that .align may insert a
	 * nop: bx pc needs to be word-aligned in order to work.
	 */
 THUMB(	.thumb		)
 THUMB(	.align		)
 THUMB(	bx	pc	)
 THUMB(	nop		)
	.arm

	b	omap3_do_wfi

/*
 * Local variables
 */
omap3_do_wfi_sram_addr:
	.word	omap3_do_wfi_sram
kernel_flush:
	.word	v7_flush_dcache_all

/* ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */

/*
 * Do WFI instruction
 * Includes the resume path for non-OFF modes
 *
 * This code gets copied to internal SRAM and is accessible
 * from both SDRAM and SRAM:
 * - executed from SRAM for non-OFF modes (omap3_do_wfi_sram),
 * - executed from SDRAM for OFF mode (omap3_do_wfi).
 */
	.align	3
ENTRY(omap3_do_wfi)
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data synchronization barrier and data memory barrier */
	dsb
	dmb

/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt

/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/*
 * This function implements the erratum ID i581 WA:
 * SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1
	beq	wait_dpll3_lock

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2
	bne	wait_sdrc_ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]

/*
 * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
 * base instead.
 * Be careful not to clobber r7 when maintaining this code.
 */
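
/*
 * For reference, the DLL re-lock logic below as a C-like sketch (illustrative
 * only; SDRC_DLLA_CTRL/SDRC_DLLA_STATUS stand for the registers addressed via
 * the local variables at the end of this function, and the two counters are
 * the kick_counter/wait_dll_lock_counter words kept in SRAM):
 *
 *	if (SDRC_DLLA_CTRL & 0x4)			// DLL already in lock mode
 *		goto exit_nonoff_modes;
 *	for (;;) {
 *		wait_dll_lock_counter++;
 *		for (i = 8; i > 0; i--)			// poll ~20us for lock
 *			if (SDRC_DLLA_STATUS & 0x4)
 *				goto exit_nonoff_modes;
 *		SDRC_DLLA_CTRL &= ~(1 << 3);		// kick: disable DLL
 *		SDRC_DLLA_CTRL |= (1 << 3);		// ...then re-enable it
 *		kick_counter++;
 *	}
 */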

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
	bne	exit_nonoff_modes	@ Return if locked
	/* wait till dll locks */
	adr	r7, kick_counter
wait_dll_lock_timed:
	ldr	r4, wait_dll_lock_counter
	add	r4, r4, #1
	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
	ldr	r4, sdrc_dlla_status
	/* Wait 20us for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
	ldr	r5, [r4]
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
	b	exit_nonoff_modes	@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	ldr	r4, kick_counter
	add	r4, r4, #1
	str	r4, [r7]		@ kick_counter
	b	wait_dll_lock_timed

exit_nonoff_modes:
	/* Re-enable C-bit if needed */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
	mcreq	p15, 0, r0, c1, c0, 0
	isb

/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return

/*
 * Local variables
 */
sdrc_power:
	.word	SDRC_POWER_V
cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
	/*
	 * When exporting to userspace while the counters are in SRAM,
	 * these 2 words need to be at the end to facilitate retrieval!
	 */
kick_counter:
	.word	0
wait_dll_lock_counter:
	.word	0

ENTRY(omap3_do_wfi_sz)
	.word	. - omap3_do_wfi


/*
 * ==============================
 * == Resume path for OFF mode ==
 * ==============================
 */

/*
 * The restore_* functions are called by the ROM code
 * when back from WFI in OFF mode.
 * Cf. the get_*restore_pointer functions.
 *
 * restore_es3: applies to 34xx >= ES3.0
 * restore_3630: applies to 36xx
 * restore: common code for 3xxx
 *
 * Note: when back from CORE and MPU OFF mode we are running
 * from SDRAM, without the MMU, without the caches and without
 * branch prediction. Also the SRAM content has been cleared.
 */
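/*
 * Illustrative C-side selection of these entry points (not part of this
 * file): the scratchpad setup code stores the physical address of one of
 * the restore routines where the ROM code will pick it up, roughly:
 *
 *	if (cpu_is_omap3630())
 *		restore = omap3_restore_3630;
 *	else if (omap_rev() >= OMAP3430_REV_ES3_0)
 *		restore = omap3_restore_es3;
 *	else
 *		restore = omap3_restore;
 *	// virt_to_phys(restore) goes into the scratchpad
 *
 * The exact revision checks and helper names live in the scratchpad/control
 * code; the above is only a sketch of the intent.
 */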
ENTRY(omap3_restore_es3)
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
	mov	r2, r2, ror #2
copy_to_sram:
	ldmia	r0!, {r3}	@ val = *src
	stmia	r1!, {r3}	@ *dst = val
	subs	r2, r2, #0x1	@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1
	b	omap3_restore	@ Fall through to OMAP3 common code
ENDPROC(omap3_restore_es3)

ENTRY(omap3_restore_3630)
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]
ENDPROC(omap3_restore_3630)

	/* Fall through to common code for the remaining logic */

ENTRY(omap3_restore)
	/*
	 * Read the pwstctrl register to check the reason for mpu reset.
	 * This tells us what was lost.
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	bne	logic_l1_restore

	ldr	r0, l2dis_3630
	cmp	r0, #0x1	@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2	@ disable L2 cache
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700
	cmp	r1, #0x300
	beq	l2_inv_gp
	mov	r0, #40			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
					@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0			@ copy service ID in r12
	mov	r1, #0			@ set task ID for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

	.align
l2_inv_api_params:
	.word	0x1, 0x00
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov	r12, #0x1		@ set up to invalidate L2
	smc	#0			@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	ldr	r0, [r3,#4]
	mov	r12, #0x3
	smc	#0			@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	ldr	r0, [r3,#12]
	mov	r12, #0x2
	smc	#0			@ Call SMI monitor (smieq)
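/*
 * logic_l1_restore is the common tail of the restore path for both GP and
 * HS devices. A C-like sketch of what remains (illustrative only):
 *
 *	if (l2dis_3630)
 *		actlr |= (1 << 1);	// re-enable L2: CP15 aux control register, as below
 *	cpu_resume();			// generic ARM resume code takes over
 */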
logic_l1_restore:
	ldr	r1, l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:

	/* Now branch to the common CPU resume function */
	b	cpu_resume
ENDPROC(omap3_restore)

	.ltorg

/*
 * Local variables
 */
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
sram_base:
	.word	SRAM_BASE_P + 0x8000
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
l2dis_3630:
	.word	0

/*
 * Internal functions
 */

/*
 * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
 * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
 */
	.text
	.align	3
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr

/*
 * Local variables
 */
	.align
sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix
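
/*
 * Note on the *_sz words (save_secure_ram_context_sz, omap3_do_wfi_sz,
 * es3_sdrc_fix_sz): each holds the byte size of the routine it follows so
 * that C code can copy that routine into SRAM. Illustrative use on the C
 * side (cf. omap_push_sram_idle; the exact form may differ):
 *
 *	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
 *
 * es3_sdrc_fix, by contrast, is copied into SRAM by omap3_restore_es3
 * itself (see copy_to_sram above).
 */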