xref: /linux/arch/arm/mach-omap2/sleep44xx.S (revision 5b6e3eb576e8ad03264d46982afed77bdc6323a3)
1b2b9762fSSantosh Shilimkar/*
2b2b9762fSSantosh Shilimkar * OMAP44xx sleep code.
3b2b9762fSSantosh Shilimkar *
4b2b9762fSSantosh Shilimkar * Copyright (C) 2011 Texas Instruments, Inc.
5b2b9762fSSantosh Shilimkar * 	Santosh Shilimkar <santosh.shilimkar@ti.com>
6b2b9762fSSantosh Shilimkar *
7b2b9762fSSantosh Shilimkar * This program is free software; you can redistribute it and/or modify
8b2b9762fSSantosh Shilimkar * it under the terms of the GNU General Public License version 2 as
9b2b9762fSSantosh Shilimkar * published by the Free Software Foundation.
10b2b9762fSSantosh Shilimkar */
11b2b9762fSSantosh Shilimkar
12b2b9762fSSantosh Shilimkar#include <linux/linkage.h>
13b2b9762fSSantosh Shilimkar#include <asm/smp_scu.h>
14b2b9762fSSantosh Shilimkar#include <asm/memory.h>
15b2b9762fSSantosh Shilimkar#include <asm/hardware/cache-l2x0.h>
16b2b9762fSSantosh Shilimkar
17b2b9762fSSantosh Shilimkar#include <plat/omap44xx.h>
18b2b9762fSSantosh Shilimkar#include <mach/omap-secure.h>
19b2b9762fSSantosh Shilimkar
20b2b9762fSSantosh Shilimkar#include "common.h"
21b2b9762fSSantosh Shilimkar#include "omap4-sar-layout.h"
22b2b9762fSSantosh Shilimkar
23b2b9762fSSantosh Shilimkar#if defined(CONFIG_SMP) && defined(CONFIG_PM)
24b2b9762fSSantosh Shilimkar
25b2b9762fSSantosh Shilimkar/*
25b2b9762fSSantosh Shilimkar * Issue a secure monitor call. Callers place the monitor service
25b2b9762fSSantosh Shilimkar * index in r12 and any arguments in r0-r3 before invoking the macro.
25b2b9762fSSantosh Shilimkar * The DSBs order all outstanding memory accesses around the world
25b2b9762fSSantosh Shilimkar * switch into and out of the secure monitor.
25b2b9762fSSantosh Shilimkar */
25b2b9762fSSantosh Shilimkar.macro	DO_SMC
26b2b9762fSSantosh Shilimkar	dsb					@ drain writes before entering monitor
27b2b9762fSSantosh Shilimkar	smc	#0
28b2b9762fSSantosh Shilimkar	dsb					@ ensure monitor effects are visible
29b2b9762fSSantosh Shilimkar.endm
30b2b9762fSSantosh Shilimkar
31b2b9762fSSantosh Shilimkar/* Empty parameter block passed to PPA services that take no arguments. */
31b2b9762fSSantosh Shilimkarppa_zero_params:
32b2b9762fSSantosh Shilimkar	.word		0x0
33b2b9762fSSantosh Shilimkar
345e94c6e3SSantosh Shilimkar/*
345e94c6e3SSantosh Shilimkar * Parameter block for the L2 POR PPA service. Word 0 is presumably the
345e94c6e3SSantosh Shilimkar * argument count; word 1 is patched at run time with the saved
345e94c6e3SSantosh Shilimkar * PREFETCH_CTRL value (see omap4_cpu_resume) — TODO confirm layout
345e94c6e3SSantosh Shilimkar * against the PPA API documentation.
345e94c6e3SSantosh Shilimkar */
345e94c6e3SSantosh Shilimkarppa_por_params:
355e94c6e3SSantosh Shilimkar	.word		1, 0
365e94c6e3SSantosh Shilimkar
37b2b9762fSSantosh Shilimkar/*
38b2b9762fSSantosh Shilimkar * =============================
39b2b9762fSSantosh Shilimkar * == CPU suspend finisher ==
40b2b9762fSSantosh Shilimkar * =============================
41b2b9762fSSantosh Shilimkar *
42b2b9762fSSantosh Shilimkar * void omap4_finish_suspend(unsigned long cpu_state)
43b2b9762fSSantosh Shilimkar *
44b2b9762fSSantosh Shilimkar * This function code saves the CPU context and performs the CPU
45b2b9762fSSantosh Shilimkar * power down sequence. Calling WFI effectively changes the CPU
46b2b9762fSSantosh Shilimkar * power domains states to the desired target power state.
47b2b9762fSSantosh Shilimkar *
48b2b9762fSSantosh Shilimkar * @cpu_state : contains context save state (r0)
49b2b9762fSSantosh Shilimkar *	0 - No context lost
50b2b9762fSSantosh Shilimkar * 	1 - CPUx L1 and logic lost: MPUSS CSWR
51b2b9762fSSantosh Shilimkar * 	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
52b2b9762fSSantosh Shilimkar *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
53b2b9762fSSantosh Shilimkar * @return: This function never returns for CPU OFF and DORMANT power states.
54b2b9762fSSantosh Shilimkar * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
55b2b9762fSSantosh Shilimkar * from this follows a full CPU reset path via ROM code to CPU restore code.
56b2b9762fSSantosh Shilimkar * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
57b2b9762fSSantosh Shilimkar * It returns to the caller for CPU INACTIVE and ON power states or in case
58b2b9762fSSantosh Shilimkar * CPU failed to transition to targeted OFF/DORMANT state.
59*5b6e3eb5SSantosh Shilimkar *
60*5b6e3eb5SSantosh Shilimkar * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
61*5b6e3eb5SSantosh Shilimkar * stack frame and it expects the caller to take care of it. Hence the entire
62*5b6e3eb5SSantosh Shilimkar * stack frame is saved to avoid possible stack corruption.
63b2b9762fSSantosh Shilimkar */
64b2b9762fSSantosh ShilimkarENTRY(omap4_finish_suspend)
65*5b6e3eb5SSantosh Shilimkar	stmfd	sp!, {r4-r12, lr}		@ v7_flush_dcache_all saves no frame
66b2b9762fSSantosh Shilimkar	cmp	r0, #0x0
67b2b9762fSSantosh Shilimkar	beq	do_WFI				@ No lowpower state, jump to WFI
68b2b9762fSSantosh Shilimkar
69b2b9762fSSantosh Shilimkar	/*
70b2b9762fSSantosh Shilimkar	 * Flush all data from the L1 data cache before disabling
71b2b9762fSSantosh Shilimkar	 * SCTLR.C bit.
72b2b9762fSSantosh Shilimkar	 */
73b2b9762fSSantosh Shilimkar	bl	omap4_get_sar_ram_base		@ r0 = SAR RAM base
74b2b9762fSSantosh Shilimkar	ldr	r9, [r0, #OMAP_TYPE_OFFSET]	@ r9 = device type (GP/HS)
75b2b9762fSSantosh Shilimkar	cmp	r9, #0x1			@ Check for HS device
76b2b9762fSSantosh Shilimkar	bne	skip_secure_l1_clean
77b2b9762fSSantosh Shilimkar	mov	r0, #SCU_PM_NORMAL
78b2b9762fSSantosh Shilimkar	mov	r1, #0xFF			@ clean secure L1
79b2b9762fSSantosh Shilimkar	stmfd   r13!, {r4-r12, r14}		@ secure API may clobber r4-r12
80b2b9762fSSantosh Shilimkar	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX	@ monitor service index
81b2b9762fSSantosh Shilimkar	DO_SMC
82b2b9762fSSantosh Shilimkar	ldmfd   r13!, {r4-r12, r14}
83b2b9762fSSantosh Shilimkarskip_secure_l1_clean:
84b2b9762fSSantosh Shilimkar	bl	v7_flush_dcache_all
85b2b9762fSSantosh Shilimkar
86b2b9762fSSantosh Shilimkar	/*
87b2b9762fSSantosh Shilimkar	 * Clear the SCTLR.C bit to prevent further data cache
88b2b9762fSSantosh Shilimkar	 * allocation. Clearing SCTLR.C would make all the data accesses
89b2b9762fSSantosh Shilimkar	 * strongly ordered and would not hit the cache.
90b2b9762fSSantosh Shilimkar	 */
91b2b9762fSSantosh Shilimkar	mrc	p15, 0, r0, c1, c0, 0
92b2b9762fSSantosh Shilimkar	bic	r0, r0, #(1 << 2)		@ Disable the C bit
93b2b9762fSSantosh Shilimkar	mcr	p15, 0, r0, c1, c0, 0
94b2b9762fSSantosh Shilimkar	isb
95b2b9762fSSantosh Shilimkar
96b2b9762fSSantosh Shilimkar	/*
97b2b9762fSSantosh Shilimkar	 * Invalidate L1 data cache. Even though only invalidate is
98b2b9762fSSantosh Shilimkar	 * necessary exported flush API is used here. Doing clean
99b2b9762fSSantosh Shilimkar	 * on already clean cache would be almost NOP.
100b2b9762fSSantosh Shilimkar	 */
101b2b9762fSSantosh Shilimkar	bl	v7_flush_dcache_all
102b2b9762fSSantosh Shilimkar
103b2b9762fSSantosh Shilimkar	/*
104b2b9762fSSantosh Shilimkar	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
105b2b9762fSSantosh Shilimkar	 * to Asymmetric Multiprocessing (AMP) mode by programming
106b2b9762fSSantosh Shilimkar	 * the SCU power status to DORMANT or OFF mode.
107b2b9762fSSantosh Shilimkar	 * This enables the CPU to be taken out of coherency by
108b2b9762fSSantosh Shilimkar	 * preventing the CPU from receiving cache, TLB, or BTB
109b2b9762fSSantosh Shilimkar	 * maintenance operations broadcast by other CPUs in the cluster.
110b2b9762fSSantosh Shilimkar	 */
111b2b9762fSSantosh Shilimkar	bl	omap4_get_sar_ram_base
112b2b9762fSSantosh Shilimkar	mov	r8, r0				@ r8 = SAR RAM base
113b2b9762fSSantosh Shilimkar	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
114b2b9762fSSantosh Shilimkar	cmp	r9, #0x1			@ Check for HS device
115b2b9762fSSantosh Shilimkar	bne	scu_gp_set
116b2b9762fSSantosh Shilimkar	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
117b2b9762fSSantosh Shilimkar	ands	r0, r0, #0x0f			@ r0 = CPU ID (0 or 1)
118b2b9762fSSantosh Shilimkar	ldreq	r0, [r8, #SCU_OFFSET0]		@ CPU0 target SCU power state
119b2b9762fSSantosh Shilimkar	ldrne	r0, [r8, #SCU_OFFSET1]		@ CPU1 target SCU power state
120b2b9762fSSantosh Shilimkar	mov	r1, #0x00
121b2b9762fSSantosh Shilimkar	stmfd   r13!, {r4-r12, r14}		@ secure API may clobber r4-r12
122b2b9762fSSantosh Shilimkar	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
123b2b9762fSSantosh Shilimkar	DO_SMC
124b2b9762fSSantosh Shilimkar	ldmfd   r13!, {r4-r12, r14}
125b2b9762fSSantosh Shilimkar	b	skip_scu_gp_set
126b2b9762fSSantosh Shilimkarscu_gp_set:
127b2b9762fSSantosh Shilimkar	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
128b2b9762fSSantosh Shilimkar	ands	r0, r0, #0x0f			@ r0 = CPU ID (0 or 1)
129b2b9762fSSantosh Shilimkar	ldreq	r1, [r8, #SCU_OFFSET0]		@ r1 = target SCU power mode
130b2b9762fSSantosh Shilimkar	ldrne	r1, [r8, #SCU_OFFSET1]
131b2b9762fSSantosh Shilimkar	bl	omap4_get_scu_base		@ r0 = SCU base for scu_power_mode
132b2b9762fSSantosh Shilimkar	bl	scu_power_mode
133b2b9762fSSantosh Shilimkarskip_scu_gp_set:
134b2b9762fSSantosh Shilimkar	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
135b2b9762fSSantosh Shilimkar	tst	r0, #(1 << 18)			@ NS write access to ACTLR.SMP?
136b2b9762fSSantosh Shilimkar	mrcne	p15, 0, r0, c1, c0, 1
137b2b9762fSSantosh Shilimkar	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
138b2b9762fSSantosh Shilimkar	mcrne	p15, 0, r0, c1, c0, 1
139b2b9762fSSantosh Shilimkar	isb
140b2b9762fSSantosh Shilimkar	dsb
1415e94c6e3SSantosh Shilimkar#ifdef CONFIG_CACHE_L2X0
1425e94c6e3SSantosh Shilimkar	/*
1435e94c6e3SSantosh Shilimkar	 * Clean and invalidate the L2 cache.
1445e94c6e3SSantosh Shilimkar	 * Common cache-l2x0.c functions can't be used here since it
1455e94c6e3SSantosh Shilimkar	 * uses spinlocks. We are out of coherency here with data cache
1465e94c6e3SSantosh Shilimkar	 * disabled. The spinlock implementation uses exclusive load/store
1475e94c6e3SSantosh Shilimkar	 * instruction which can fail without data cache being enabled.
1485e94c6e3SSantosh Shilimkar	 * OMAP4 hardware doesn't support exclusive monitor which can
1495e94c6e3SSantosh Shilimkar	 * overcome exclusive access issue. Because of this, CPU can
1505e94c6e3SSantosh Shilimkar	 * lead to deadlock.
1515e94c6e3SSantosh Shilimkar	 */
1525e94c6e3SSantosh Shilimkar	bl	omap4_get_sar_ram_base
1535e94c6e3SSantosh Shilimkar	mov	r8, r0				@ r8 = SAR RAM base
1545e94c6e3SSantosh Shilimkar	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
1555e94c6e3SSantosh Shilimkar	ands	r5, r5, #0x0f			@ r5 = CPU ID (0 or 1)
1565e94c6e3SSantosh Shilimkar	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
1575e94c6e3SSantosh Shilimkar	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
1585e94c6e3SSantosh Shilimkar	cmp	r0, #3				@ 3 = MPUSS OFF: L2 will be lost
1595e94c6e3SSantosh Shilimkar	bne	do_WFI
1605e94c6e3SSantosh Shilimkar#ifdef CONFIG_PL310_ERRATA_727915
1615e94c6e3SSantosh Shilimkar	mov	r0, #0x03			@ debug ctrl value for workaround
1625e94c6e3SSantosh Shilimkar	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
1635e94c6e3SSantosh Shilimkar	DO_SMC
1645e94c6e3SSantosh Shilimkar#endif
1655e94c6e3SSantosh Shilimkar	bl	omap4_get_l2cache_base
1665e94c6e3SSantosh Shilimkar	mov	r2, r0				@ r2 = PL310 register base
1675e94c6e3SSantosh Shilimkar	ldr	r0, =0xffff			@ all 16 ways
1685e94c6e3SSantosh Shilimkar	str	r0, [r2, #L2X0_CLEAN_INV_WAY]	@ clean+invalidate by way
1695e94c6e3SSantosh Shilimkarwait:
1705e94c6e3SSantosh Shilimkar	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
1715e94c6e3SSantosh Shilimkar	ldr	r1, =0xffff
1725e94c6e3SSantosh Shilimkar	ands	r0, r0, r1
1735e94c6e3SSantosh Shilimkar	bne	wait				@ poll until all ways complete
1745e94c6e3SSantosh Shilimkar#ifdef CONFIG_PL310_ERRATA_727915
1755e94c6e3SSantosh Shilimkar	mov	r0, #0x00			@ restore debug ctrl
1765e94c6e3SSantosh Shilimkar	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
1775e94c6e3SSantosh Shilimkar	DO_SMC
1785e94c6e3SSantosh Shilimkar#endif
1795e94c6e3SSantosh Shilimkarl2x_sync:
1805e94c6e3SSantosh Shilimkar	bl	omap4_get_l2cache_base
1815e94c6e3SSantosh Shilimkar	mov	r2, r0
1825e94c6e3SSantosh Shilimkar	mov	r0, #0x0
1835e94c6e3SSantosh Shilimkar	str	r0, [r2, #L2X0_CACHE_SYNC]	@ drain L2 buffers
1845e94c6e3SSantosh Shilimkarsync:
1855e94c6e3SSantosh Shilimkar	ldr	r0, [r2, #L2X0_CACHE_SYNC]
1865e94c6e3SSantosh Shilimkar	ands	r0, r0, #0x1
1875e94c6e3SSantosh Shilimkar	bne	sync				@ poll until sync completes
1885e94c6e3SSantosh Shilimkar#endif
189b2b9762fSSantosh Shilimkar
190b2b9762fSSantosh Shilimkardo_WFI:
191b2b9762fSSantosh Shilimkar	bl	omap_do_wfi
192b2b9762fSSantosh Shilimkar
193b2b9762fSSantosh Shilimkar	/*
194b2b9762fSSantosh Shilimkar	 * CPU is here when it failed to enter OFF/DORMANT or
195b2b9762fSSantosh Shilimkar	 * no low power state was attempted.
196b2b9762fSSantosh Shilimkar	 */
197b2b9762fSSantosh Shilimkar	mrc	p15, 0, r0, c1, c0, 0
198b2b9762fSSantosh Shilimkar	tst	r0, #(1 << 2)			@ Check C bit enabled?
199b2b9762fSSantosh Shilimkar	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
200b2b9762fSSantosh Shilimkar	mcreq	p15, 0, r0, c1, c0, 0
201b2b9762fSSantosh Shilimkar	isb
202b2b9762fSSantosh Shilimkar
203b2b9762fSSantosh Shilimkar	/*
204b2b9762fSSantosh Shilimkar	 * Ensure the CPU power state is set to NORMAL in
205b2b9762fSSantosh Shilimkar	 * SCU power state so that CPU is back in coherency.
206b2b9762fSSantosh Shilimkar	 * In non-coherent mode CPU can lock-up and lead to
207b2b9762fSSantosh Shilimkar	 * system deadlock.
208b2b9762fSSantosh Shilimkar	 */
209b2b9762fSSantosh Shilimkar	mrc	p15, 0, r0, c1, c0, 1
210b2b9762fSSantosh Shilimkar	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
211b2b9762fSSantosh Shilimkar	orreq	r0, r0, #(1 << 6)
212b2b9762fSSantosh Shilimkar	mcreq	p15, 0, r0, c1, c0, 1
213b2b9762fSSantosh Shilimkar	isb
214b2b9762fSSantosh Shilimkar	bl	omap4_get_sar_ram_base
215b2b9762fSSantosh Shilimkar	mov	r8, r0				@ r8 = SAR RAM base
216b2b9762fSSantosh Shilimkar	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
217b2b9762fSSantosh Shilimkar	cmp	r9, #0x1			@ Check for HS device
218b2b9762fSSantosh Shilimkar	bne	scu_gp_clear
219b2b9762fSSantosh Shilimkar	mov	r0, #SCU_PM_NORMAL
220b2b9762fSSantosh Shilimkar	mov	r1, #0x00
221b2b9762fSSantosh Shilimkar	stmfd   r13!, {r4-r12, r14}		@ secure API may clobber r4-r12
222b2b9762fSSantosh Shilimkar	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
223b2b9762fSSantosh Shilimkar	DO_SMC
224b2b9762fSSantosh Shilimkar	ldmfd   r13!, {r4-r12, r14}
225b2b9762fSSantosh Shilimkar	b	skip_scu_gp_clear
226b2b9762fSSantosh Shilimkarscu_gp_clear:
227b2b9762fSSantosh Shilimkar	bl	omap4_get_scu_base
228b2b9762fSSantosh Shilimkar	mov	r1, #SCU_PM_NORMAL
229b2b9762fSSantosh Shilimkar	bl	scu_power_mode
230b2b9762fSSantosh Shilimkarskip_scu_gp_clear:
231b2b9762fSSantosh Shilimkar	isb
232b2b9762fSSantosh Shilimkar	dsb
233*5b6e3eb5SSantosh Shilimkar	ldmfd	sp!, {r4-r12, pc}		@ restore frame, return to caller
234b2b9762fSSantosh ShilimkarENDPROC(omap4_finish_suspend)
235b2b9762fSSantosh Shilimkar
236b2b9762fSSantosh Shilimkar/*
237b2b9762fSSantosh Shilimkar * ============================
238b2b9762fSSantosh Shilimkar * == CPU resume entry point ==
239b2b9762fSSantosh Shilimkar * ============================
240b2b9762fSSantosh Shilimkar *
241b2b9762fSSantosh Shilimkar * void omap4_cpu_resume(void)
242b2b9762fSSantosh Shilimkar *
243b2b9762fSSantosh Shilimkar * ROM code jumps to this function while waking up from CPU
244b2b9762fSSantosh Shilimkar * OFF or DORMANT state. Physical address of the function is
245b2b9762fSSantosh Shilimkar * stored in the SAR RAM while entering to OFF or DORMANT mode.
246b2b9762fSSantosh Shilimkar * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
247b2b9762fSSantosh Shilimkar */
248b2b9762fSSantosh ShilimkarENTRY(omap4_cpu_resume)
249b2b9762fSSantosh Shilimkar	/*
250b2b9762fSSantosh Shilimkar	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
251b2b9762fSSantosh Shilimkar	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
252b2b9762fSSantosh Shilimkar	 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
253b2b9762fSSantosh Shilimkar	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
254b2b9762fSSantosh Shilimkar	 * OMAP443X GP devices- SMP bit isn't accessible.
255b2b9762fSSantosh Shilimkar	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
256b2b9762fSSantosh Shilimkar	 */
257b2b9762fSSantosh Shilimkar	ldr	r8, =OMAP44XX_SAR_RAM_BASE
258b2b9762fSSantosh Shilimkar	ldr	r9, [r8, #OMAP_TYPE_OFFSET]	@ r9 = device type (GP/HS)
259b2b9762fSSantosh Shilimkar	cmp	r9, #0x1			@ Skip if GP device
260b2b9762fSSantosh Shilimkar	bne	skip_ns_smp_enable
261b2b9762fSSantosh Shilimkar	mrc     p15, 0, r0, c0, c0, 5		@ Read MPIDR
262b2b9762fSSantosh Shilimkar	ands    r0, r0, #0x0f			@ CPU0's SMP bit was set by PPA init
263b2b9762fSSantosh Shilimkar	beq	skip_ns_smp_enable
264b2b9762fSSantosh Shilimkarppa_actrl_retry:
265b2b9762fSSantosh Shilimkar	mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
266b2b9762fSSantosh Shilimkar	adr	r3, ppa_zero_params		@ Pointer to parameters
267b2b9762fSSantosh Shilimkar	mov	r1, #0x0			@ Process ID
268b2b9762fSSantosh Shilimkar	mov	r2, #0x4			@ Flag
269b2b9762fSSantosh Shilimkar	mov	r6, #0xff
270b2b9762fSSantosh Shilimkar	mov	r12, #0x00			@ Secure Service ID
271b2b9762fSSantosh Shilimkar	DO_SMC
272b2b9762fSSantosh Shilimkar	cmp	r0, #0x0			@ API returns 0 on success.
273b2b9762fSSantosh Shilimkar	beq	enable_smp_bit
274b2b9762fSSantosh Shilimkar	b	ppa_actrl_retry			@ retry until the PPA API succeeds
275b2b9762fSSantosh Shilimkarenable_smp_bit:
276b2b9762fSSantosh Shilimkar	mrc	p15, 0, r0, c1, c0, 1
277b2b9762fSSantosh Shilimkar	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
278b2b9762fSSantosh Shilimkar	orreq	r0, r0, #(1 << 6)
279b2b9762fSSantosh Shilimkar	mcreq	p15, 0, r0, c1, c0, 1
280b2b9762fSSantosh Shilimkar	isb
281b2b9762fSSantosh Shilimkarskip_ns_smp_enable:
2825e94c6e3SSantosh Shilimkar#ifdef CONFIG_CACHE_L2X0
2835e94c6e3SSantosh Shilimkar	/*
2845e94c6e3SSantosh Shilimkar	 * Restore the L2 AUXCTRL and enable the L2 cache.
2855e94c6e3SSantosh Shilimkar	 * OMAP4_MON_L2X0_AUXCTRL_INDEX =  Program the L2X0 AUXCTRL
2865e94c6e3SSantosh Shilimkar	 * OMAP4_MON_L2X0_CTRL_INDEX =  Enable the L2 using L2X0 CTRL
2875e94c6e3SSantosh Shilimkar	 * register r0 contains value to be programmed.
2885e94c6e3SSantosh Shilimkar	 * L2 cache is already invalidate by ROM code as part
2895e94c6e3SSantosh Shilimkar	 * of MPUSS OFF wakeup path.
2905e94c6e3SSantosh Shilimkar	 */
2915e94c6e3SSantosh Shilimkar	ldr	r2, =OMAP44XX_L2CACHE_BASE
2925e94c6e3SSantosh Shilimkar	ldr	r0, [r2, #L2X0_CTRL]
2935e94c6e3SSantosh Shilimkar	and	r0, #0x0f			@ mask to the control bits
2945e94c6e3SSantosh Shilimkar	cmp	r0, #1
2955e94c6e3SSantosh Shilimkar	beq	skip_l2en			@ Skip if already enabled
2965e94c6e3SSantosh Shilimkar	ldr	r3, =OMAP44XX_SAR_RAM_BASE
2975e94c6e3SSantosh Shilimkar	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
2985e94c6e3SSantosh Shilimkar	cmp	r1, #0x1			@ Check for HS device
2995e94c6e3SSantosh Shilimkar	bne     set_gp_por
3005e94c6e3SSantosh Shilimkar	ldr     r0, =OMAP4_PPA_L2_POR_INDEX
3015e94c6e3SSantosh Shilimkar	ldr     r1, =OMAP44XX_SAR_RAM_BASE
3025e94c6e3SSantosh Shilimkar	ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]	@ saved PREFETCH_CTRL
3035e94c6e3SSantosh Shilimkar	adr     r3, ppa_por_params
3045e94c6e3SSantosh Shilimkar	str     r4, [r3, #0x04]			@ patch value into ppa_por_params
3055e94c6e3SSantosh Shilimkar	mov	r1, #0x0			@ Process ID
3065e94c6e3SSantosh Shilimkar	mov	r2, #0x4			@ Flag
3075e94c6e3SSantosh Shilimkar	mov	r6, #0xff
3085e94c6e3SSantosh Shilimkar	mov	r12, #0x00			@ Secure Service ID
3095e94c6e3SSantosh Shilimkar	DO_SMC
3105e94c6e3SSantosh Shilimkar	b	set_aux_ctrl
3115e94c6e3SSantosh Shilimkarset_gp_por:
3125e94c6e3SSantosh Shilimkar	ldr     r1, =OMAP44XX_SAR_RAM_BASE
3135e94c6e3SSantosh Shilimkar	ldr     r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
3145e94c6e3SSantosh Shilimkar	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
3155e94c6e3SSantosh Shilimkar	DO_SMC
3165e94c6e3SSantosh Shilimkarset_aux_ctrl:
3175e94c6e3SSantosh Shilimkar	ldr     r1, =OMAP44XX_SAR_RAM_BASE
3185e94c6e3SSantosh Shilimkar	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
3195e94c6e3SSantosh Shilimkar	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
3205e94c6e3SSantosh Shilimkar	DO_SMC
3215e94c6e3SSantosh Shilimkar	mov	r0, #0x1			@ L2X0_CTRL enable value
3225e94c6e3SSantosh Shilimkar	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
3235e94c6e3SSantosh Shilimkar	DO_SMC
3245e94c6e3SSantosh Shilimkarskip_l2en:
3255e94c6e3SSantosh Shilimkar#endif
326b2b9762fSSantosh Shilimkar
327b2b9762fSSantosh Shilimkar	b	cpu_resume			@ Jump to generic resume
328b2b9762fSSantosh ShilimkarENDPROC(omap4_cpu_resume)
329b2b9762fSSantosh Shilimkar#endif
330b2b9762fSSantosh Shilimkar
331137d105dSSantosh Shilimkar#ifndef CONFIG_OMAP4_ERRATA_I688
331137d105dSSantosh Shilimkar/*
331137d105dSSantosh Shilimkar * Stub: the interconnect write-buffer drain is only implemented when
331137d105dSSantosh Shilimkar * the i688 erratum workaround is built in; otherwise just return.
331137d105dSSantosh Shilimkar */
332137d105dSSantosh ShilimkarENTRY(omap_bus_sync)
333137d105dSSantosh Shilimkar	mov	pc, lr
334137d105dSSantosh ShilimkarENDPROC(omap_bus_sync)
335137d105dSSantosh Shilimkar#endif
336137d105dSSantosh Shilimkar
337b2b9762fSSantosh ShilimkarENTRY(omap_do_wfi)
338b2b9762fSSantosh Shilimkar	stmfd	sp!, {lr}			@ save return address across calls
339137d105dSSantosh Shilimkar	/* Drain interconnect write buffers. */
340137d105dSSantosh Shilimkar	bl omap_bus_sync
341b2b9762fSSantosh Shilimkar
342b2b9762fSSantosh Shilimkar	/*
343b2b9762fSSantosh Shilimkar	 * Execute an ISB instruction to ensure that all of the
344b2b9762fSSantosh Shilimkar	 * CP15 register changes have been committed.
345b2b9762fSSantosh Shilimkar	 */
346b2b9762fSSantosh Shilimkar	isb
347b2b9762fSSantosh Shilimkar
348b2b9762fSSantosh Shilimkar	/*
349b2b9762fSSantosh Shilimkar	 * Execute a barrier instruction to ensure that all cache,
350b2b9762fSSantosh Shilimkar	 * TLB and branch predictor maintenance operations issued
351b2b9762fSSantosh Shilimkar	 * by any CPU in the cluster have completed.
352b2b9762fSSantosh Shilimkar	 */
353b2b9762fSSantosh Shilimkar	dsb
354b2b9762fSSantosh Shilimkar	dmb
355b2b9762fSSantosh Shilimkar
356b2b9762fSSantosh Shilimkar	/*
357b2b9762fSSantosh Shilimkar	 * Execute a WFI instruction and wait until the
358b2b9762fSSantosh Shilimkar	 * STANDBYWFI output is asserted to indicate that the
359b2b9762fSSantosh Shilimkar	 * CPU is in idle and low power state. CPU can speculatively
360b2b9762fSSantosh Shilimkar	 * prefetch the instructions so add NOPs after WFI. Sixteen
361b2b9762fSSantosh Shilimkar	 * NOPs as per Cortex-A9 pipeline.
362b2b9762fSSantosh Shilimkar	 */
363b2b9762fSSantosh Shilimkar	wfi					@ Wait For Interrupt
364b2b9762fSSantosh Shilimkar	nop
365b2b9762fSSantosh Shilimkar	nop
366b2b9762fSSantosh Shilimkar	nop
367b2b9762fSSantosh Shilimkar	nop
368b2b9762fSSantosh Shilimkar	nop
369b2b9762fSSantosh Shilimkar	nop
370b2b9762fSSantosh Shilimkar	nop
371b2b9762fSSantosh Shilimkar	nop
372b2b9762fSSantosh Shilimkar	nop
373b2b9762fSSantosh Shilimkar	nop
374b2b9762fSSantosh Shilimkar	nop
375b2b9762fSSantosh Shilimkar	nop
376b2b9762fSSantosh Shilimkar	nop
377b2b9762fSSantosh Shilimkar	nop
378b2b9762fSSantosh Shilimkar	nop
379b2b9762fSSantosh Shilimkar	nop
380b2b9762fSSantosh Shilimkar
381b2b9762fSSantosh Shilimkar	ldmfd	sp!, {pc}			@ return (reached on wake-up/abort)
382b2b9762fSSantosh ShilimkarENDPROC(omap_do_wfi)
383