/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/page.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"

#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

	.arch armv7-a

#if defined(CONFIG_SMP) && defined(CONFIG_PM)

	.arch_extension sec
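/*
 * Issue a secure monitor call. The DSBs before and after the SMC
 * ensure that outstanding memory transactions complete before
 * entering the monitor and before execution continues afterwards.
 */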
.macro	DO_SMC
	dsb
	smc	#0
	dsb
.endm

#ifdef CONFIG_ARCH_OMAP4

/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for CPU OFF and DORMANT power states.
 * Post WFI, the CPU transitions to DORMANT or OFF power state and on wake-up
 * follows a full CPU reset path via ROM code to the CPU restore code.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 * It returns to the caller for CPU INACTIVE and ON power states or in case
 * the CPU failed to transition to the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * its stack frame and expects the caller to take care of it. Hence the
 * entire register frame is saved to avoid possible stack corruption.
 */
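
/*
 * A minimal sketch of the expected C-side usage, assuming the common
 * ARM cpu_suspend() finisher convention (see omap-mpuss-lowpower.c):
 *
 *	cpu_suspend(save_state, omap4_finish_suspend);
 *
 * cpu_suspend() saves the core context and then invokes the finisher
 * with save_state in r0.
 */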
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI				@ No low power state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ Clean secure L1
	stmfd   r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd   r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

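	/*
	 * Invalidate the L1 data cache so that no valid lines remain
	 * in it across the CPU power transition.
	 */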
	bl	v7_invalidate_l1

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ Extract CPU ID
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd   r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd   r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ Extract CPU ID
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
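	/*
	 * If NSACR.NS_SMP (bit 18) permits non-secure writes to the
	 * ACTLR SMP bit, clear the SMP bit here to take this CPU out
	 * of coherency before the power transition.
	 */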
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since
	 * they take spinlocks. We are out of coherency here with the
	 * data cache disabled. The spinlock implementation uses
	 * exclusive load/store instructions which can fail when the
	 * data cache is disabled, and OMAP4 hardware doesn't have an
	 * external exclusive monitor to overcome this, so taking a
	 * spinlock here could deadlock the CPU.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f			@ Extract CPU ID
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3				@ Only clean L2 for MPUSS OFF
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
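	/*
	 * PL310 erratum 727915 workaround: set the debug control
	 * register DCL and DWB bits via the secure monitor around
	 * the clean & invalidate by way operation.
	 */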
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
	ldr	r0, =0xffff
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]	@ Clean & invalidate all 16 ways
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1			@ Wait until all ways complete
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00			@ Restore L2X0 debug control
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]	@ Drain the L2X0 buffers
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1			@ Wait for the sync to complete
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * CPU is here when it failed to enter OFF/DORMANT or
	 * no low power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in the
	 * SCU power status register so that the CPU is back in
	 * coherency. In non-coherent mode the CPU can lock up
	 * and lead to system deadlock.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd   r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd   r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of the function is
 * stored in the SAR RAM while entering OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
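
/*
 * A minimal sketch of how the wakeup address is published before
 * entering OFF/DORMANT (sar_base and the exact call site are
 * assumptions; the offset is named in the comment above):
 *
 *	writel_relaxed(__pa_symbol(omap4_cpu_resume),
 *		       sar_base + CPUx_WAKEUP_NS_PA_ADDR_OFFSET);
 */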
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1, a secure PPA API is provided. CPU0 must be ON
	 * while executing the NS_SMP API on CPU1 and the PPA version must
	 * be 1.4.0 or later.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc     p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands    r0, r0, #0x0f			@ Extract CPU ID
	beq	skip_ns_smp_enable		@ Skip on CPU0
ppa_actrl_retry:
	mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
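	/*
	 * Build the address of ppa_zero_params from the PC-relative
	 * offset stored at ppa_zero_params_offset, so the reference
	 * is valid at whatever address this code executes.
	 */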
	adr	r1, ppa_zero_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_zero_params
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using the L2X0 CTRL
	 * register. r0 contains the value to be programmed.
	 * The L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne     set_gp_por
	ldr     r0, =OMAP4_PPA_L2_POR_INDEX
	ldr     r1, =OMAP44XX_SAR_RAM_BASE
	ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr     r1, ppa_por_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_por_params
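	/* Pass the saved L2 PREFETCH_CTRL value as the PPA parameter. */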
	str     r4, [r3, #0x04]
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	ldr     r1, =OMAP44XX_SAR_RAM_BASE
	ldr     r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr     r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
ppa_por_params_offset:
	.long	ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif	/* CONFIG_ARCH_OMAP4 */

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */

ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. The CPU can
	 * speculatively prefetch instructions, so add NOPs
	 * after WFI. Sixteen NOPs as per the Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ppa_zero_params_offset:
	.long	ppa_zero_params - .
ENDPROC(omap_do_wfi)

	.data
	.align	2
ppa_zero_params:
	.word		0

ppa_por_params:
	.word		1, 0