/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * 	Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"

#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

#if defined(CONFIG_SMP) && defined(CONFIG_PM)

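/*
 * Issue a secure monitor call with a DSB on either side. The secure
 * service index is passed in r12 and the arguments in the low registers,
 * as set up by each caller below.
 */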
.macro	DO_SMC
	dsb
	smc	#0
	dsb
.endm

#ifdef CONFIG_ARCH_OMAP4

/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power-down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for the CPU OFF and DORMANT power
 * states. Post WFI, the CPU transitions to the DORMANT or OFF power state
 * and on wake-up follows a full CPU reset path via ROM code to the CPU
 * restore code. The restore function pointer is stored at
 * CPUx_WAKEUP_NS_PA_ADDR_OFFSET. It returns to the caller for the CPU
 * INACTIVE and ON power states, or in case the CPU failed to transition
 * to the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all(), which doesn't save
 * its stack frame and expects the caller to take care of it. Hence the
 * entire stack frame is saved here to avoid possible stack corruption.
 */
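/*
 * A minimal sketch of how this finisher is typically reached: the MPUSS
 * low-power C code (omap-mpuss-lowpower.c) hands it to the generic ARM
 * suspend helper, roughly:
 *
 *	cpu_suspend(save_state, omap4_finish_suspend);
 *
 * cpu_suspend() saves the generic CPU context and then calls the finisher
 * with save_state in r0.
 */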
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI				@ No lowpower state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * the SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ clean secure L1
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0		@ Read SCTLR
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	bl	v7_invalidate_l1

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
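	/*
	 * Clear the ACTLR.SMP bit to take this CPU out of coherency, but
	 * only if NSACR bit 18 indicates that non-secure writes to the
	 * SMP bit are permitted.
	 */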
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1		@ Read ACTLR
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since
	 * they use spinlocks. We are out of coherency here with the
	 * data cache disabled. The spinlock implementation uses
	 * exclusive load/store instructions, which can fail without
	 * the data cache being enabled. OMAP4 hardware doesn't support
	 * an exclusive monitor which could overcome the exclusive
	 * access issue, so the CPU could deadlock.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
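	/*
	 * Erratum 727915 workaround: program the PL310 debug control
	 * register through the secure monitor to disable write-back and
	 * cache linefills around the clean & invalidate by way; it is
	 * cleared again after the polling loop below.
	 */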
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
	ldr	r0, =0xffff
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]	@ Clean & invalidate all 16 ways
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait				@ Poll until all ways are done
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]	@ Issue a cache sync
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync				@ Wait for the sync to complete
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * The CPU ends up here when it failed to enter OFF/DORMANT or
	 * when no low-power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0		@ Read SCTLR
	tst	r0, #(1 << 2)			@ Check if the C bit is enabled
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in the SCU power
	 * state register so that the CPU is back in coherency. In
	 * non-coherent mode the CPU can lock up and lead to a system
	 * deadlock.
	 */
	mrc	p15, 0, r0, c1, c0, 1		@ Read ACTLR
	tst	r0, #(1 << 6)			@ Check if the SMP bit is enabled
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from the CPU
 * OFF or DORMANT state. The physical address of this function is
 * stored in the SAR RAM while entering OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
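/*
 * A rough sketch of how the wakeup address gets there: before entering
 * OFF/DORMANT, the MPUSS low-power C code stores the physical address of
 * this routine into SAR RAM, along the lines of:
 *
 *	writel_relaxed(__pa_symbol(omap4_cpu_resume),
 *		       sar_base + CPUx_WAKEUP_NS_PA_ADDR_OFFSET);
 */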
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1 a secure PPA API is provided. CPU0 must be ON
	 * while executing the NS_SMP API on CPU1 and the PPA version must
	 * be 1.4.0+.
	 * OMAP443X GP devices - the SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	beq	skip_ns_smp_enable		@ Nothing to do on CPU0
ppa_actrl_retry:
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r1, ppa_zero_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_zero_params
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1		@ Read ACTLR
	tst	r0, #(1 << 6)			@ Check if the SMP bit is enabled
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = enable the L2 using the L2X0 CTRL
	 * register. r0 contains the value to be programmed.
	 * The L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne	set_gp_por
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r1, ppa_por_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_por_params
	str	r4, [r3, #0x04]
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
ppa_por_params_offset:
	.long	ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif	/* CONFIG_ARCH_OMAP4 */

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */

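/*
 * void omap_do_wfi(void)
 *
 * Drain the interconnect write buffers (when the interconnect barrier is
 * enabled), issue the required barriers and execute WFI to let the CPU
 * enter its idle/low-power state.
 */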
ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in the idle and low power state. The CPU can
	 * speculatively prefetch instructions, so add NOPs
	 * after WFI. Sixteen NOPs as per the Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ppa_zero_params_offset:
	.long	ppa_zero_params - .
ENDPROC(omap_do_wfi)

	.data
	.align	2
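/*
 * Parameter blocks referenced (PC-relative, via the *_offset labels above)
 * by the secure PPA service calls in this file.
 */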
ppa_zero_params:
	.word		0

ppa_por_params:
	.word		1, 0
386