/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  Copyright 2018, IBM Corporation.
 *
 *  This file contains general idle entry/exit functions to save
 *  and restore stack and NVGPRs, which allow C code to call idle
 *  states that lose GPRs; the call returns transparently with the
 *  SRR1 wakeup reason as the return value.
 *
 *  The platform / CPU caller must ensure SPRs and any other non-GPR
 *  state is saved and restored correctly, handle KVM, interrupts, etc.
 */

#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/cpuidle.h>
#include <asm/thread_info.h> /* TLF_NAPPING */

#ifdef CONFIG_PPC_P7_NAP
/*
 * Desired PSSCR in r3
 *
 * No state will be lost regardless of wakeup mechanism (interrupt or NIA).
 *
 * An EC=0 type wakeup will return with a value of 0. SRESET wakeup (which can
 * happen with xscom SRESET and possibly MCE) may clobber volatiles except LR,
 * and must blr, to return to the caller with r3 set according to the caller's
 * expected return code (for Book3S/64 that is SRR1).
 */
_GLOBAL(isa300_idle_stop_noloss)
	mtspr	SPRN_PSSCR,r3
	PPC_STOP
	li	r3,0
	blr
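
/*
 * For illustration, a minimal sketch of the intended C-side usage
 * (hedged: the real caller lives in the platform idle driver; the
 * prototype is assumed to take the PSSCR value and return SRR1 as an
 * unsigned long):
 *
 *	unsigned long srr1;
 *
 *	srr1 = isa300_idle_stop_noloss(psscr_val);
 *	// srr1 == 0 means an EC=0 wakeup with nothing to decode;
 *	// otherwise srr1 holds the SRR1 wakeup reason to decode.
 */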

/*
 * Desired PSSCR in r3
 *
 * GPRs may be lost, so they are saved here. Wakeup is by interrupt only.
 * The SRESET wakeup returns to this function's caller by calling
 * idle_return_gpr_loss with r3 set to the desired return value.
 *
 * A wakeup without GPR loss may alternatively be handled as in
 * isa300_idle_stop_noloss and blr directly, as an optimisation.
 *
 * The caller is responsible for saving/restoring SPRs, MSR, timebase,
 * etc.
 */
_GLOBAL(isa300_idle_stop_mayloss)
	mtspr	SPRN_PSSCR,r3
	std	r1,PACAR1(r13)
	mflr	r4
	mfcr	r5
	/*
	 * Use the stack red zone rather than a new frame for saving regs since
	 * in the case of no GPR loss the wakeup code branches directly back to
	 * the caller without deallocating the stack frame first.
	 */
	std	r2,-8*1(r1)
	std	r14,-8*2(r1)
	std	r15,-8*3(r1)
	std	r16,-8*4(r1)
	std	r17,-8*5(r1)
	std	r18,-8*6(r1)
	std	r19,-8*7(r1)
	std	r20,-8*8(r1)
	std	r21,-8*9(r1)
	std	r22,-8*10(r1)
	std	r23,-8*11(r1)
	std	r24,-8*12(r1)
	std	r25,-8*13(r1)
	std	r26,-8*14(r1)
	std	r27,-8*15(r1)
	std	r28,-8*16(r1)
	std	r29,-8*17(r1)
	std	r30,-8*18(r1)
	std	r31,-8*19(r1)
	std	r4,-8*20(r1)
	std	r5,-8*21(r1)
	/* 168 bytes */
	PPC_STOP
	b	.	/* catch bugs */
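
/*
 * Call-pattern sketch (hedged: the helper names below are hypothetical;
 * only isa300_idle_stop_mayloss is defined here). Because this entry may
 * lose all GPR and SPR state, the C caller must bracket it with its own
 * SPR save/restore, roughly:
 *
 *	save_sprs();					// hypothetical helper
 *	srr1 = isa300_idle_stop_mayloss(psscr_val);
 *	restore_sprs();					// hypothetical helper
 *	// decode wakeup reason from srr1
 */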

/*
 * Desired return value in r3
 *
 * The idle wakeup SRESET interrupt can call this, after idle was entered
 * via one of the _mayloss functions above, to return to the idle sleep
 * function's caller with r3 as the return code.
 *
 * This must not be used if idle was entered via a _noloss function (use
 * a simple blr instead).
 */
_GLOBAL(idle_return_gpr_loss)
	ld	r1,PACAR1(r13)
	ld	r4,-8*20(r1)
	ld	r5,-8*21(r1)
	mtlr	r4
	mtcr	r5
	/*
	 * KVM nap requires r2 to be saved, rather than just restoring it
	 * from PACATOC. This could be avoided for that less common case
	 * if KVM saved its r2.
	 */
	ld	r2,-8*1(r1)
	ld	r14,-8*2(r1)
	ld	r15,-8*3(r1)
	ld	r16,-8*4(r1)
	ld	r17,-8*5(r1)
	ld	r18,-8*6(r1)
	ld	r19,-8*7(r1)
	ld	r20,-8*8(r1)
	ld	r21,-8*9(r1)
	ld	r22,-8*10(r1)
	ld	r23,-8*11(r1)
	ld	r24,-8*12(r1)
	ld	r25,-8*13(r1)
	ld	r26,-8*14(r1)
	ld	r27,-8*15(r1)
	ld	r28,-8*16(r1)
	ld	r29,-8*17(r1)
	ld	r30,-8*18(r1)
	ld	r31,-8*19(r1)
	blr
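
/*
 * For reference, the red zone layout shared by the _mayloss entry points
 * above and this return path (offsets from the r1 saved in PACAR1):
 *
 *	-8*1(r1)		r2
 *	-8*2(r1) .. -8*19(r1)	r14 .. r31
 *	-8*20(r1)		LR
 *	-8*21(r1)		CR
 *
 * 168 bytes in total, within the 288-byte ABI stack red zone.
 */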

/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 * We have to store a GPR somewhere, ptesync, then reload it, and create
 * a false dependency on the result of the load. It doesn't matter which
 * GPR we store, or where we store it. We have already stored r2 to the
 * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r2,-8(r1);					\
	ptesync;						\
	ld	r2,-8(r1);					\
236:	cmpd	cr0,r2,r2;					\
	bne	236b;						\
	IDLE_INST;						\
	b	.	/* catch bugs */
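
/*
 * For illustration, IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) expands to
 * roughly:
 *
 *	std	r2,-8(r1)
 *	ptesync
 *	ld	r2,-8(r1)
 * 236:	cmpd	cr0,r2,r2
 *	bne	236b
 *	nap
 *	b	.
 */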

/*
 * Desired instruction type in r3
 *
 * GPRs may be lost, so they are saved here. Wakeup is by interrupt only.
 * The SRESET wakeup returns to this function's caller by calling
 * idle_return_gpr_loss with r3 set to the desired return value.
 *
 * A wakeup without GPR loss may alternatively be handled as in
 * isa300_idle_stop_noloss and blr directly, as an optimisation.
 *
 * The caller is responsible for saving/restoring SPRs, MSR, timebase,
 * etc.
 *
 * This must be called in real-mode (MSR_IDLE).
 */
_GLOBAL(isa206_idle_insn_mayloss)
	std	r1,PACAR1(r13)
	mflr	r4
	mfcr	r5
	/*
	 * Use the stack red zone rather than a new frame for saving regs since
	 * in the case of no GPR loss the wakeup code branches directly back to
	 * the caller without deallocating the stack frame first.
	 */
	std	r2,-8*1(r1)
	std	r14,-8*2(r1)
	std	r15,-8*3(r1)
	std	r16,-8*4(r1)
	std	r17,-8*5(r1)
	std	r18,-8*6(r1)
	std	r19,-8*7(r1)
	std	r20,-8*8(r1)
	std	r21,-8*9(r1)
	std	r22,-8*10(r1)
	std	r23,-8*11(r1)
	std	r24,-8*12(r1)
	std	r25,-8*13(r1)
	std	r26,-8*14(r1)
	std	r27,-8*15(r1)
	std	r28,-8*16(r1)
	std	r29,-8*17(r1)
	std	r30,-8*18(r1)
	std	r31,-8*19(r1)
	std	r4,-8*20(r1)
	std	r5,-8*21(r1)
	cmpwi	r3,PNV_THREAD_NAP
	bne	1f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
1:	cmpwi	r3,PNV_THREAD_SLEEP
	bne	2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
2:	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
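
/*
 * C-side dispatch sketch (hedged; the exact caller is not shown here):
 * the caller passes one of the PNV_THREAD_* types from <asm/cpuidle.h>
 * and, on a GPR-loss wakeup, receives the SRR1 value back through
 * idle_return_gpr_loss:
 *
 *	srr1 = isa206_idle_insn_mayloss(PNV_THREAD_NAP);
 *	// or PNV_THREAD_SLEEP / PNV_THREAD_WINKLE
 */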
#endif

#ifdef CONFIG_PPC_970_NAP
_GLOBAL(power4_idle_nap)
	LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW)
	ld	r9,PACA_THREAD_INFO(r13)
	ld	r8,TI_LOCAL_FLAGS(r9)
	ori	r8,r8,_TLF_NAPPING
	std	r8,TI_LOCAL_FLAGS(r9)
	/*
	 * The NAPPING bit is set; from this point onward power4_fixup_nap
	 * will cause exceptions to return to power4_idle_nap_return.
	 */
1:	sync
	isync
	mtmsrd	r7
	isync
	b	1b

	.globl power4_idle_nap_return
power4_idle_nap_return:
	blr
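
/*
 * Usage sketch (hedged: the generic caller is assumed to look roughly
 * like the following, gated on the nap feature bit):
 *
 *	if (cpu_has_feature(CPU_FTR_CAN_NAP))
 *		power4_idle_nap();
 *
 * Any interrupt wakes the thread; power4_fixup_nap in the exception
 * entry path redirects the interrupted NIP to power4_idle_nap_return,
 * which returns to power4_idle_nap's caller via the still-valid LR
 * (970 nap does not lose GPRs).
 */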
#endif