/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 */
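/*
 * Register conventions for the wrappers below (per PAPR): r3 holds the
 * hcall opcode and r4-r10 carry the input arguments (plpar_hcall9 passes
 * the remainder on the stack).  The hypervisor returns the status in r3
 * and any additional return values in r4 onwards, which the wrappers
 * copy into the caller-supplied return buffer.
 */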
#include <linux/jump_label.h>
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>

	.section	".text"

#ifdef CONFIG_TRACEPOINTS

#ifndef CONFIG_JUMP_LABEL
	.data

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.8byte	0

	.section	".text"
#endif
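/*
 * Without jump labels, hcall_tracepoint_refcount counts the active users
 * of the hcall tracepoints; HCALL_BRANCH() below tests it at runtime to
 * decide whether to take the traced path.
 */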

/*
 * The precall hook must preserve all registers.  Use the otherwise
 * unused STK_PARAM() areas to save snapshots of the arguments and the
 * opcode.  The STK_PARAM() area in the caller's frame is available even
 * on ELFv2 (where the parameter save area is normally optional) because
 * these are all variadic functions.
 */
#define HCALL_INST_PRECALL(FIRST_REG)				\
	mflr	r0;						\
	std	r3,STK_PARAM(R3)(r1);				\
	std	r4,STK_PARAM(R4)(r1);				\
	std	r5,STK_PARAM(R5)(r1);				\
	std	r6,STK_PARAM(R6)(r1);				\
	std	r7,STK_PARAM(R7)(r1);				\
	std	r8,STK_PARAM(R8)(r1);				\
	std	r9,STK_PARAM(R9)(r1);				\
	std	r10,STK_PARAM(R10)(r1);				\
	std	r0,16(r1);					\
	addi	r4,r1,STK_PARAM(FIRST_REG);			\
	stdu	r1,-STACK_FRAME_MIN_SIZE(r1);			\
	bl	CFUNC(__trace_hcall_entry);			\
	ld	r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
	ld	r4,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1);	\
	ld	r5,STACK_FRAME_MIN_SIZE+STK_PARAM(R5)(r1);	\
	ld	r6,STACK_FRAME_MIN_SIZE+STK_PARAM(R6)(r1);	\
	ld	r7,STACK_FRAME_MIN_SIZE+STK_PARAM(R7)(r1);	\
	ld	r8,STACK_FRAME_MIN_SIZE+STK_PARAM(R8)(r1);	\
	ld	r9,STACK_FRAME_MIN_SIZE+STK_PARAM(R9)(r1);	\
	ld	r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R10)(r1)
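/*
 * On entry to __trace_hcall_entry the opcode is still live in r3 and r4
 * points at the saved-argument snapshot, matching the C-side hook
 * __trace_hcall_entry(opcode, args); LR was saved in its usual slot at
 * 16(r1) before the minimal frame was pushed.
 */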

/*
 * The postcall hook is performed immediately before the function
 * returns, which allows liberal use of volatile registers.
 */
#define __HCALL_INST_POSTCALL					\
	ld	r0,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
	std	r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
	mr	r4,r3;						\
	mr	r3,r0;						\
	bl	CFUNC(__trace_hcall_exit);			\
	ld	r0,STACK_FRAME_MIN_SIZE+16(r1);			\
	addi	r1,r1,STACK_FRAME_MIN_SIZE;			\
	ld	r3,STK_PARAM(R3)(r1);				\
	mtlr	r0

#define HCALL_INST_POSTCALL_NORETS				\
	li	r5,0;						\
	__HCALL_INST_POSTCALL

#define HCALL_INST_POSTCALL(BUFREG)				\
	mr	r5,BUFREG;					\
	__HCALL_INST_POSTCALL
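/*
 * The above sets up the arguments of __trace_hcall_exit(opcode, retval,
 * retbuf): the opcode saved by the precall is reloaded into r3, the
 * hcall status moves from r3 into r4, and r5 holds the return buffer
 * (or 0 for the norets case).  The hcall status is stashed in the R3
 * save slot and restored into r3 after the trace call so it survives
 * back to the caller.
 */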

#ifdef CONFIG_JUMP_LABEL
#define HCALL_BRANCH(LABEL)					\
	ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
#else

/*
 * We branch around this in early init (e.g. when populating the MMU
 * hashtable) by using an unconditional cpu feature.
 */
#define HCALL_BRANCH(LABEL)					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ;		\
	ld	r12,0(r12);					\
	cmpdi	r12,0;						\
	bne-	LABEL;						\
1:
#endif

#else
#define HCALL_INST_PRECALL(FIRST_ARG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#define HCALL_BRANCH(LABEL)
#endif

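/*
 * long plpar_hcall_norets_notrace(unsigned long opcode, ...);
 *
 * Hcall with no return values and no tracepoint hooks, for callers
 * where running the tracing code would be unsafe.  HMT_MEDIUM restores
 * medium SMT thread priority in case the caller had lowered it.
 */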
_GLOBAL_TOC(plpar_hcall_norets_notrace)
	HMT_MEDIUM

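	/*
	 * The hypervisor may clobber CR fields, so save CR in the CR
	 * save slot of the caller's frame and restore it after the call
	 * (the same pattern is used in every wrapper below).
	 */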
	mfcr	r0
	stw	r0,8(r1)
	HVSC				/* invoke the hypervisor */

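	/*
	 * SRR0/SRR1 cannot be relied upon after a hypervisor call, so
	 * flag the PACA's cached SRR values as stale (again, repeated
	 * after every HVSC below).
	 */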
	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

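/*
 * long plpar_hcall_norets(unsigned long opcode, ...);
 *
 * As above, but with the tracepoint hooks: HCALL_BRANCH() diverts to
 * plpar_hcall_norets_trace when hcall tracing is enabled.
 */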
_GLOBAL_TOC(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)
	HCALL_BRANCH(plpar_hcall_norets_trace)
	HVSC				/* invoke the hypervisor */

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
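/* Traced slow path: wrap the HVSC in the entry/exit tracepoint hooks. */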
plpar_hcall_norets_trace:
	HCALL_INST_PRECALL(R4)
	HVSC
	HCALL_INST_POSTCALL_NORETS

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr
#endif

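/*
 * long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Takes up to six input arguments and stores the first four hypervisor
 * return values into retbuf.  A call site looks roughly like this
 * (H_GET_TERM_CHAR and termno are purely illustrative):
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	long rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
 */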
_GLOBAL_TOC(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_BRANCH(plpar_hcall_trace)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall_trace:
	HCALL_INST_PRECALL(R5)

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC

	ld	r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
	std	r4,0(r12)
	std	r5,8(r12)
	std	r6,16(r12)
	std	r7,24(r12)

	HCALL_INST_POSTCALL(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr
#endif

/*
 * plpar_hcall_raw can be called in real mode.  kexec/kdump need some
 * hypervisor calls to be executed in real mode, so plpar_hcall_raw
 * skips the tracepoint hooks and does not access the per-cpu hypervisor
 * call statistics variables, since these may not be present in the RMO
 * region.
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

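/*
 * long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Nine-argument, nine-return-value variant; retbuf must hold
 * PLPAR_HCALL9_BUFSIZE entries.  With opcode and retbuf occupying r3/r4,
 * only the first six hcall arguments arrive in registers (r5-r10); the
 * variadic ABI forces arguments seven to nine into the caller's
 * parameter save area, which is why they are loaded from
 * STK_PARAM(R11)..STK_PARAM(R13) below.
 */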
_GLOBAL_TOC(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_BRANCH(plpar_hcall9_trace)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARAM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall9_trace:
	HCALL_INST_PRECALL(R5)

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R11)(r1)
	ld	r11,STACK_FRAME_MIN_SIZE+STK_PARAM(R12)(r1)
	ld	r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R13)(r1)

	HVSC

	mr	r0,r12
	ld	r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
	std	r4,0(r12)
	std	r5,8(r12)
	std	r6,16(r12)
	std	r7,24(r12)
	std	r8,32(r12)
	std	r9,40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0,64(r12)

	HCALL_INST_POSTCALL(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr
#endif

/* See plpar_hcall_raw above for why this real-mode variant is needed */
_GLOBAL(plpar_hcall9_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARAM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */