xref: /linux/arch/powerpc/kernel/vector.S (revision ca853314e78b0a65c20b6a889a23c31f918d4aa2)
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	blr
EXPORT_SYMBOL(store_vr_state)
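
/*
 * Illustrative sketch only (not an additional entry point): assuming r3
 * carries a pointer to a struct thread_vr_state laid out as 32 vector
 * registers followed by the VSCR at offset VRSTATE_VSCR (per asm-offsets),
 * the two helpers above behave roughly like this pseudo-C:
 *
 *	void load_vr_state(struct thread_vr_state *v)
 *	{
 *		write VSCR from v->vscr;		// lvx + mtvscr
 *		reload v0-v31 from v->vr[0..31];	// REST_32VRS
 *	}
 *
 *	void store_vr_state(struct thread_vr_state *v)
 *	{
 *		save v0-v31 into v->vr[0..31];		// SAVE_32VRS
 *		read VSCR into v->vscr;			// mfvscr + stvx
 *	}
 */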

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enable VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (i.e. no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
	 * to optimise userspace context save/restore. Whenever we take an
	 * AltiVec unavailable exception we must set VRSAVE to something
	 * non-zero. Set it to all 1s. See also the programming note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#ifdef CONFIG_VMAP_STACK
	tovirt(r5, r5)
#endif
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
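
/*
 * In outline, load_up_altivec above does the following (a sketch of the
 * control flow, derived from the code, not a separate entry point):
 *
 *	1. Set MSR_VEC in the current MSR so the kernel itself may execute
 *	   the VMX loads below.
 *	2. If VRSAVE is zero, set it to all 1s (the glibc hint described in
 *	   the comment above).
 *	3. Set MSR_VEC in the interrupted context's saved MSR (r9 on 32-bit,
 *	   the _MSR word of the pt_regs frame on 64-bit) so the task keeps
 *	   VMX enabled once the exception returns.
 *	4. Mark thread.load_vec and thread.used_vr, then reload VSCR and
 *	   v0-v31 from thread.vr_state and return to the exception handler.
 */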

/*
 * save_altivec(tsk)
 * Save tsk's vector registers to its thread_struct.
 */
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	blr
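
/*
 * In C terms, save_altivec roughly does the following (illustrative only,
 * assuming the usual thread_struct fields behind THREAD_VRSAVEAREA and
 * THREAD_VRSTATE):
 *
 *	vrstate = tsk->thread.vr_save_area ? tsk->thread.vr_save_area
 *					   : &tsk->thread.vr_state;
 *	save v0-v31 and VSCR into *vrstate;
 *
 * i.e. a separately registered save area, if any, is preferred over the
 * default vr_state in the thread_struct.
 */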

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the FP and VMX load paths (load_up_fpu/load_up_altivec), but
 * first check to see if that state has been loaded already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	b	fast_interrupt_return
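
/*
 * Control flow of load_up_vsx, in outline: if MSR_FP (resp. MSR_VEC) is
 * clear in the saved MSR held in r12, call load_up_fpu (resp.
 * load_up_altivec) to bring that half of the state in; then mark
 * thread.used_vsr, set MSR_VSX in the saved MSR so the task keeps VSX
 * after the interrupt returns, and exit through fast_interrupt_return.
 */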

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
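
/*
 * These helpers provide the arithmetic behind the kernel's AltiVec
 * floating-point emulation (their callers are expected to be in
 * vecemu.c).  Each one is handed pointers to the four packed
 * single-precision elements of the operand vectors, which is why every
 * loop below runs exactly four times, stepping 4 bytes per element.
 */
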
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif
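
/*
 * Note on the constants: on 32-bit they are stored as single-precision
 * words and LDCONST loads them with lfs (which converts to the FPR's
 * double format), while on 64-bit LDCONST loads the equivalent
 * double-precision TOC entries with lfd.  Either way the named FPR ends
 * up holding 0.0, 1.0 or 0.5.
 */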

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr
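
/*
 * Calling convention shared by fpenable/fpdisable and the routines below
 * (derived from the code above): each _GLOBAL entry saves its return
 * address in r12 (mflr r12) before bl fpenable.  fpenable creates a
 * 64-byte frame, saves the old MSR in r10, enables MSR_FP, spills
 * fr0/fr1/fr31 at offsets 24/16/8, saves the old FPSCR in fr31 and zeroes
 * the FPSCR.  Routines needing extra temporaries spill fr2-fr5 at offsets
 * 32-56.  fpdisable undoes all of this and returns to the original caller
 * via the LR value kept in r12.
 */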

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
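
/*
 * Reference model for vaddfp (illustrative C only; the actual prototypes
 * are declared by the emulation code):
 *
 *	void vaddfp(float d[4], const float a[4], const float b[4])
 *	{
 *		int i;
 *		for (i = 0; i < 4; i++)
 *			d[i] = a[i] + b[i];
 *	}
 *
 * r3/r4/r5 carry d/a/b and r6 is the byte offset of the current element.
 * vsubfp below is identical except that it uses fsubs.
 */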

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
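
/*
 * Element-wise, vmaddfp computes d[i] = a[i] * c[i] + b[i] (fmadds with
 * r3 = d, r4 = a, r5 = b, r6 = c), and vnmsubfp below computes
 * d[i] = -(a[i] * c[i] - b[i]), matching the AltiVec vmaddfp/vnmsubfp
 * semantics.  Both need a third source element in flight, so fr2 is
 * additionally spilled to offset 32 of the fpenable frame.
 */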

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
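
/*
 * Since this is software emulation, vrefp can afford a full-precision
 * fdivs rather than a hardware-style estimate: element-wise it simply
 * produces d[i] = 1.0f / s[i], which trivially meets the accuracy
 * required of the estimate instruction.
 */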

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable
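
/*
 * Worked derivation of the iteration used above: to refine r towards
 * 1/sqrt(s), apply Newton-Raphson to f(r) = 1/r^2 - s, whose root is the
 * desired value:
 *
 *	r' = r - f(r)/f'(r)
 *	   = r - (1/r^2 - s) / (-2/r^3)
 *	   = r + 0.5 * r * (1 - s * r * r)
 *
 * which is exactly the fnmsubs/fmadds pair in the loop.  Each step
 * roughly doubles the number of accurate bits, which is why two
 * iterations on top of the frsqrte seed are enough here, as the header
 * comment notes.
 */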