/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>
#include <linux/errno.h>

#define STKFRM	(PPC_MIN_STKFRM + 16)
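/*
 * STKFRM is a minimum stack frame plus 16 bytes of scratch space; the
 * scratch slot at STKFRM-16(r1) is used by load_vsrn/store_vsrn below to
 * preserve vs0 across the copy.
 *
 * For orientation, an illustrative (assumed) C-level view of the helpers
 * defined in this file, as the instruction emulation caller might declare
 * them; the authoritative prototypes live with the caller, not here.
 * p points at a double for the FPR helpers and at a 16-byte vector for
 * the VMX/VSX ones:
 *
 *	void get_fpr(int rn, double *p);
 *	void put_fpr(int rn, double *p);
 *	void get_vr(int rn, void *p);
 *	void put_vr(int rn, void *p);
 *	void load_vsrn(int vsr, void *p);
 *	void store_vsrn(int vsr, void *p);
 *	void conv_sp_to_dp(float *sp, double *dp);
 *	void conv_dp_to_sp(double *dp, float *sp);
 */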

/* Get the contents of frN into *p; N is in r3 and p is in r4. */
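/*
 * get_fpr, put_fpr, get_vr, put_vr, get_vsr and put_vsr all use the same
 * dispatch trick: the register number is only known at run time, so it
 * cannot be encoded into a single load/store instruction.  Each routine
 * therefore lays out a table of 8-byte entries, one per register (the
 * instruction that touches register N, followed by a branch out or blr).
 * The bcl 20,31,1f sets LR to the address of the instruction after it,
 * i.e. the start of the table, while jumping over the table to label 1,
 * where the code branches back into the table at offset 8*N via CTR.
 * For example, with N = 5 the rlwinm leaves 5*8 = 40 in r3, so the bctr
 * lands on the "stfd fr5" entry.
 */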
_GLOBAL(get_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	stfd	reg, 0(r4)
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into frN; N is in r3 and p is in r4. */
_GLOBAL(put_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lfd	reg, 0(r4)
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into *p; N is in r3 and p is in r4. */
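/*
 * Same dispatch scheme as get_fpr/put_fpr above, but the facility enabled
 * is VMX/Altivec (MSR_VEC) and the table entries use stvx/lvx.  Since
 * stvx and lvx ignore the low four bits of the effective address, *p is
 * expected to be 16-byte aligned.
 */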
_GLOBAL(get_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	stvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into vrN; N is in r3 and p is in r4. */
_GLOBAL(put_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Get the contents of vsN into vs0; N is in r3. */
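/*
 * Same jump-table idea for the 64 VSX registers: the 0x1f8 mask allows
 * N = 0..63, each 8-byte entry is an XXLOR copy between vsN and vs0
 * followed by a blr, and entry 0 is a bare blr because no copy is needed
 * when N is 0.  get_vsr and put_vsr do not touch the MSR themselves;
 * their callers (load_vsrn/store_vsrn) enable MSR_VSX around the call.
 */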
_GLOBAL(get_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(0,reg,reg)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vs0 into vsN; N is in r3. */
_GLOBAL(put_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(reg,0,0)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load VSX reg N from vector doubleword *p.  N is in r3, p in r4. */
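/*
 * This one needs a real stack frame so it can call put_vsr.  Flow:
 * enable MSR_VSX, save vs0 to the scratch slot at STKFRM-16(r1) (skipped
 * when N == 0, since vs0 is then the destination anyway), lxvd2x the two
 * doublewords from *p into vs0, XXSWAPD on little-endian builds to put
 * the doublewords in the expected order, copy vs0 into vsN via put_vsr,
 * then restore vs0, LR and the original MSR.
 */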
_GLOBAL(load_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	beq	cr7,1f
	STXVD2X(0,R1,R8)
1:	LXVD2X(0,R0,R4)
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	beq	cr7,4f
	bl	put_vsr
	LXVD2X(0,R1,R8)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	addi	r1,r1,STKFRM
	blr

/* Store VSX reg N to vector doubleword *p.  N is in r3, p in r4. */
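/*
 * Mirror image of load_vsrn: save vs0 to the scratch slot, fetch vsN into
 * vs0 via get_vsr, swap the doublewords on little-endian builds, store
 * vs0 to *p with stxvd2x, then restore vs0, LR and the original MSR.
 */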
_GLOBAL(store_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	STXVD2X(0,R1,R8)
	bl	get_vsr
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	STXVD2X(0,R0,R4)
	LXVD2X(0,R1,R8)
	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
#endif /* CONFIG_VSX */

/* Convert single-precision to double, without disturbing FPRs. */
/* conv_sp_to_dp(float *sp, double *dp) */
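/*
 * fr0 is spilled to the scratch slot at -16(r1) and reloaded at the end,
 * so no FPR is disturbed; the conversion itself is done by the hardware,
 * since lfs expands the single-precision value to double-precision
 * format as it loads it into fr0.
 */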
_GLOBAL(conv_sp_to_dp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfs	fr0, 0(r3)
	stfd	fr0, 0(r4)
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr

/* Convert double-precision to single, without disturbing FPRs. */
/* conv_dp_to_sp(double *dp, float *sp) */
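/*
 * Same pattern as conv_sp_to_dp above: fr0 is preserved via -16(r1), and
 * stfs rounds the double-precision value in fr0 to single precision as
 * it stores it to *sp.
 */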
_GLOBAL(conv_dp_to_sp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfd	fr0, 0(r3)
	stfs	fr0, 0(r4)
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr