xref: /freebsd/lib/libc/arm/aeabi/aeabi_vfp_double.S (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

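/*
 * Note: the AEABI_ENTRY/AEABI_END and LOAD_DREG/UNLOAD_DREG/UNLOAD_SREG
 * macros come from aeabi_vfp.h.  On hard-float builds the arguments
 * already arrive in VFP registers, so the load/unload macros are
 * expected to expand to nothing; on softfp builds they move the
 * core-register pairs to and from the d-registers, and the entry macros
 * emit a separate symbol so the soft-float dispatch code can fall back
 * when no VFP unit is present (see aeabi_vfp.h for the exact expansions).
 */
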
/* void __aeabi_cdcmpeq(double, double) */
AEABI_ENTRY(cdcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmpeq)

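/*
 * cdcmple and cdrcmple use vcmpe rather than vcmp: the "E" variant
 * raises the Invalid Operation exception when either operand is any
 * NaN, while plain vcmp (used by cdcmpeq above) only does so for
 * signalling NaNs.  The ordering helpers therefore signal on unordered
 * operands while the equality helper stays quiet.
 */
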
/* void __aeabi_cdcmple(double, double) */
AEABI_ENTRY(cdcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmple)

/* void __aeabi_cdrcmple(double, double) */
AEABI_ENTRY(cdrcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdrcmple)

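/*
 * After vcmp.f64 + vmrs, the FPSCR comparison flags are copied into the
 * APSR as: equal => Z=1,C=1; less than => N=1; greater than => C=1;
 * unordered => C=1,V=1.  The dcmp* helpers below map these onto the
 * condition codes eq, cc (lt), ls (le), ge, gt and vs (unordered),
 * returning 1 when the predicate holds and 0 otherwise.
 */
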
/* int __aeabi_dcmpeq(double, double) */
AEABI_ENTRY(dcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      ne
	movne    r0, #0
	moveq    r0, #1
	RET
AEABI_END(dcmpeq)

/* int __aeabi_dcmplt(double, double) */
AEABI_ENTRY(dcmplt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      cs
	movcs    r0, #0
	movcc    r0, #1
	RET
AEABI_END(dcmplt)

/* int __aeabi_dcmple(double, double) */
AEABI_ENTRY(dcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      hi
	movhi    r0, #0
	movls    r0, #1
	RET
AEABI_END(dcmple)

/* int __aeabi_dcmpge(double, double) */
AEABI_ENTRY(dcmpge)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      lt
	movlt    r0, #0
	movge    r0, #1
	RET
AEABI_END(dcmpge)

/* int __aeabi_dcmpgt(double, double) */
AEABI_ENTRY(dcmpgt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      le
	movle    r0, #0
	movgt    r0, #1
	RET
AEABI_END(dcmpgt)

/* int __aeabi_dcmpun(double, double) */
AEABI_ENTRY(dcmpun)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      vc
	movvc    r0, #0
	movvs    r0, #1
	RET
AEABI_END(dcmpun)

/* int __aeabi_d2iz(double) */
AEABI_ENTRY(d2iz)
	LOAD_DREG(d0, r0, r1)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that used FPSCR to determine the rounding.
	 * When binutils is fixed we can use this again.
	 */
	vcvt.s32.f64 s0, d0
#else
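	/*
	 * ftosizd is the pre-UAL mnemonic for the same conversion with
	 * round-towards-zero (the "z"), used here to sidestep the encoding
	 * problem described above.
	 */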
	ftosizd s0, d0
#endif
	vmov         r0, s0
	RET
AEABI_END(d2iz)

/* float __aeabi_d2f(double) */
AEABI_ENTRY(d2f)
	LOAD_DREG(d0, r0, r1)
	vcvt.f32.f64 s0, d0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(d2f)

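/*
 * Every 32-bit signed integer is exactly representable as an IEEE
 * double, so the conversion below never rounds.
 */
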
/* double __aeabi_i2d(int) */
AEABI_ENTRY(i2d)
	vmov         s0, r0
	vcvt.f64.s32 d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(i2d)

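/*
 * The arithmetic helpers below all follow the same pattern: load the
 * operands into d0/d1, perform the VFP operation, and hand the result
 * back through UNLOAD_DREG.  Under the softfp ABI a caller passes and
 * receives doubles in core-register pairs, roughly as in this
 * illustrative sketch (r4/r5 are arbitrary pointer registers chosen
 * here for the example):
 *
 *	ldrd	r0, r1, [r4]	@ first operand in r0:r1
 *	ldrd	r2, r3, [r5]	@ second operand in r2:r3
 *	bl	__aeabi_dadd	@ sum returned in r0:r1
 */
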
/* double __aeabi_dadd(double, double) */
AEABI_ENTRY(dadd)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vadd.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dadd)

/* double __aeabi_ddiv(double, double) */
AEABI_ENTRY(ddiv)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vdiv.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(ddiv)

/* double __aeabi_dmul(double, double) */
AEABI_ENTRY(dmul)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vmul.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dmul)

/* double __aeabi_dsub(double, double) */
AEABI_ENTRY(dsub)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vsub.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dsub)

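	/* Mark the object as not requiring an executable stack. */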
	.section .note.GNU-stack,"",%progbits