xref: /freebsd/lib/libc/arm/aeabi/aeabi_vfp_double.S (revision fcb560670601b2a4d87bb31d7531c8dcc37ee71b)
/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

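/*
 * The routines below are the AEABI (ARM RTABI) double-precision helper
 * functions, implemented with VFP instructions.  The LOAD_DREG and
 * UNLOAD_DREG macros (see aeabi_vfp.h) move each double between the
 * core-register pair used by the soft-float calling convention
 * (r0/r1 and r2/r3 for arguments, r0/r1 for the result) and a VFP
 * d register when that marshalling is needed.
 *
 * The three __aeabi_cd* compares that follow return their result in the
 * APSR condition flags (copied from the FPSCR by vmrs) rather than in
 * r0; __aeabi_cdrcmple compares its operands in reverse order.  cdcmpeq
 * uses the non-signalling vcmp, while cdcmple/cdrcmple use vcmpe, which
 * raises the Invalid Operation exception when either operand is a NaN.
 */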
/* void __aeabi_cdcmpeq(double, double) */
AEABI_ENTRY(cdcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmpeq)

/* void __aeabi_cdcmple(double, double) */
AEABI_ENTRY(cdcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmple)

/* void __aeabi_cdrcmple(double, double) */
AEABI_ENTRY(cdrcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdrcmple)

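/*
 * The following compares return a boolean in r0.  vmrs copies the VFP
 * comparison flags into the APSR; an unordered result (at least one
 * operand is a NaN) sets both C and V, so the paired conditional moves
 * below yield 0 for NaN operands in every case except __aeabi_dcmpun,
 * which exists to report exactly that condition.
 */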
/* int __aeabi_dcmpeq(double, double) */
AEABI_ENTRY(dcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movne    r0, #0
	moveq    r0, #1
	RET
AEABI_END(dcmpeq)

/* int __aeabi_dcmplt(double, double) */
AEABI_ENTRY(dcmplt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movcs    r0, #0
	movlt    r0, #1
	RET
AEABI_END(dcmplt)

/* int __aeabi_dcmple(double, double) */
AEABI_ENTRY(dcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movhi    r0, #0
	movls    r0, #1
	RET
AEABI_END(dcmple)

/* int __aeabi_dcmpge(double, double) */
AEABI_ENTRY(dcmpge)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movlt    r0, #0
	movge    r0, #1
	RET
AEABI_END(dcmpge)

/* int __aeabi_dcmpgt(double, double) */
AEABI_ENTRY(dcmpgt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movle    r0, #0
	movgt    r0, #1
	RET
AEABI_END(dcmpgt)

/* int __aeabi_dcmpun(double, double) */
AEABI_ENTRY(dcmpun)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movvc    r0, #0
	movvs    r0, #1
	RET
AEABI_END(dcmpun)

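/*
 * Conversions.  __aeabi_d2iz truncates toward zero as C requires for a
 * cast to int, __aeabi_d2f narrows to single precision, and
 * __aeabi_i2d widens a signed int to double.
 */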
/* int __aeabi_d2iz(double) */
AEABI_ENTRY(d2iz)
	LOAD_DREG(d0, r0, r1)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the variant that uses the FPSCR to determine the
	 * rounding mode.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f64 s0, d0
#else
	/* Pre-UAL mnemonic for the round-towards-zero conversion. */
	ftosizd s0, d0
#endif
	vmov         r0, s0
	RET
AEABI_END(d2iz)

/* float __aeabi_d2f(double) */
AEABI_ENTRY(d2f)
	LOAD_DREG(d0, r0, r1)
	vcvt.f32.f64 s0, d0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(d2f)

/* double __aeabi_i2d(int) */
AEABI_ENTRY(i2d)
	vmov         s0, r0
	vcvt.f64.s32 d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(i2d)

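/*
 * Basic double arithmetic.  When code is built for the soft-float
 * calling convention, the compiler lowers ordinary expressions to calls
 * into these helpers; for example, "c = a + b" becomes roughly
 *
 *	a in r0/r1, b in r2/r3
 *	bl	__aeabi_dadd
 *	result c in r0/r1
 *
 * with the equivalent calls for subtraction, multiplication and
 * division handled by the routines below.
 */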
/* double __aeabi_dadd(double, double) */
AEABI_ENTRY(dadd)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vadd.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dadd)

/* double __aeabi_ddiv(double, double) */
AEABI_ENTRY(ddiv)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vdiv.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(ddiv)

/* double __aeabi_dmul(double, double) */
AEABI_ENTRY(dmul)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vmul.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dmul)

/* double __aeabi_dsub(double, double) */
AEABI_ENTRY(dsub)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vsub.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dsub)