/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

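/*
 * These routines implement the ARM RTABI double-precision helpers for
 * a soft-float calling convention: each double argument arrives split
 * across a pair of core registers (r0/r1 and r2/r3), and the
 * LOAD_DREG/UNLOAD_DREG macros from aeabi_vfp.h move values between
 * the core and VFP register files.  As a rough illustration (the
 * exact lowering is up to the compiler), a soft-float build may turn
 *
 *	double c = a + b;
 *
 * into a call equivalent to
 *
 *	c = __aeabi_dadd(a, b);
 *
 * The three __aeabi_cd* routines below return their result in the
 * APSR condition flags (via vmrs) rather than in r0, so callers
 * branch on the flags directly.  __aeabi_cdcmpeq uses the quiet
 * compare (vcmp), which raises Invalid Operation only for signaling
 * NaNs; equality sets the Z flag.
 */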
/* void __aeabi_cdcmpeq(double, double) */
AEABI_ENTRY(cdcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmpeq)

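/*
 * Same flag-returning convention, but using the signaling compare
 * (vcmpe), which raises Invalid Operation for any NaN operand; the
 * caller tests a <= b with the LS condition.
 */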
/* void __aeabi_cdcmple(double, double) */
AEABI_ENTRY(cdcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmple)

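/*
 * Reversed variant of __aeabi_cdcmple: compares the second argument
 * against the first, which lets a caller evaluate a >= b with the
 * same LS flag test used for <=.
 */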
/* void __aeabi_cdrcmple(double, double) */
AEABI_ENTRY(cdrcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdrcmple)

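/*
 * The __aeabi_dcmp* routines below return a boolean in r0 instead of
 * setting flags: each moves the VFP comparison result into the APSR,
 * then uses an IT block to select 0 or 1.  The condition codes are
 * chosen so that every unordered (NaN) comparison yields 0, matching
 * C's relational operators.  A rough C model of the family, with
 * hypothetical names and for illustration only:
 *
 *	int dcmpeq_model(double a, double b) { return a == b; }
 *	int dcmplt_model(double a, double b) { return a < b; }
 *
 * and likewise for le/ge/gt.  For __aeabi_dcmpeq itself, EQ is true
 * only for an ordered equal result, so NaNs compare unequal.
 */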
/* int __aeabi_dcmpeq(double, double) */
AEABI_ENTRY(dcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      ne
	movne    r0, #0
	moveq    r0, #1
	RET
AEABI_END(dcmpeq)

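/*
 * CC (C clear) is true only for an ordered less-than result; CS
 * covers greater-than, equal, and unordered, so NaN operands yield 0.
 */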
/* int __aeabi_dcmplt(double, double) */
AEABI_ENTRY(dcmplt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      cs
	movcs    r0, #0
	movcc    r0, #1
	RET
AEABI_END(dcmplt)

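/*
 * LS (C clear or Z set) is true for less-than or equal; HI covers
 * greater-than and unordered, so NaN operands yield 0.
 */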
/* int __aeabi_dcmple(double, double) */
AEABI_ENTRY(dcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      hi
	movhi    r0, #0
	movls    r0, #1
	RET
AEABI_END(dcmple)

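/*
 * GE (N == V) is true for greater-than or equal; LT covers less-than
 * and unordered (which sets V), so NaN operands yield 0.
 */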
/* int __aeabi_dcmpge(double, double) */
AEABI_ENTRY(dcmpge)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      lt
	movlt    r0, #0
	movge    r0, #1
	RET
AEABI_END(dcmpge)

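/*
 * GT (Z clear and N == V) is true only for an ordered greater-than;
 * LE covers less-than, equal, and unordered, so NaN operands yield 0.
 */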
/* int __aeabi_dcmpgt(double, double) */
AEABI_ENTRY(dcmpgt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      le
	movle    r0, #0
	movgt    r0, #1
	RET
AEABI_END(dcmpgt)

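/*
 * Returns nonzero when the operands are unordered, i.e. when either
 * is a NaN; the VS/VC tests read the V flag that an unordered
 * compare sets.  A rough C model (hypothetical name, illustration
 * only):
 *
 *	#include <math.h>
 *	int dcmpun_model(double a, double b)
 *	{
 *		return isnan(a) || isnan(b);
 *	}
 */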
/* int __aeabi_dcmpun(double, double) */
AEABI_ENTRY(dcmpun)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	ite      vc
	movvc    r0, #0
	movvs    r0, #1
	RET
AEABI_END(dcmpun)

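/*
 * Converts with rounding toward zero (the "z" in d2iz), independent
 * of the current FPSCR rounding mode, which matches the behaviour of
 * a truncating C cast.  A rough model (hypothetical name,
 * illustration only):
 *
 *	int d2iz_model(double d) { return (int)d; }
 */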
/* int __aeabi_d2iz(double) */
AEABI_ENTRY(d2iz)
	LOAD_DREG(d0, r0, r1)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the variant that uses the FPSCR to determine the
	 * rounding mode.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f64 s0, d0
#else
	ftosizd s0, d0
#endif
	vmov         r0, s0
	RET
AEABI_END(d2iz)

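/*
 * Narrows a double to a float using the current FPSCR rounding mode,
 * like an explicit (float) cast in C.
 */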
/* float __aeabi_d2f(double) */
AEABI_ENTRY(d2f)
	LOAD_DREG(d0, r0, r1)
	vcvt.f32.f64 s0, d0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(d2f)

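/*
 * Widens a 32-bit signed int to a double; every such int is exactly
 * representable, so the conversion never rounds.
 */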
/* double __aeabi_i2d(int) */
AEABI_ENTRY(i2d)
	vmov         s0, r0
	vcvt.f64.s32 d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(i2d)

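/*
 * The remaining routines each wrap a single VFP arithmetic
 * instruction: load both operands, operate, and return the result in
 * r0/r1.  A rough C model of the family (hypothetical names,
 * illustration only):
 *
 *	double dadd_model(double a, double b) { return a + b; }
 *	double ddiv_model(double a, double b) { return a / b; }
 *	double dmul_model(double a, double b) { return a * b; }
 *	double dsub_model(double a, double b) { return a - b; }
 */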
/* double __aeabi_dadd(double, double) */
AEABI_ENTRY(dadd)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vadd.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dadd)

/* double __aeabi_ddiv(double, double) */
AEABI_ENTRY(ddiv)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vdiv.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(ddiv)

/* double __aeabi_dmul(double, double) */
AEABI_ENTRY(dmul)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vmul.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dmul)

/* double __aeabi_dsub(double, double) */
AEABI_ENTRY(dsub)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vsub.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dsub)