/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

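/*
 * Double-precision AEABI ("Run-time ABI for the ARM Architecture") helper
 * functions implemented with VFP instructions.  The AEABI_ENTRY, LOAD_DREG
 * and UNLOAD_DREG macros from aeabi_vfp.h hide the difference between the
 * soft-float and VFP (hard-float) calling conventions.
 *
 * After "vmrs APSR_nzcv, fpscr" the result of the preceding vcmp.f64 is
 * visible in the core condition flags (only the relevant flags listed):
 *   equal        -> Z set
 *   less than    -> N set, C clear
 *   greater than -> C set, Z clear
 *   unordered    -> C and V set (at least one operand is a NaN)
 * Each __aeabi_dcmp* helper returns 1 when its relation holds and 0
 * otherwise; an unordered comparison yields 0, except for __aeabi_dcmpun.
 */
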
/* int __aeabi_dcmpeq(double, double) */
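/*
 * EQ holds only for a true equality; NE also holds for an unordered
 * result, so a comparison involving NaN returns 0.
 */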
AEABI_ENTRY(dcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movne    r0, #0
	moveq    r0, #1
	RET
AEABI_END(dcmpeq)

/* int __aeabi_dcmplt(double, double) */
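/*
 * CC holds only for a strict "less than"; CS covers greater than, equal
 * and unordered, so a comparison involving NaN returns 0.
 */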
AEABI_ENTRY(dcmplt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movcs    r0, #0
	movcc    r0, #1
	RET
AEABI_END(dcmplt)

/* int __aeabi_dcmple(double, double) */
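/*
 * LS holds for "less than or equal"; HI covers greater than and
 * unordered, so a comparison involving NaN returns 0.
 */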
AEABI_ENTRY(dcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movhi    r0, #0
	movls    r0, #1
	RET
AEABI_END(dcmple)

/* int __aeabi_dcmpge(double, double) */
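/*
 * GE holds for "greater than or equal"; LT also holds for an unordered
 * result, so a comparison involving NaN returns 0.
 */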
AEABI_ENTRY(dcmpge)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movlt    r0, #0
	movge    r0, #1
	RET
AEABI_END(dcmpge)

/* int __aeabi_dcmpgt(double, double) */
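/*
 * GT holds only for a strict "greater than"; LE also holds for an
 * unordered result, so a comparison involving NaN returns 0.
 */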
AEABI_ENTRY(dcmpgt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movle    r0, #0
	movgt    r0, #1
	RET
AEABI_END(dcmpgt)

/* int __aeabi_dcmpun(double, double) */
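/*
 * VS holds when the comparison is unordered, i.e. when at least one
 * operand is a NaN; VC holds for every ordered result.
 */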
AEABI_ENTRY(dcmpun)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64 d0, d1
	vmrs     APSR_nzcv, fpscr
	movvc    r0, #0
	movvs    r0, #1
	RET
AEABI_END(dcmpun)

/* int __aeabi_d2iz(double) */
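/*
 * ftosizd is the pre-UAL mnemonic for a double to signed 32-bit
 * conversion that rounds toward zero, matching C truncation semantics
 * regardless of the rounding mode currently set in the FPSCR.
 */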
AEABI_ENTRY(d2iz)
	LOAD_DREG(d0, r0, r1)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that uses the FPSCR to determine the
	 * rounding.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f64 s0, d0
#else
	ftosizd s0, d0
#endif
	vmov         r0, s0
	RET
AEABI_END(d2iz)

/* float __aeabi_d2f(double) */
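/*
 * vcvt.f32.f64 narrows using the current FPSCR rounding mode; the
 * UNLOAD_SREG macro moves the result to r0 when the soft-float calling
 * convention is in use.
 */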
AEABI_ENTRY(d2f)
	LOAD_DREG(d0, r0, r1)
	vcvt.f32.f64 s0, d0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(d2f)

/* double __aeabi_i2d(int) */
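/*
 * Move the integer argument into s0 and convert; every 32-bit integer
 * is exactly representable as a double, so the conversion is exact.
 */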
AEABI_ENTRY(i2d)
	vmov         s0, r0
	vcvt.f64.s32 d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(i2d)

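/*
 * The arithmetic helpers below all follow the same pattern: marshal the
 * operands into d0/d1, perform the VFP operation, and marshal the result
 * in d0 back to r0/r1 when the soft-float ABI is in use.
 */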
/* double __aeabi_dadd(double, double) */
AEABI_ENTRY(dadd)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vadd.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dadd)

/* double __aeabi_ddiv(double, double) */
AEABI_ENTRY(ddiv)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vdiv.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(ddiv)

/* double __aeabi_dmul(double, double) */
AEABI_ENTRY(dmul)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vmul.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dmul)

/* double __aeabi_dsub(double, double) */
AEABI_ENTRY(dsub)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vsub.f64 d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dsub)